content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
/run_analysis.R | no_license | stefMT2970/GetData010Assignment | R | false | false | 7,357 | r | ||
########################### DATA CLEANING ##############################
#####################################################################
###################### Missing Data #####################
######################################################################
is.na(x) # returns TRUE if x is missing (elementwise over a vector)
mydata$v1[mydata$v1==99] <- NA # recoding the sentinel value 99 to missing
mean(x, na.rm=TRUE) # excluding missing values during the calculation
mydata[!complete.cases(mydata),] # list rows of data that have at least one missing value
newdata <- na.omit(mydata) # create new dataset without rows containing missing data
#Detecting NAs
#==============
is.na(z) # Is it NA?
which (is.na(z)) # Which one is NA?
#To find all the rows in a data frame with at least one NA, try this:
# NOTE: the anonymous function must test its own argument x (one column at a
# time); the original tested the whole frame z on every iteration, which
# returns the same (wrong) result for every column.
unique (unlist (lapply (z, function (x) which (is.na (x)))))
#Ways to Exclude Missing Values
#===============================
#mean(), median(), colSums(), var(), sd(), min() and max() all take the na.rm argument
#Note that cor() and its relatives don't work that way: with those you need
#to supply the use= argument. This is to permit more complicated handling
#of missing values than simply omitting them.
na.action=na.fail #which just stops when it encounters any missing values
na.fail #Stop if any missing values are encountered
na.omit #Drop out any rows with missing values
na.exclude #Drop out rows with missing values, but keeps track of where
# they were (so that when you make predictions, for example,
#you end up with a vector whose length is that of the original response.)
na.pass #Take no action.
na.tree.replace (library (tree):
#For discrete variables, adds a new category called "NA" to replace the missing values.
na.gam.replace (library gam):
#Operates on discrete variables like na.tree.replace(); for numerics,
#NAs are replaced by the mean of the non-missing entries
#Example
a <- data.frame (c1 = 1:8, c2 = factor (c("a", "b", "a", "c", "b", "c", "a", "b")))
a[4,1] <- a[6,2] <- NA # This repeated assignment is legal and does what you expect.
levels(a$c2)
na.fail (a)
na.exclude (a)
a = na.gam.replace (a) #library(gam)
#Special Case 1a: Missing Values in Factor Vectors
#=====================================================
#We noted above that a missing value in a factor variable is displayed as <NA> rather than just NA.
#Again, missing values do not have a level, but you can change a missing value to one of the existing levels
a <- factor (c("a", "b", "c", "b", "c", "b", "a", "c", "c")) # create the factor
levels(a)
a[3] <- "d" # warning: "d" is not an existing level, so NAs have been generated.
levels(a)[1] <- "AA" # renaming a level relabels every element at that level
as.character(a)
levels(a)
#Internal Storage and Extra Levels
#-------------------------------
a <- factor (c(1, 2, 3, 2, 3, 2, 1), levels=1:4, labels=c("Small", "Medium", "Large", "Huge"))
levels(a)
table(a)
table (a[,drop=T])
#Special Case 2: Missing Values in Character Vectors
#========================================================
#Character vectors can have missing values. They display as NA in the usual way.
#This really isn't a special case at all.
a <- factor (c(1, 2, 3, 2, 3, 2, 1), levels=1:4, labels=c("Small", "Medium", "Large", "Huge"))
a[3] <- NA
table (a)
table (a, exclude=NULL)
sum (is.na (a))
#Special Case 3: NaNs
#=====================
#In addition to NA, R has a special value NaN for "not a number." 0/0 is an
#example of a calculation that will produce a NaN. NaNs print as NaN, but
#generally act like NAs
#For example, a computation done on an NaN produces an NaN; if you try to extract
#the NaNth element of a vector, you get NA
#One more special value is Inf. If you need them, there are is.nan() and
#functions for finding things that are NaN or infinite and not NA.
#Why is my numeric variable a factor?
#====================================
# "numeric" variable actually contains some non-numeric entries (like "NA" or "Missing" or an empty space)
Steve$G <- as.numeric(levels(Steve$G)[Steve$G])
as.numeric(as.character(Steve$G))
#How do I convert factors to character vectors in a data frame?
#===============================================================
for (i in 1:ncol (a)) if (class (a[,i]) == "factor") a[,i] <- as.character(a[,i])
#When are factor variables a big pain?
#======================================
#Factor variables are a pain when you're cleaning your data because they're hard
#to update. My approach has always been to convert the variable to character with
#as.character(), then handle the variable as a character vector, and then convert
#it to factor (using factor() or as.factor()) at the end.
#Operations on Missing Values
#============================
x <- c(1, 2, NA, 4)
y<-c(2,5,8,NA)
z=cbind(x,y)
x + 1 # NA + 1 = NA
sum(x) # This produces NA because we can't add NAs
length(x) # This is okay
############
as.numeric (c("1", "2", "4"))
is.numeric(c("1", "2", "three", "4"))
c(1, 2, 3)[4]
NA - 1
a <- data.frame (a = 1:3, b = 2:4)
a[,4]
a[4,]
a[1,2] <- NA
a[a$b < 4,]
a[,is.numeric(a)]
class(a)
str(a)
###############
g <- as.data.frame(matrix(c(1:5, NA), ncol = 2))
g
na.omit(g)
na.exclude(g)
na.fail(g)
na.pass(g)
#Missing values in analysis
anscombe <- within(anscombe, {
y1[1:3] <- NA
})
model.omit <- lm(y2 ~ y1, data = anscombe, na.action = na.omit)
model.exclude <- lm(y2 ~ y1, data = anscombe,na.action = na.exclude)
resid(model.omit)
resid(model.exclude)
fitted(model.omit)
fitted(model.exclude)
x1 <- c(1, 4, 3, NA, 7)
x2 <- c("a", "B", NA, "NA")
mean(x1)
mean(x1, na.rm = TRUE)
summary(x1)
table(x1)
table(x1, useNA = "ifany")
table(1:3, useNA = "always")
x1s <- sort(x1)
length(x1s)
sort(x1, na.last = TRUE)
#========================================================================
############## mice package for missing values imputation ################
#https://gist.github.com/mick001/df77b69b30ef6ff9fc0b
data <- airquality
data[4:10,3] <- rep(NA,7)
data[1:5,4] <- NA
data <- data[-c(5,6)]
summary(data)
#check for features (columns) and samples (rows) where more than 5% of the
#data is missing using a simple function
pMiss <- function(x) {
  # Percentage of entries in x that are missing (NA).
  100 * mean(is.na(x))
}
apply(data,2,pMiss)
apply(data,1,pMiss)
#Using mice for looking at missing data pattern
install.packages("mice")
library(mice)
md.pattern(data)
#A perhaps more helpful visual representation can be obtained using the VIM package as follows
install.packages("VIM")
library(VIM)
aggr_plot <- aggr(data, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(data), cex.axis=.7, gap=3, ylab=c("Histogram of missing data","Pattern"))
#Another (hopefully) helpful visual approach is a special box plot
marginplot(data[c(1,2)])
#Imputing the missing data : The mice() function takes care of the imputing process
tempData <- mice(data,m=5,maxit=50,meth='pmm',seed=500)
summary(tempData)
#A couple of notes on the parameters:
#m=5 refers to the number of imputed datasets. Five is the default value.
#meth='pmm' refers to the imputation method.
#In this case we are using predictive mean matching as imputation method.
#Other imputation methods can be used, type methods(mice) for imputation methods.
#If you would like to check the imputed data, for instance for the variable Ozone
tempData$imp$Ozone
tempData$meth
completedData <- complete(tempData,1)
#Inspecting the distribution of original and imputed data
#use a scatterplot and plot Ozone against all the other variables
library(lattice)
xyplot(tempData,Ozone ~ Wind+Temp+Solar.R,pch=18,cex=1)
#What we would like to see is that the shape of the magenta points (imputed)
#matches the shape of the blue ones (observed). The matching shape tells us
#that the imputed values are indeed "plausible values"
#Another helpful plot is the density plot:
densityplot(tempData)
stripplot(tempData, pch = 20, cex = 1.2)
#Pooling
#============
#Suppose that the next step in our analysis is to fit a linear model to the data.
#You may ask what imputed dataset to choose. The mice package makes it again very
#easy to fit a a model to each of the imputed dataset and then pool the results
#together
modelFit1 <- with(tempData,lm(Temp~ Ozone+Solar.R+Wind))
summary(pool(modelFit1))
tempData2 <- mice(data,m=50,seed=245435)
modelFit2 <- with(tempData2,lm(Temp~ Ozone+Solar.R+Wind))
summary(pool(modelFit2))
#############################################################################
################ R Function : Creating Dummy variables ##########################
#========================================
DF <- data.frame(strcol = c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
for(level in unique(DF$strcol)){
DF[paste("strcol", level, sep = "_")] <- ifelse(DF$strcol == level, 1, 0)}
#--------------------------------
DF <- factor(c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
b = contrasts(DF)
b
contrasts(DF) <- contr.treatment(7)
contrasts(DF) <- contr.treatment(7,2)
contrasts(DF) <- contr.helmert(7)
#-----------------------------------------------------------
x=factor(c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
year.f = as.factor(x)
dummies = model.matrix(~year.f-1)
set.seed(1)
dat <- data.frame(sex = sample(c("male","female"), 10, replace = TRUE))
model.matrix( ~ sex - 1, data = dat)
head(model.matrix(~Carseats$ShelveLoc-1))
View(Carseats)
#----------------------------------------------------------------
set.seed(001) # generating some data
sex <- factor(sample(1:2, 10, replace=TRUE)) # this is what you have
sex<-factor(ifelse(as.numeric(sex)==2, 1,0)) # this is what you want
sex
#If you want labels to be 0 = Male and 1 = Female, then...
sex<-factor(ifelse(as.numeric(sex)==2, 1,0), labels=c('M', 'F'))
sex
#-------------------------------------------------------------------
library(ISLR)
attach(Carseats)
View(Carseats)
hsb2 <- within(Carseats, {
race.ct <- C(Carseats$ShelveLoc, treatment)
print(attributes(race.ct))
})
hsb3 <- within(Carseats, {
race.ct <- C(Carseats$ShelveLoc, helmert)
print(attributes(race.ct))
})
hsb4 <- within(Carseats, {
race.ch1 <- C(Carseats$ShelveLoc, helmert, 3)
print(attributes(race.ch1))
})
a = contrasts(Carseats$ShelveLoc)
contrasts(Carseats$ShelveLoc) <- contr.treatment(3)
#---------------------------------------------------
a#Actually you don't need to create a dummy variable in order to estimate a model
#using lm, let's see this example:
set.seed(001) # Generating some data
N <- 100
x <- rnorm(N, 50, 20)
y <- 20 + 3.5*x + rnorm(N)
sex <- factor(sample(1:2, N, replace=TRUE))
# Estimating the linear model
lm(y ~ x + sex) # using the first category as the baseline (this means sex==1)
# renaming the categories and labelling them
sex<-factor(ifelse(as.numeric(sex)==2, 1,0), labels=c('M', 'F'))
lm(y ~ x + sex) # the same results, baseline is 'Male'
#############################################################################
################ Zero- and Near Zero-Variance Predictors ##########################
library(caret)
db_train <- read.csv("C:\\Users\\Vamsi\\Desktop\\R.Alg\\practice\\kaggle\\Santander Customer Satisfaction\\train.csv", na.strings = "")
dim(db_train)
nzv1 <- db_train[,-nearZeroVar(db_train)]
dim(nzv1)
nzv2 = nearZeroVar(db_train, saveMetrics = TRUE)
dim(nzv2)
str(nzv2, vec.len=1)
nzv2[nzv2[,"zeroVar"] > 0, ]
nzv2[nzv2$nzv2,]
#############################################################################
################ Identifying Correlated Predictors ##########################
#While there are some models that thrive on correlated predictors (such as pls),
#other models may benefit from reducing the level of correlation between the
#predictors.
dim(db_train)
x1 <- cor(db_train)
chart.Correlation(x1)
corrplot(x1, type = "upper")
MatrCorLar <- melt(x1)
ggplot(x1, aes(x=Var1, y=Var2, fill=value))+geom_tile()
ggplot(x1, aes(x = Var1, y = Var2, fill = value))+geom_tile()
highCorr <- sum(abs(x1[upper.tri(x1)]) > .999)
library(usdm)
df = # Data Frame
vif(db_train)
qr(db_train)
qr(db_train)$pivot
| /R.help/data cleaning.R | no_license | vamsiry/Data-Science | R | false | false | 12,617 | r |
########################### DATA CLEANING ##############################
#####################################################################
###################### Missing Data #####################
######################################################################
is.na(x) # returns TRUE if x is missing (elementwise over a vector)
mydata$v1[mydata$v1==99] <- NA # recoding the sentinel value 99 to missing
mean(x, na.rm=TRUE) # excluding missing values during the calculation
mydata[!complete.cases(mydata),] # list rows of data that have at least one missing value
newdata <- na.omit(mydata) # create new dataset without rows containing missing data
#Detecting NAs
#==============
is.na(z) # Is it NA?
which (is.na(z)) # Which one is NA?
#To find all the rows in a data frame with at least one NA, try this:
# NOTE: the anonymous function must test its own argument x (one column at a
# time); the original tested the whole frame z on every iteration, which
# returns the same (wrong) result for every column.
unique (unlist (lapply (z, function (x) which (is.na (x)))))
#Ways to Exclude Missing Values
#===============================
#mean(), median(), colSums(), var(), sd(), min() and max() all take the na.rm argument
#Note that cor() and its relatives don't work that way: with those you need
#to supply the use= argument. This is to permit more complicated handling
#of missing values than simply omitting them.
na.action=na.fail #which just stops when it encounters any missing values
na.fail #Stop if any missing values are encountered
na.omit #Drop out any rows with missing values
na.exclude #Drop out rows with missing values, but keeps track of where
# they were (so that when you make predictions, for example,
#you end up with a vector whose length is that of the original response.)
na.pass #Take no action.
na.tree.replace (library (tree):
#For discrete variables, adds a new category called "NA" to replace the missing values.
na.gam.replace (library gam):
#Operates on discrete variables like na.tree.replace(); for numerics,
#NAs are replaced by the mean of the non-missing entries
#Example
a <- data.frame (c1 = 1:8, c2 = factor (c("a", "b", "a", "c", "b", "c", "a", "b")))
a[4,1] <- a[6,2] <- NA # This repeated assignment is legal and does what you expect.
levels(a$c2)
na.fail (a)
na.exclude (a)
a = na.gam.replace (a) #library(gam)
#Special Case 1a: Missing Values in Factor Vectors
#=====================================================
#We noted above that a missing value in a factor variable is displayed as <NA> rather than just NA.
#Again, missing values do not have a level, but you can change a missing value to one of the existing levels
a <- factor (c("a", "b", "c", "b", "c", "b", "a", "c", "c")) # create the factor
levels(a)
a[3] <- "d" # warning: "d" is not an existing level, so NAs have been generated.
levels(a)[1] <- "AA" # renaming a level relabels every element at that level
as.character(a)
levels(a)
#Internal Storage and Extra Levels
#-------------------------------
a <- factor (c(1, 2, 3, 2, 3, 2, 1), levels=1:4, labels=c("Small", "Medium", "Large", "Huge"))
levels(a)
table(a)
table (a[,drop=T])
#Special Case 2: Missing Values in Character Vectors
#========================================================
#Character vectors can have missing values. They display as NA in the usual way.
#This really isn't a special case at all.
a <- factor (c(1, 2, 3, 2, 3, 2, 1), levels=1:4, labels=c("Small", "Medium", "Large", "Huge"))
a[3] <- NA
table (a)
table (a, exclude=NULL)
sum (is.na (a))
#Special Case 3: NaNs
#=====================
#In addition to NA, R has a special value NaN for "not a number." 0/0 is an
#example of a calculation that will produce a NaN. NaNs print as NaN, but
#generally act like NAs
#For example, a computation done on an NaN produces an NaN; if you try to extract
#the NaNth element of a vector, you get NA
#One more special value is Inf. If you need them, there are is.nan() and
#functions for finding things that are NaN or infinite and not NA.
#Why is my numeric variable a factor?
#====================================
# "numeric" variable actually contains some non-numeric entries (like "NA" or "Missing" or an empty space)
Steve$G <- as.numeric(levels(Steve$G)[Steve$G])
as.numeric(as.character(Steve$G))
#How do I convert factors to character vectors in a data frame?
#===============================================================
for (i in 1:ncol (a)) if (class (a[,i]) == "factor") a[,i] <- as.character(a[,i])
#When are factor variables a big pain?
#======================================
#Factor variables are a pain when you're cleaning your data because they're hard
#to update. My approach has always been to convert the variable to character with
#as.character(), then handle the variable as a character vector, and then convert
#it to factor (using factor() or as.factor()) at the end.
#Operations on Missing Values
#============================
x <- c(1, 2, NA, 4)
y<-c(2,5,8,NA)
z=cbind(x,y)
x + 1 # NA + 1 = NA
sum(x) # This produces NA because we can't add NAs
length(x) # This is okay
############
as.numeric (c("1", "2", "4"))
is.numeric(c("1", "2", "three", "4"))
c(1, 2, 3)[4]
NA - 1
a <- data.frame (a = 1:3, b = 2:4)
a[,4]
a[4,]
a[1,2] <- NA
a[a$b < 4,]
a[,is.numeric(a)]
class(a)
str(a)
###############
g <- as.data.frame(matrix(c(1:5, NA), ncol = 2))
g
na.omit(g)
na.exclude(g)
na.fail(g)
na.pass(g)
#Missing values in analysis
anscombe <- within(anscombe, {
y1[1:3] <- NA
})
model.omit <- lm(y2 ~ y1, data = anscombe, na.action = na.omit)
model.exclude <- lm(y2 ~ y1, data = anscombe,na.action = na.exclude)
resid(model.omit)
resid(model.exclude)
fitted(model.omit)
fitted(model.exclude)
x1 <- c(1, 4, 3, NA, 7)
x2 <- c("a", "B", NA, "NA")
mean(x1)
mean(x1, na.rm = TRUE)
summary(x1)
table(x1)
table(x1, useNA = "ifany")
table(1:3, useNA = "always")
x1s <- sort(x1)
length(x1s)
sort(x1, na.last = TRUE)
#========================================================================
############## mice package for missing values imputation ################
#https://gist.github.com/mick001/df77b69b30ef6ff9fc0b
data <- airquality
data[4:10,3] <- rep(NA,7)
data[1:5,4] <- NA
data <- data[-c(5,6)]
summary(data)
#check for features (columns) and samples (rows) where more than 5% of the
#data is missing using a simple function
pMiss <- function(x) {
  # Percentage of entries in x that are missing (NA).
  100 * mean(is.na(x))
}
apply(data,2,pMiss)
apply(data,1,pMiss)
#Using mice for looking at missing data pattern
install.packages("mice")
library(mice)
md.pattern(data)
#A perhaps more helpful visual representation can be obtained using the VIM package as follows
install.packages("VIM")
library(VIM)
aggr_plot <- aggr(data, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(data), cex.axis=.7, gap=3, ylab=c("Histogram of missing data","Pattern"))
#Another (hopefully) helpful visual approach is a special box plot
marginplot(data[c(1,2)])
#Imputing the missing data : The mice() function takes care of the imputing process
tempData <- mice(data,m=5,maxit=50,meth='pmm',seed=500)
summary(tempData)
#A couple of notes on the parameters:
#m=5 refers to the number of imputed datasets. Five is the default value.
#meth='pmm' refers to the imputation method.
#In this case we are using predictive mean matching as imputation method.
#Other imputation methods can be used, type methods(mice) for imputation methods.
#If you would like to check the imputed data, for instance for the variable Ozone
tempData$imp$Ozone
tempData$meth
completedData <- complete(tempData,1)
#Inspecting the distribution of original and imputed data
#use a scatterplot and plot Ozone against all the other variables
library(lattice)
xyplot(tempData,Ozone ~ Wind+Temp+Solar.R,pch=18,cex=1)
#What we would like to see is that the shape of the magenta points (imputed)
#matches the shape of the blue ones (observed). The matching shape tells us
#that the imputed values are indeed "plausible values"
#Another helpful plot is the density plot:
densityplot(tempData)
stripplot(tempData, pch = 20, cex = 1.2)
#Pooling
#============
#Suppose that the next step in our analysis is to fit a linear model to the data.
#You may ask what imputed dataset to choose. The mice package makes it again very
#easy to fit a a model to each of the imputed dataset and then pool the results
#together
modelFit1 <- with(tempData,lm(Temp~ Ozone+Solar.R+Wind))
summary(pool(modelFit1))
tempData2 <- mice(data,m=50,seed=245435)
modelFit2 <- with(tempData2,lm(Temp~ Ozone+Solar.R+Wind))
summary(pool(modelFit2))
#############################################################################
################ R Function : Creating Dummy variables ##########################
#========================================
DF <- data.frame(strcol = c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
for(level in unique(DF$strcol)){
DF[paste("strcol", level, sep = "_")] <- ifelse(DF$strcol == level, 1, 0)}
#--------------------------------
DF <- factor(c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
b = contrasts(DF)
b
contrasts(DF) <- contr.treatment(7)
contrasts(DF) <- contr.treatment(7,2)
contrasts(DF) <- contr.helmert(7)
#-----------------------------------------------------------
x=factor(c("A", "A", "B", "F", "C", "G", "C", "D", "E", "F"))
year.f = as.factor(x)
dummies = model.matrix(~year.f-1)
set.seed(1)
dat <- data.frame(sex = sample(c("male","female"), 10, replace = TRUE))
model.matrix( ~ sex - 1, data = dat)
head(model.matrix(~Carseats$ShelveLoc-1))
View(Carseats)
#----------------------------------------------------------------
set.seed(001) # generating some data
sex <- factor(sample(1:2, 10, replace=TRUE)) # this is what you have
sex<-factor(ifelse(as.numeric(sex)==2, 1,0)) # this is what you want
sex
#If you want labels to be 0 = Male and 1 = Female, then...
sex<-factor(ifelse(as.numeric(sex)==2, 1,0), labels=c('M', 'F'))
sex
#-------------------------------------------------------------------
library(ISLR)
attach(Carseats)
View(Carseats)
hsb2 <- within(Carseats, {
race.ct <- C(Carseats$ShelveLoc, treatment)
print(attributes(race.ct))
})
hsb3 <- within(Carseats, {
race.ct <- C(Carseats$ShelveLoc, helmert)
print(attributes(race.ct))
})
hsb4 <- within(Carseats, {
race.ch1 <- C(Carseats$ShelveLoc, helmert, 3)
print(attributes(race.ch1))
})
a = contrasts(Carseats$ShelveLoc)
contrasts(Carseats$ShelveLoc) <- contr.treatment(3)
#---------------------------------------------------
a#Actually you don't need to create a dummy variable in order to estimate a model
#using lm, let's see this example:
set.seed(001) # Generating some data
N <- 100
x <- rnorm(N, 50, 20)
y <- 20 + 3.5*x + rnorm(N)
sex <- factor(sample(1:2, N, replace=TRUE))
# Estimating the linear model
lm(y ~ x + sex) # using the first category as the baseline (this means sex==1)
# renaming the categories and labelling them
sex<-factor(ifelse(as.numeric(sex)==2, 1,0), labels=c('M', 'F'))
lm(y ~ x + sex) # the same results, baseline is 'Male'
#############################################################################
################ Zero- and Near Zero-Variance Predictors ##########################
library(caret)
db_train <- read.csv("C:\\Users\\Vamsi\\Desktop\\R.Alg\\practice\\kaggle\\Santander Customer Satisfaction\\train.csv", na.strings = "")
dim(db_train)
nzv1 <- db_train[,-nearZeroVar(db_train)]
dim(nzv1)
nzv2 = nearZeroVar(db_train, saveMetrics = TRUE)
dim(nzv2)
str(nzv2, vec.len=1)
nzv2[nzv2[,"zeroVar"] > 0, ]
nzv2[nzv2$nzv2,]
#############################################################################
################ Identifying Correlated Predictors ##########################
#While there are some models that thrive on correlated predictors (such as pls),
#other models may benefit from reducing the level of correlation between the
#predictors.
dim(db_train)
x1 <- cor(db_train)
chart.Correlation(x1)
corrplot(x1, type = "upper")
MatrCorLar <- melt(x1)
ggplot(x1, aes(x=Var1, y=Var2, fill=value))+geom_tile()
ggplot(x1, aes(x = Var1, y = Var2, fill = value))+geom_tile()
highCorr <- sum(abs(x1[upper.tri(x1)]) > .999)
library(usdm)
df = # Data Frame
vif(db_train)
qr(db_train)
qr(db_train)$pivot
|
TAMDistance <- function(x, y){
  # Time Alignment Measurement (TAM) between two univariate numeric series.
  #
  # TAM decomposes the optimal DTW warping path into the fraction of steps
  # spent in delay, in advance, and in phase, and returns
  #   p_advance + p_delay + (1 - p_phase)
  # so 0 means the series are fully in phase and larger values mean more
  # temporal distortion. Returns NA when the inputs fail validation.
  #
  # Args:
  #   x, y: univariate numeric vectors, length > 1, no missing values
  #         (enforced by TAMInitialCheck, which stops otherwise).
  #
  # Requires the 'dtw' package for the warping-path computation.
  # inherits() is the robust way to detect a try() failure (the original
  # compared is(...)[1] against "try-error", which is brittle); try() is
  # deliberately left non-silent so the validation message is still shown.
  if (inherits(try(TAMInitialCheck(x, y)), "try-error")) {
    return(NA)
  }
  # Optimal DTW alignment; index1/index2 hold the warping-path indices,
  # shifted here to be 0-based.
  res <- dtw(x, y)
  px <- unlist(res["index1"], use.names = FALSE) - 1
  py <- unlist(res["index2"], use.names = FALSE) - 1
  dpx <- diff(px)
  dpy <- diff(py)
  # A 0-step along x means delay, a 0-step along y means advance, and a
  # simultaneous 1-step in both means the series move in phase.
  delay <- sum(dpx == 0)
  advance <- sum(dpy == 0)
  phase <- sum(dpx == 1 & dpy == 1)
  # The last (0-based) path indices give the usable lengths of each series.
  len_y <- py[length(py)]
  len_x <- px[length(px)]
  # Ratios of time spent in delay, advance and phase.
  p_delay <- delay / len_y
  p_advance <- advance / len_x
  p_phase <- phase / min(len_x, len_y)
  p_advance + p_delay + (1 - p_phase)
}
TAMInitialCheck <- function(x, y){
# Input validation for TAMDistance: both series must be numeric, univariate
# vectors of length > 1 with no missing values. Stops with a descriptive
# error (call.=FALSE suppresses the call in the message) on the first
# violated condition; evaluates to NULL when all checks pass.
if (! is.numeric(x) | ! is.numeric(y)) {
stop('The series must be numeric', call.=FALSE)
}
# is.vector() also rejects objects carrying attributes (matrices, ts, ...).
if (! is.vector(x) | ! is.vector(y)) {
stop('The series must be univariate vectors', call.=FALSE)
}
# A single observation is not enough to compute a warping path.
if (length(x) <= 1 | length(y) <= 1) {
stop('The series must have a more than one point', call.=FALSE)
}
if (any(is.na(x)) | any(is.na(y))) {
stop('There are missing values in the series', call.=FALSE)
}
} | /R/tam_distance.R | no_license | cran/TSdist | R | false | false | 1,381 | r | TAMDistance <- function(x, y){
# Body of the duplicated (text-column) copy of TAMDistance; its function
# header is fused onto the preceding dataset-metadata line.
# Returns NA when TAMInitialCheck signals an error for the inputs.
if (is(try(TAMInitialCheck(x, y)))[1] == "try-error") {
return(NA)
} else {
# Calculates the DTW between two time series and stores the optimal warping path.
res = dtw(x, y)
# 0-based warping-path indices along x (px) and y (py).
px <- unlist(res["index1"], use.names = FALSE) - 1
py <- unlist(res["index2"], use.names = FALSE) - 1
dpx <- diff(px)
dpy <- diff(py)
# Counts the number of samples in delay, advance and phase:
# a 0-step along x is delay, a 0-step along y is advance, and a
# simultaneous 1-step in both means the series move in phase.
delay <- length(which(dpx %in% c(0)))
advance <- length(which(dpy %in% c(0)))
phase <- length(which(((dpx == 1) * (dpy == 1)) %in% c(1)))
# Get the length of both time series (last 0-based path index of each).
len_y <- py[length(py)]
len_x <- px[length(px)]
# Calculates the ratios of delay, advance and phase
p_delay <- delay * 1. / len_y
p_advance <- advance * 1. / len_x
p_phase <- phase * 1. / min(len_x, len_y)
# TAM = advance ratio + delay ratio + out-of-phase ratio; 0 = fully in phase.
tam <- p_advance + p_delay + (1 - p_phase)
return(tam)
}
}
TAMInitialCheck <- function(x, y){
# Input validation for TAMDistance: both series must be numeric, univariate
# vectors of length > 1 with no missing values. Stops with a descriptive
# error (call.=FALSE suppresses the call in the message) on the first
# violated condition; evaluates to NULL when all checks pass.
if (! is.numeric(x) | ! is.numeric(y)) {
stop('The series must be numeric', call.=FALSE)
}
# is.vector() also rejects objects carrying attributes (matrices, ts, ...).
if (! is.vector(x) | ! is.vector(y)) {
stop('The series must be univariate vectors', call.=FALSE)
}
# A single observation is not enough to compute a warping path.
if (length(x) <= 1 | length(y) <= 1) {
stop('The series must have a more than one point', call.=FALSE)
}
if (any(is.na(x)) | any(is.na(y))) {
stop('There are missing values in the series', call.=FALSE)
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capture_doc.R
\docType{data}
\name{capture}
\alias{capture}
\title{capture}
\format{A data frame containing 1000 rows and 10 columns
\describe{
\item{size}{(integer) - The size (length) of the ship (maybe in meters?)}
\item{cannons}{(integer) - The number of cannons on the ship}
\item{style}{(string) - The style of the ship (either modern or classic)}
\item{warnshot}{(binary) - Did the ship fire a warning shot at the Perilous Pigeon when it got close?}
\item{date}{(integer) - The date of the capture (1 = January 1, 365 = December 31)}
\item{heardof}{(binary) - Was the target ship recognized by the captain's first mate?}
\item{decorations}{(integer) - An integer between 1 and 10 indicating how decorated the ship was. 1 means it looks totally common and shabby, 10 means it is among the finest looking ship you've ever seen!}
\item{daysfromshore}{(integer) - How many days from the nearest land was the ship when it was found?}
\item{speed}{(integer) - How fast was the ship going when it was caught?}
\item{treasure}{(numeric) - How much treasure was found on the ship when it was captured?}
}}
\source{
2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
}
\usage{
capture
}
\description{
A dataframe containing a historical record of every ship the Perilous Pigeon captured on the Bodensee in the years 2014 and 2015
}
\examples{
}
\keyword{datasets}
| /man/capture.Rd | no_license | wyim-pgl/yarrr | R | false | true | 1,491 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/capture_doc.R
\docType{data}
\name{capture}
\alias{capture}
\title{capture}
\format{A data frame containing 1000 rows and 10 columns
\describe{
\item{size}{(integer) - The size (length) of the ship (maybe in meters?)}
\item{cannons}{(integer) - The number of cannons on the ship}
\item{style}{(string) - The style of the ship (either modern or classic)}
\item{warnshot}{(binary) - Did the ship fire a warning shot at the Perilous Pigeon when it got close?}
\item{date}{(integer) - The date of the capture (1 = January 1, 365 = December 31)}
\item{heardof}{(binary) - Was the target ship recognized by the captain's first mate?}
\item{decorations}{(integer) - An integer between 1 and 10 indicating how decorated the ship was. 1 means it looks totally common and shabby, 10 means it is among the finest looking ship you've ever seen!}
\item{daysfromshore}{(integer) - How many days from the nearest land was the ship when it was found?}
\item{speed}{(integer) - How fast was the ship going when it was caught?}
\item{treasure}{(numeric) - How much treasure was found on the ship when it was captured?}
}}
\source{
2015 annual international pirate meeting at the Bodensee in Konstanz, Germany
}
\usage{
capture
}
\description{
A dataframe containing a historical record of every ship the Perilous Pigeon captured on the Bodensee in the years 2014 and 2015
}
\examples{
}
\keyword{datasets}
|
#4
# Exercise 4: plotting hourly weather data against a date-time axis.
# Uses read_csv (readr), ggplot2, and make_date (lubridate) -- assumed loaded
# earlier in the course session (e.g. via library(tidyverse); library(lubridate)).
# Import auckland hourly data
akl_hourly <- read_csv("akl_weather_hourly_2016.csv")
# Examine structure of time column
# NOTE(review): read_csv should parse "time" automatically -- confirm its type
# from the str() output before relying on it as a plot axis.
str(akl_hourly$time)
# Examine head of time column
head(akl_hourly$time)
# A plot using just time
# Grouping by calendar date draws one semi-transparent line per day, so the
# daily temperature cycles overlay instead of being joined into a single path.
ggplot(akl_hourly, aes(x = time, y = temperature)) +
geom_line(aes(group = make_date(year, month, mday)), alpha = 0.2)
| /Working with time and date in R/Problems in practice/4.R | no_license | SaiSharanyaY/DataCamp-Data-Scientist-with-R-Track. | R | false | false | 360 | r | #4
# Exercise 4 (duplicate copy): plotting hourly weather data against time.
# Uses read_csv (readr), ggplot2, and make_date (lubridate) -- assumed loaded
# earlier in the session.
# Import auckland hourly data
akl_hourly <- read_csv("akl_weather_hourly_2016.csv")
# Examine structure of time column
str(akl_hourly$time)
# Examine head of time column
head(akl_hourly$time)
# A plot using just time
# One semi-transparent line per calendar day: grouping by make_date(...) keeps
# each day's temperature trace separate instead of one continuous path.
ggplot(akl_hourly, aes(x = time, y = temperature)) +
geom_line(aes(group = make_date(year, month, mday)), alpha = 0.2)
|
# Plot 1: histogram of household global active power for 1-2 Feb 2007,
# saved as plot1.png (480x480).
#
# Read a small sample first so read.table can infer the column classes, then
# reuse them for the full read (faster and avoids type surprises).
tab5rows <- read.table("household_power_consumption.txt", header = TRUE, nrows = 5, sep = ";")
classes <- sapply(tab5rows, class)
# The Date column must stay character so the string comparisons below work.
classes[1] <- "character"
# 70,000 rows is enough to reach 2 Feb 2007; "?" encodes missing values here.
tabAll <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                     nrows = 70000, na.strings = "?", colClasses = classes)
# Keep only the two target days, in chronological order.
dataset <- rbind(subset(tabAll, Date == "1/2/2007"), subset(tabAll, Date == "2/2/2007"))
# Write the histogram to a PNG device and close it.
png(filename = "plot1.png", width = 480, height = 480)
hist(as.numeric(dataset$Global_active_power), breaks = 12, col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | ashetti/ExData_Plotting1 | R | false | false | 556 | r | tab5rows <- read.table("household_power_consumption.txt", header = TRUE, nrows = 5,sep = ";")
# Column classes were inferred from a 5-row sample read on the previous line
# (fused into the record separator above); reuse them for the full read.
classes <- sapply(tab5rows,class)
# Force the Date column to character so the string comparisons below match.
classes[1]="character"
# "?" encodes missing values; 70,000 rows is enough to reach 2 Feb 2007.
tabAll <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", nrows = 70000, na.strings = "?",colClasses=classes)
# Keep only the two target days: 1 Feb 2007 and 2 Feb 2007.
dataset <- rbind(subset(tabAll,Date=="1/2/2007"),subset(tabAll,Date=="2/2/2007"))
# Plot 1: red histogram of global active power, written to a 480x480 PNG.
png(filename="plot1.png",width=480,height=480)
hist(as.numeric(dataset$Global_active_power),breaks=12,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
\alias{GtkVScale}
\alias{gtkVScale}
\name{GtkVScale}
\title{GtkVScale}
\description{A vertical slider widget for selecting a value from a range}
\section{Methods and Functions}{
\code{\link{gtkVScaleNew}(adjustment = NULL, show = TRUE)}\cr
\code{\link{gtkVScaleNewWithRange}(min, max, step, show = TRUE)}\cr
\code{gtkVScale(adjustment = NULL, min, max, step, show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkRange
+----GtkScale
+----GtkVScale}}
\section{Interfaces}{GtkVScale implements
AtkImplementorIface, \code{\link{GtkBuildable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{The \code{\link{GtkVScale}} widget is used to allow the user to select a value using
a vertical slider. To create one, use \code{\link{gtkVScaleNewWithRange}}.
The position to show the current value, and the number of decimal places
shown can be set using the parent \code{\link{GtkScale}} class's functions.}
\section{Structures}{\describe{\item{\verb{GtkVScale}}{
The \code{\link{GtkVScale}} struct contains private data only, and
should be accessed using the functions below.
}}}
\section{Convenient Construction}{\code{gtkVScale} is the result of collapsing the constructors of \code{GtkVScale} (\code{\link{gtkVScaleNew}}, \code{\link{gtkVScaleNewWithRange}}) and accepts a subset of its arguments matching the required arguments of one of its delegate constructors.}
\references{\url{https://developer.gnome.org/gtk2/stable/GtkVScale.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/GtkVScale.Rd | no_license | cran/RGtk2 | R | false | false | 1,703 | rd | \alias{GtkVScale}
\alias{gtkVScale}
\name{GtkVScale}
\title{GtkVScale}
\description{A vertical slider widget for selecting a value from a range}
\section{Methods and Functions}{
\code{\link{gtkVScaleNew}(adjustment = NULL, show = TRUE)}\cr
\code{\link{gtkVScaleNewWithRange}(min, max, step, show = TRUE)}\cr
\code{gtkVScale(adjustment = NULL, min, max, step, show = TRUE)}
}
\section{Hierarchy}{\preformatted{GObject
+----GInitiallyUnowned
+----GtkObject
+----GtkWidget
+----GtkRange
+----GtkScale
+----GtkVScale}}
\section{Interfaces}{GtkVScale implements
AtkImplementorIface, \code{\link{GtkBuildable}} and \code{\link{GtkOrientable}}.}
\section{Detailed Description}{The \code{\link{GtkVScale}} widget is used to allow the user to select a value using
a vertical slider. To create one, use \code{\link{gtkVScaleNewWithRange}}.
The position to show the current value, and the number of decimal places
shown can be set using the parent \code{\link{GtkScale}} class's functions.}
\section{Structures}{\describe{\item{\verb{GtkVScale}}{
The \code{\link{GtkVScale}} struct contains private data only, and
should be accessed using the functions below.
}}}
\section{Convenient Construction}{\code{gtkVScale} is the result of collapsing the constructors of \code{GtkVScale} (\code{\link{gtkVScaleNew}}, \code{\link{gtkVScaleNewWithRange}}) and accepts a subset of its arguments matching the required arguments of one of its delegate constructors.}
\references{\url{https://developer.gnome.org/gtk2/stable/GtkVScale.html}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Set data pathway
datpath <- "C:/Users/Lina/Dropbox/Academics/Projects/Perennial Grasses Eastern Oregon/Data"

library(tidyverse)

# Read in air temp data.
#
# One logger file per sensor (25-36) per collection trip. Each sensor has a
# fixed drought treatment, so the per-file metadata lives in one lookup table
# instead of 48 near-identical read_csv()/mutate() blocks.
#
# NOTE(review): the original script tagged file "28temp.csv" with ID = 27
# (not 28) in every collection. That labelling is preserved here, but it looks
# like a copy-paste slip -- confirm which plot logger 28 belongs to.
sensor_info <- tibble(
  sensor    = 25:36,
  ID        = c(25, 26, 27, 27, 29, 30, 31, 32, 33, 34, 35, 36),
  Treatment = c("severe", "moderate", "ambient", "ambient",
                "severe", "moderate", "severe", "moderate",
                "ambient", "moderate", "ambient", "severe")
)

# Collection folders, in the order the original script joined them.
# ("March_2021" files exist but were commented out of the original script,
# so they are deliberately excluded here.)
collections <- c("May_2021", "Sept_2021", "Feb_2022", "July_2022")

# Read one logger file and attach its ID and Treatment labels.
read_air_temp <- function(folder, sensor, id, treatment) {
  read_csv(paste(datpath, "/Soil_temp/cleaned_data/", folder, "/",
                 sensor, "temp.csv", sep = "")) %>%
    mutate(ID = id, Treatment = treatment)
}

# Read every folder x sensor combination (folder-major, matching the original
# join order), then merge with full_join on all shared columns -- this
# reproduces the original 47-step full_join chain.
air_temp_list <- vector("list", length(collections) * nrow(sensor_info))
k <- 0
for (folder in collections) {
  for (i in seq_len(nrow(sensor_info))) {
    k <- k + 1
    air_temp_list[[k]] <- read_air_temp(folder,
                                        sensor_info$sensor[i],
                                        sensor_info$ID[i],
                                        sensor_info$Treatment[i])
  }
}
air_temp_full <- reduce(air_temp_list, full_join)

# No per-file objects are created above, so the original trailing block of
# rm() calls is unnecessary. (It also warned at runtime: the rm(ATMar2021_*)
# line referenced objects that were never created because the March reads
# were commented out.)
| /data_compiling/compiling_air_temp.R | no_license | LinaAoyama/EO-perennial-grasses | R | false | false | 11,109 | r | #set data pathway
# Set data pathway
datpath <- "C:/Users/Lina/Dropbox/Academics/Projects/Perennial Grasses Eastern Oregon/Data"

library(tidyverse)

# Read in air temp data.
#
# One logger file per sensor (25-36) per collection trip. Each sensor has a
# fixed drought treatment, so the per-file metadata lives in one lookup table
# instead of 48 near-identical read_csv()/mutate() blocks.
#
# NOTE(review): the original script tagged file "28temp.csv" with ID = 27
# (not 28) in every collection. That labelling is preserved here, but it looks
# like a copy-paste slip -- confirm which plot logger 28 belongs to.
sensor_info <- tibble(
  sensor    = 25:36,
  ID        = c(25, 26, 27, 27, 29, 30, 31, 32, 33, 34, 35, 36),
  Treatment = c("severe", "moderate", "ambient", "ambient",
                "severe", "moderate", "severe", "moderate",
                "ambient", "moderate", "ambient", "severe")
)

# Collection folders, in the order the original script joined them.
# ("March_2021" files exist but were commented out of the original script,
# so they are deliberately excluded here.)
collections <- c("May_2021", "Sept_2021", "Feb_2022", "July_2022")

# Read one logger file and attach its ID and Treatment labels.
read_air_temp <- function(folder, sensor, id, treatment) {
  read_csv(paste(datpath, "/Soil_temp/cleaned_data/", folder, "/",
                 sensor, "temp.csv", sep = "")) %>%
    mutate(ID = id, Treatment = treatment)
}

# Read every folder x sensor combination (folder-major, matching the original
# join order), then merge with full_join on all shared columns -- this
# reproduces the original 47-step full_join chain.
air_temp_list <- vector("list", length(collections) * nrow(sensor_info))
k <- 0
for (folder in collections) {
  for (i in seq_len(nrow(sensor_info))) {
    k <- k + 1
    air_temp_list[[k]] <- read_air_temp(folder,
                                        sensor_info$sensor[i],
                                        sensor_info$ID[i],
                                        sensor_info$Treatment[i])
  }
}
air_temp_full <- reduce(air_temp_list, full_join)

# No per-file objects are created above, so the original trailing block of
# rm() calls is unnecessary. (It also warned at runtime: the rm(ATMar2021_*)
# line referenced objects that were never created because the March reads
# were commented out.)
|
#-------------------------------------------------
#-------------------------------------------------
#Panel Models - KFW Grid
#Testing in Panel the impact of being treated with demarcation
#On the Max Level of NDVI, measured as the yearly max NDVI value (LTDR)
#-------------------------------------------------
#-------------------------------------------------
# NOTE(review): installing from GitHub on every run is slow and not
# reproducible; consider pinning a release or guarding with requireNamespace().
library(devtools)
devtools::install_github("itpir/SCI@master")
library(SCI)
library(stargazer)     # regression tables
library(lmtest)        # coeftest() for inference with a supplied vcov
library(multiwayvcov)  # cluster.vcov() multi-way clustered covariance
loadLibs()
#-------------------------------------------------
#-------------------------------------------------
#Load in Processed Data - produced from script KFW_dataMerge.r
#-------------------------------------------------
#-------------------------------------------------
# NOTE(review): readShapePoints() (maptools) is long-deprecated; sf::st_read()
# or rgdal are the modern replacements -- confirm before upgrading packages.
shpfile = "/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/kfw_points_processed.shp"
dta_Shp = readShapePoints(shpfile)
#-----
# ** Build panel dataset - skip to reading it in if don't need to make any changes **
#-------------------------------------------------
#-------------------------------------------------
#Convert from a wide-form dataset for the Cross-sectional
#to a long-form dataset for the panel model.
#-------------------------------------------------
#-------------------------------------------------
#drop out unsed time-varying vars
reshape <- dta_Shp
# NOTE(review): the object name "reshape" shadows stats::reshape. The call on
# kfw_wide@data below still dispatches to the function (R skips non-function
# objects when resolving a call), but renaming this object would be clearer.
# Sorting columns by name makes the grep-based drops and the reshape()
# "varying" indices deterministic.
reshape<-reshape[,order(names(reshape))]
# Drop 1980s and 1990s time-varying columns, then all nighttime-lights
# ("ntl") columns -- the panel below uses 2000s LTDR/climate/pop data only.
reshape1<-reshape[,-grep("(_198)",names(reshape))]
reshape2<-reshape1[,-grep("(_199)",names(reshape1))]
reshape3<-reshape2[,-grep("(ntl)",names(reshape2))]
#prep to convert to long form panel dataset
kfw_wide<-reshape3
kfw_wide<-kfw_wide[,order(names(kfw_wide))]
#----------------------------------
#Convert to long form panel dataset
#----------------------------------
# Column indices for each time-varying measure family (e.g. MeanT_2000, ...);
# these feed reshape()'s "varying" argument.
MeanT<-grep("MeanT_",names(kfw_wide))
MeanP<-grep("MeanP_",names(kfw_wide))
MinT<-grep("MinT_",names(kfw_wide))
MaxT<-grep("MaxT_",names(kfw_wide))
MinP<-grep("MinP_",names(kfw_wide))
MaxP<-grep("MaxP_",names(kfw_wide))
MaxL<-grep("MaxL_",names(kfw_wide))
Pop<-grep("Pop_",names(kfw_wide))
all_reshape <- c(MeanT,MeanP,MaxT,MaxP,MinP,MinT,MaxL,Pop)
# Wide -> long: one row per community (ad_id) per Year; sep="_" splits
# "MaxL_2003" into variable MaxL and timevar Year = 2003.
psm_Long <- reshape(kfw_wide@data, varying=all_reshape, direction="long",idvar="ad_id",sep="_",timevar="Year")
## Add treatment and other vars
#Create years to demarcation
# Negative before demarcation completion (demend_y), zero that year, positive after.
psm_Long$yrtodem <- NA
psm_Long$yrtodem=psm_Long$Year - psm_Long$demend_y
#Create demarcation treatment variable, using demend_y
#0 in years prior to demarcation, turns to 1 in year of demarcation
psmtest3 <- psm_Long
psmtest3$trtdem <- 0
psmtest3$trtdem[which(psmtest3$Year<psmtest3$demend_y)]<-0
psmtest3$trtdem[which(psmtest3$Year>=psmtest3$demend_y)]<-1
psm_Long <- psmtest3
#create categorical variable for distance to boundary
# 0 = within 5 (distance units of HubDist), 1 = farther than 5
psm_Long$HubDistCat<-0
psm_Long$HubDistCat[psm_Long$HubDist>5]<-1
#create arc of deforestation variable
# 1 for states on the arc of deforestation in this sample (Para, Tocantins)
psm_Long$arc<-0
psm_Long$arc[which(psm_Long$UF=="PA" | psm_Long$UF=="TO")] <- 1
#write.csv(psm_Long,file="/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
#-----
## *READ IN PANEL DATASET* (use unless you need to rebuild panel)
# NOTE(review): this overwrites the freshly built psm_Long with the cached CSV;
# comment this line out when the panel-build section above has been changed.
psm_Long= read.csv("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
#-------------------------------------------------
#-------------------------------------------------
#Run Panel Models
#-------------------------------------------------
#-------------------------------------------------
# All models: yearly max NDVI (MaxL) on the demarcation treatment dummy,
# with community fixed effects (factor(reu_id)) and observations weighted by
# area (terrai_are). Standard errors are clustered two-way, by community and
# year, via multiwayvcov::cluster.vcov + lmtest::coeftest.
# Model A: treatment + community fixed effects only.
pModelMax_A = lm(MaxL ~ trtdem + factor(reu_id),data=psm_Long, weights=terrai_are)
summary(pModelMax_A)
clusterA <- cluster.vcov(pModelMax_A,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_A <- coeftest(pModelMax_A, clusterA)
#print(CMREG_A)
# Model B: adds climate (mean/max/min temperature and precipitation) and
# population covariates.
pModelMax_B = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop + factor(reu_id),
                 data=psm_Long, weights=terrai_are)
summary(pModelMax_B)
clusterB <- cluster.vcov(pModelMax_B,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_B <- coeftest(pModelMax_B, clusterB)
#print(CMREG_B)
# Model C: adds a linear time trend (Year entered as numeric).
pModelMax_C = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+Year + factor(reu_id),
                 data=psm_Long, weights=terrai_are)
summary(pModelMax_C)
clusterC <- cluster.vcov(pModelMax_C,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_C <- coeftest(pModelMax_C, clusterC)
#print(CMREG_C)
# Model D: replaces the linear trend with year fixed effects.
pModelMax_D = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+ factor(Year) + factor(reu_id),
                 data=psm_Long, weights=terrai_are)
summary(pModelMax_D)
clusterD <- cluster.vcov(pModelMax_D,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_D <- coeftest(pModelMax_D, clusterD)
#print(CMREG_D)
# Model E: adds a treatment x distance-to-boundary (continuous) interaction.
pModelMax_E = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+
                   trtdem*HubDist + factor(reu_id)+factor(Year),
                 data=psm_Long, weights=terrai_are)
summary(pModelMax_E)
clusterE <- cluster.vcov(pModelMax_E,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_E <- coeftest(pModelMax_E, clusterE)
#print(CMREG_E)
# Model F: same as E but with the categorical distance variable (HubDistCat).
pModelMax_F = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop +
                   trtdem*HubDistCat + factor(reu_id)+ factor(Year),
                 data=psm_Long, weights=terrai_are)
summary(pModelMax_F)
clusterF <- cluster.vcov(pModelMax_F,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_F <- coeftest(pModelMax_F, clusterF)
#print(CMREG_F)
#-----------------------------
# Look at Arc of Deforestation (states PA and TO)
# Cross-tab communities by state, then inspect the NDVI outcome for the
# arc-of-deforestation states only.
table(psm_Long$reu_id, psm_Long$UF)
# FIX: the long-form panel column is 'MaxL' (see the models above); the old
# wide-form name 'MaxL_' does not exist in psm_Long. Also use %in% so NA
# states are never selected.
psm_Long_arc <- psm_Long[psm_Long$UF %in% c("PA", "TO"), ]
plot(psm_Long_arc$MaxL)
hist(psm_Long_arc$MaxL)
# FIX: psm_Long_TO was plotted one line BEFORE it was created; define the
# Tocantins / near-boundary subset first, then plot it.
psm_Long_TO <- psm_Long[psm_Long$UF=="TO",]
psm_Long_TO <- psm_Long_TO[psm_Long_TO$HubDistCat==0,]
plot(psm_Long_TO$Year, psm_Long_TO$MaxL)
# Spaghetti plot: one dashed line per community over time plus a heavier
# per-community summary line.
ggplot(data = psm_Long_TO, aes(x=Year, y=MaxL, group=reu_id, colour=factor(UF))) +
#geom_point(size=.5) +
geom_line(size=.5, linetype=2) +
stat_summary(fun.y=mean,aes(x=Year, y=MaxL, group=reu_id, colour=factor(UF)),data=psm_Long_TO,geom='line',size=1.5)+
theme(axis.text.x=element_text(angle=90,hjust=1))
#------------------------------------------------------------------------
#------------------------------------------------------------------------
## Set stargazer options
# FIX: the objects pModelMax_*_fit$cmreg referenced here are never created
# anywhere in this script; report the cluster-robust coeftest results
# (CMREG_A..CMREG_E) computed above instead. The keep= regexes are also
# updated to the coefficient names actually present in these models
# (trtdem, MeanT, ...) -- the stale wide-form names ("TrtMnt", "MeanT_")
# matched nothing.
stargazer(CMREG_A, CMREG_B, CMREG_C, CMREG_D, CMREG_E,
type="html",align=TRUE,keep=c("trtdem","MeanT","MeanP","Pop","MaxT","MaxP","MinT","MinP","Year"),
#covariate.labels=c("TrtMnt_demend_y","MeanT","MeanP","Pop","MaxT","MaxP","MinT","MinP","Year"),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
# FIX: CMREG_G and CMREG_H are never estimated in this script, so the
# original call errored; report the six fitted models A-F and trim the
# add.lines entries from eight to six columns to match. Year fixed effects
# (factor(Year)) enter in models D, E and F. keep= regexes updated to the
# current coefficient names (the old wide-form names matched nothing).
stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,keep=c("trtdem","Pop","MeanT","MeanP","MaxT","MaxP","MinT","MinP","Year"),
#covariate.labels=c("Treatment (Demarcation)","Population","Mean Temp","Mean Precip",
# "Max Temp","Max Precip","Min Temp","Min Precip","Year","Treatment*Boundary Distance",
# "Treatment*Boundary Distance(Cat)"),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
## Weighted summary statistics -------------------------------------------
# Inverse-area weight for each community-year observation.
psm_Long$commwt <- 1 / psm_Long$terrai_are
# Variables to summarise; pull just those columns (kept in their original
# data-frame order) into a working copy.
stat_vars <- c("MaxL", "Slope", "Riv_Dist", "Road_dist", "Elevation", "terrai_are", "Pop",
               "MeanT", "MeanP", "MinT", "MinP", "MaxT", "MaxP", "HubDist", "commwt")
keep_cols <- names(psm_Long) %in% stat_vars
stats <- psm_Long[, keep_cols]
#Used for JEEM submission
#Import html into Excel and modify within Excel (for JEEM 2nd resubmission)
# Reports all six cluster-robust fits; community/year fixed effects and the
# constant are suppressed via omit=, and custom significance cutoffs
# (0.05/0.01/0.001) replace stargazer's defaults.
# FIX: corrected the typo "Bounday Distance (Cat)" in the printed labels.
stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,
omit=c("factor","Constant"),
covariate.labels=c("Treatment (Demarcation)","Mean Temp","Mean Precip",
"Max Temp","Max Precip","Min Temp","Min Precip","Population","Year",
"Boundary Distance","Boundary Distance (Cat)",
"Treatment (Dem) * Boundary Distance",
"Treatment (Dem) * Boundary Distance (Cat)" ),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Community Fixed Effects?","Yes","Yes","Yes","Yes","Yes","Yes"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
star.cutoffs = c(0.05, 0.01, 0.001),
dep.var.labels=c("Max NDVI")
)
##output tables directly to word doc
library(R2HTML)
library(stargazer)
# Toy regression on random RGB data, kept as a template for testing the
# word-doc export. NOTE(review): testModel/testModel2 are never used below
# and could be deleted without affecting output.
red = runif(100,0.0,1.0)
green = runif(100,0.0,1.0)
blue = runif(100,0.0,1.0)
tDF <- data.frame(red, green, blue)
testModel <- lm(red~blue + green, data=tDF)
testModel2 <- lm(blue ~ green + red, data=tDF)
# Capture the stargazer HTML as a character vector so it can be cat()'d
# into the Word file below.
# NOTE(review): the keep= regexes still use the old wide-form coefficient
# names ("TrtMnt","Pop_","MeanT_",...); against the current models (trtdem,
# Pop, MeanT, ...) they would keep only "Year" terms -- confirm and update
# (and re-check covariate.labels alignment) before relying on this table.
table_1<-stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,keep=c("TrtMnt","Pop_","MeanT_","MeanP_","MaxT_","MaxP_","MinT_","MinP_","Year"),
covariate.labels=c("Treatment (Demarcation)","Population","Mean Temp","Mean Precip",
"Max Temp","Max Precip","Min Temp","Min Precip","Year"),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Community Fixed Effects?","Yes","Yes","Yes","Yes","Yes","Yes"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
#Put the directory and name you want. Make sure the name ends
#in ".doc" (the file is really HTML; Word renders it because of the Office
#namespaces written below).
wordFile=file.path("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points","wordFile.docx")
#Don't ever change this: Word-flavoured HTML preamble.
cat("<html xmlns:o='urn:schemas-microsoft-com:office:office' xmlns:w='urn:schemas-microsoft-com:office:word' xmlns='http://www.w3.org/TR/REC-html40'>
<head><title>Microsoft Office HTML Example</title></head>
<body>", file=wordFile)
#Add any tables you want from stargazer.
cat(table_1, append=TRUE, file=wordFile)
#You can also add normal text and line splits if you want, i.e.:
text <- "The below table provides information on the BGR model, which has NULL results!:"
# FIX: 'testdoc' was never defined (the file handle here is 'wordFile'); the
# original line stopped the script with an "object not found" error.
cat(text, append=TRUE, file=wordFile)
#And, I can add the other table:
#cat(table_2, append=TRUE, file=wordFile)
cat("\n</body></html>", append=TRUE, file=wordFile)
#----------------
# Scratch/Workspace/Archive
# NOTE(review): retained for reference only; not part of the current
# analysis pipeline.
#----------------
# NOTE(review): this model uses the old wide-form variable names (MaxL_,
# TrtMnt_demend_y, MeanT_, ...) that do not exist in the current psm_Long;
# running it as-is errors, and it would clobber clusterF/CMREG_F from the
# main model sequence above.
pModelMax_F = lm(MaxL_ ~ TrtMnt_demend_y+ MeanT_ + MeanP_ + Pop_ + MaxT_ + MaxP_ + MinT_ + MinP_ + Year +
TrtMnt_demend_y*arc*HubDist + factor(reu_id),
data=psm_Long, weights=terrai_are)
summary(pModelMax_F)
clusterF <- cluster.vcov(pModelMax_F,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_F <- coeftest(pModelMax_F, clusterF)
print(CMREG_F)
# Year-over-year change in max NDVI (2000 -> 2001) on the wide-form
# shapefile data; quick descriptive checks only.
dta_Shp@data$MaxL_diff <- dta_Shp@data$MaxL_2001-dta_Shp@data$MaxL_2000
summary(dta_Shp@data$MaxL_diff)
sd(dta_Shp@data$MaxL_diff)
summary(dta_Shp@data$MaxL_2000)
sd(dta_Shp@data$MaxL_2000)
## !incorrect code for building panel dataset (but used for first two JEEM submissions)!
# varList = c("MaxL_")
# psm_Long <- BuildTimeSeries(dta=dta_Shp,idField="ad_id",varList_pre=varList,2000,2014,colYears=c("demend_y","apprend_y","regend_y"),
# interpYears=c("Slope","Road_dist","Riv_Dist","UF","Elevation","urbtravtim","terrai_are","Pop_","MeanT_","MeanP_","MaxT_","MaxP_","MinP_","MinT_","ntl_","HubDist","HubName","reu_id"))
# psm_Long$Year <- as.numeric(psm_Long$Year)
#
# write.csv(psm_Long,file="/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
# psm_Long= read.csv("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
| /KFWPoints_panelResults_Max_Weighted.R | no_license | rtrichler/KFWPoints | R | false | false | 12,356 | r | #-------------------------------------------------
#-------------------------------------------------
#-------------------------------------------------
#Panel Models - KFW Grid
#Testing in Panel the impact of being treated with demarcation
#On the Max Level of NDVI, measured as the yearly max NDVI value (LTDR)
#-------------------------------------------------
#-------------------------------------------------
# NOTE(review): installing a package from GitHub at the top of an analysis
# script is a network side effect on every run; consider guarding it.
library(devtools)
devtools::install_github("itpir/SCI@master")
library(SCI)
library(stargazer)
library(lmtest)
library(multiwayvcov)
# loadLibs() comes from the SCI package attached above.
loadLibs()
#-------------------------------------------------
#-------------------------------------------------
#Load in Processed Data - produced from script KFW_dataMerge.r
#-------------------------------------------------
#-------------------------------------------------
# Community points shapefile (wide form: one row per community, one column
# per variable-year). NOTE(review): readShapePoints() is not defined in this
# file -- presumably attached via loadLibs(); confirm its provider.
shpfile = "/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/kfw_points_processed.shp"
dta_Shp = readShapePoints(shpfile)
#-----
# ** Build panel dataset - skip to reading it in if don't need to make any changes **
#-------------------------------------------------
#-------------------------------------------------
#Convert from a wide-form dataset for the Cross-sectional
#to a long-form dataset for the panel model.
#-------------------------------------------------
#-------------------------------------------------
# Drop unused time-varying vars: 1980s and 1990s columns and nighttime-
# lights ("ntl") columns are removed before reshaping.
# FIX: the working copy was previously named 'reshape', shadowing
# stats::reshape(); renamed to 'wide_dta' so the reshape() call below is
# unambiguous.
wide_dta <- dta_Shp
wide_dta <- wide_dta[,order(names(wide_dta))]
reshape1 <- wide_dta[,-grep("(_198)",names(wide_dta))]
reshape2 <- reshape1[,-grep("(_199)",names(reshape1))]
reshape3 <- reshape2[,-grep("(ntl)",names(reshape2))]
#prep to convert to long form panel dataset
kfw_wide<-reshape3
kfw_wide<-kfw_wide[,order(names(kfw_wide))]
#----------------------------------
# Convert to long form panel dataset: collect the column indices of each
# time-varying variable family (columns named like 'MeanT_2001'), then
# reshape on the '_' separator so e.g. MeanT_2001 becomes MeanT with
# Year == 2001.
#----------------------------------
MeanT<-grep("MeanT_",names(kfw_wide))
MeanP<-grep("MeanP_",names(kfw_wide))
MinT<-grep("MinT_",names(kfw_wide))
MaxT<-grep("MaxT_",names(kfw_wide))
MinP<-grep("MinP_",names(kfw_wide))
MaxP<-grep("MaxP_",names(kfw_wide))
MaxL<-grep("MaxL_",names(kfw_wide))
Pop<-grep("Pop_",names(kfw_wide))
all_reshape <- c(MeanT,MeanP,MaxT,MaxP,MinP,MinT,MaxL,Pop)
psm_Long <- reshape(kfw_wide@data, varying=all_reshape, direction="long",idvar="ad_id",sep="_",timevar="Year")
## Add treatment and other vars
# Years relative to demarcation (negative = pre-demarcation); NA when the
# community was never demarcated (demend_y is NA).
psm_Long$yrtodem <- psm_Long$Year - psm_Long$demend_y
# Demarcation treatment dummy: 0 before demarcation, 1 from the demarcation
# year onward. which() drops NA comparisons, so never-demarcated
# communities stay 0.
# FIX: removed the intermediate copy 'psmtest3' and the no-op assignment of
# 0 to pre-demarcation rows (the column is already initialised to 0);
# results are identical.
psm_Long$trtdem <- 0
psm_Long$trtdem[which(psm_Long$Year >= psm_Long$demend_y)] <- 1
#create categorical variable for distance to boundary: 1 if > 5 from hub
psm_Long$HubDistCat<-0
psm_Long$HubDistCat[psm_Long$HubDist>5]<-1
#create arc of deforestation variable: states Para (PA) and Tocantins (TO)
psm_Long$arc<-0
psm_Long$arc[which(psm_Long$UF=="PA" | psm_Long$UF=="TO")] <- 1
# Cached long-form panel (one row per community-year); uncomment to rewrite
# after rebuilding the panel above.
#write.csv(psm_Long,file="/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
#-----
## *READ IN PANEL DATASET* (use unless you need to rebuild panel)
# Loads the processed community-year panel used by every model below.
psm_Long= read.csv("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
#-------------------------------------------------
#-------------------------------------------------
#Run Panel Models
#-------------------------------------------------
#-------------------------------------------------
## Panel models A-F: yearly max NDVI (MaxL) regressed on the demarcation
## treatment dummy (trtdem). All models include community fixed effects
## (factor(reu_id)) and weight observations by community area (terrai_are).
## Standard errors are two-way clustered on community and year via
## multiwayvcov::cluster.vcov, then fed to lmtest::coeftest.
# Model A: treatment + community fixed effects only.
pModelMax_A = lm(MaxL ~ trtdem + factor(reu_id),data=psm_Long, weights=terrai_are)
summary(pModelMax_A)
clusterA <- cluster.vcov(pModelMax_A,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_A <- coeftest(pModelMax_A, clusterA)
#print(CMREG_A)
# Model B: + climate controls and population.
pModelMax_B = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop + factor(reu_id),
data=psm_Long, weights=terrai_are)
summary(pModelMax_B)
clusterB <- cluster.vcov(pModelMax_B,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_B <- coeftest(pModelMax_B, clusterB)
#print(CMREG_B)
# Model C: + linear year trend.
pModelMax_C = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+Year + factor(reu_id),
data=psm_Long, weights=terrai_are)
summary(pModelMax_C)
clusterC <- cluster.vcov(pModelMax_C,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_C <- coeftest(pModelMax_C, clusterC)
#print(CMREG_C)
# Model D: year fixed effects instead of a linear trend.
pModelMax_D = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+ factor(Year) + factor(reu_id),
data=psm_Long, weights=terrai_are)
summary(pModelMax_D)
clusterD <- cluster.vcov(pModelMax_D,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_D <- coeftest(pModelMax_D, clusterD)
#print(CMREG_D)
# Model E: + treatment x continuous distance-to-boundary interaction.
pModelMax_E = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop+
trtdem*HubDist + factor(reu_id)+factor(Year),
data=psm_Long, weights=terrai_are)
summary(pModelMax_E)
clusterE <- cluster.vcov(pModelMax_E,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_E <- coeftest(pModelMax_E, clusterE)
#print(CMREG_E)
# Model F: + treatment x categorical distance-to-boundary interaction.
pModelMax_F = lm(MaxL ~ trtdem+ MeanT + MeanP + MaxT + MaxP + MinT + MinP + Pop +
trtdem*HubDistCat + factor(reu_id)+ factor(Year),
data=psm_Long, weights=terrai_are)
summary(pModelMax_F)
clusterF <- cluster.vcov(pModelMax_F,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_F <- coeftest(pModelMax_F, clusterF)
#print(CMREG_F)
#-----------------------------
# Look at Arc of Deforestation (states PA and TO)
# Cross-tab communities by state, then inspect the NDVI outcome for the
# arc-of-deforestation states only.
table(psm_Long$reu_id, psm_Long$UF)
# FIX: the long-form panel column is 'MaxL' (see the models above); the old
# wide-form name 'MaxL_' does not exist in psm_Long. Also use %in% so NA
# states are never selected.
psm_Long_arc <- psm_Long[psm_Long$UF %in% c("PA", "TO"), ]
plot(psm_Long_arc$MaxL)
hist(psm_Long_arc$MaxL)
# FIX: psm_Long_TO was plotted one line BEFORE it was created; define the
# Tocantins / near-boundary subset first, then plot it.
psm_Long_TO <- psm_Long[psm_Long$UF=="TO",]
psm_Long_TO <- psm_Long_TO[psm_Long_TO$HubDistCat==0,]
plot(psm_Long_TO$Year, psm_Long_TO$MaxL)
# Spaghetti plot: one dashed line per community over time plus a heavier
# per-community summary line.
ggplot(data = psm_Long_TO, aes(x=Year, y=MaxL, group=reu_id, colour=factor(UF))) +
#geom_point(size=.5) +
geom_line(size=.5, linetype=2) +
stat_summary(fun.y=mean,aes(x=Year, y=MaxL, group=reu_id, colour=factor(UF)),data=psm_Long_TO,geom='line',size=1.5)+
theme(axis.text.x=element_text(angle=90,hjust=1))
#------------------------------------------------------------------------
#------------------------------------------------------------------------
## Set stargazer options
# FIX: the objects pModelMax_*_fit$cmreg referenced here are never created
# anywhere in this script; report the cluster-robust coeftest results
# (CMREG_A..CMREG_E) computed above instead. The keep= regexes are also
# updated to the coefficient names actually present in these models.
stargazer(CMREG_A, CMREG_B, CMREG_C, CMREG_D, CMREG_E,
type="html",align=TRUE,keep=c("trtdem","MeanT","MeanP","Pop","MaxT","MaxP","MinT","MinP","Year"),
#covariate.labels=c("TrtMnt_demend_y","MeanT","MeanP","Pop","MaxT","MaxP","MinT","MinP","Year"),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
# FIX: CMREG_G and CMREG_H are never estimated in this script, so the
# original call errored; report the six fitted models A-F and trim the
# add.lines entries from eight to six columns to match. Year fixed effects
# enter in models D, E and F.
stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,keep=c("trtdem","Pop","MeanT","MeanP","MaxT","MaxP","MinT","MinP","Year"),
#covariate.labels=c("Treatment (Demarcation)","Population","Mean Temp","Mean Precip",
# "Max Temp","Max Precip","Min Temp","Min Precip","Year","Treatment*Boundary Distance",
# "Treatment*Boundary Distance(Cat)"),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
## Weighted summary statistics -------------------------------------------
# Inverse-area weight for each community-year observation.
psm_Long$commwt <- 1 / psm_Long$terrai_are
# Variables to summarise; pull just those columns (kept in their original
# data-frame order) into a working copy.
stat_vars <- c("MaxL", "Slope", "Riv_Dist", "Road_dist", "Elevation", "terrai_are", "Pop",
               "MeanT", "MeanP", "MinT", "MinP", "MaxT", "MaxP", "HubDist", "commwt")
keep_cols <- names(psm_Long) %in% stat_vars
stats <- psm_Long[, keep_cols]
#Used for JEEM submission
#Import html into Excel and modify within Excel (for JEEM 2nd resubmission)
# Reports all six cluster-robust fits; community/year fixed effects and the
# constant are suppressed via omit=, with custom significance cutoffs.
# FIX: corrected the typo "Bounday Distance (Cat)" in the printed labels.
stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,
omit=c("factor","Constant"),
covariate.labels=c("Treatment (Demarcation)","Mean Temp","Mean Precip",
"Max Temp","Max Precip","Min Temp","Min Precip","Population","Year",
"Boundary Distance","Boundary Distance (Cat)",
"Treatment (Dem) * Boundary Distance",
"Treatment (Dem) * Boundary Distance (Cat)" ),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Community Fixed Effects?","Yes","Yes","Yes","Yes","Yes","Yes"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
star.cutoffs = c(0.05, 0.01, 0.001),
dep.var.labels=c("Max NDVI")
)
##output tables directly to word doc
library(R2HTML)
library(stargazer)
# Toy regression on random RGB data, kept as a template for testing the
# word-doc export. NOTE(review): testModel/testModel2 are never used below.
red = runif(100,0.0,1.0)
green = runif(100,0.0,1.0)
blue = runif(100,0.0,1.0)
tDF <- data.frame(red, green, blue)
testModel <- lm(red~blue + green, data=tDF)
testModel2 <- lm(blue ~ green + red, data=tDF)
# Capture the stargazer HTML for cat()'ing into the Word file below.
# NOTE(review): the keep= regexes still use the old wide-form coefficient
# names ("TrtMnt","Pop_",...); against the current models they would keep
# only "Year" terms -- confirm and update before relying on this table.
table_1<-stargazer(CMREG_A,CMREG_B,CMREG_C,CMREG_D,CMREG_E,CMREG_F,
type="html",align=TRUE,keep=c("TrtMnt","Pop_","MeanT_","MeanP_","MaxT_","MaxP_","MinT_","MinP_","Year"),
covariate.labels=c("Treatment (Demarcation)","Population","Mean Temp","Mean Precip",
"Max Temp","Max Precip","Min Temp","Min Precip","Year"),
add.lines=list(c("Observations","148,230","148,230","148,230","148,230","148,230","148,230"),
c("Community Fixed Effects?","Yes","Yes","Yes","Yes","Yes","Yes"),
c("Year Fixed Effects?","No","No","No","Yes","Yes","Yes")),
omit.stat=c("f","ser"),
title="Regression Results",
dep.var.labels=c("Max NDVI")
)
#Put the directory and name you want. Make sure the name ends
#in ".doc" (the file is really HTML; Word renders it because of the Office
#namespaces written below).
wordFile=file.path("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points","wordFile.docx")
#Don't ever change this: Word-flavoured HTML preamble.
cat("<html xmlns:o='urn:schemas-microsoft-com:office:office' xmlns:w='urn:schemas-microsoft-com:office:word' xmlns='http://www.w3.org/TR/REC-html40'>
<head><title>Microsoft Office HTML Example</title></head>
<body>", file=wordFile)
#Add any tables you want from stargazer.
cat(table_1, append=TRUE, file=wordFile)
#You can also add normal text and line splits if you want, i.e.:
text <- "The below table provides information on the BGR model, which has NULL results!:"
# FIX: 'testdoc' was never defined (the file handle is 'wordFile'); the
# original line stopped the script with an "object not found" error.
cat(text, append=TRUE, file=wordFile)
#And, I can add the other table:
#cat(table_2, append=TRUE, file=wordFile)
cat("\n</body></html>", append=TRUE, file=wordFile)
#----------------
# Scratch/Workspace/Archive
# NOTE(review): retained for reference only; not part of the current
# analysis pipeline.
#----------------
# NOTE(review): this model uses the old wide-form variable names (MaxL_,
# TrtMnt_demend_y, ...) that do not exist in the current psm_Long; running
# it as-is errors, and it would clobber clusterF/CMREG_F from the main
# model sequence above.
pModelMax_F = lm(MaxL_ ~ TrtMnt_demend_y+ MeanT_ + MeanP_ + Pop_ + MaxT_ + MaxP_ + MinT_ + MinP_ + Year +
TrtMnt_demend_y*arc*HubDist + factor(reu_id),
data=psm_Long, weights=terrai_are)
summary(pModelMax_F)
clusterF <- cluster.vcov(pModelMax_F,cbind(psm_Long$reu_id,psm_Long$Year),force_posdef=TRUE)
CMREG_F <- coeftest(pModelMax_F, clusterF)
print(CMREG_F)
# Year-over-year change in max NDVI (2000 -> 2001) on the wide-form
# shapefile data; quick descriptive checks only.
dta_Shp@data$MaxL_diff <- dta_Shp@data$MaxL_2001-dta_Shp@data$MaxL_2000
summary(dta_Shp@data$MaxL_diff)
sd(dta_Shp@data$MaxL_diff)
summary(dta_Shp@data$MaxL_2000)
sd(dta_Shp@data$MaxL_2000)
## !incorrect code for building panel dataset (but used for first two JEEM submissions)!
# varList = c("MaxL_")
# psm_Long <- BuildTimeSeries(dta=dta_Shp,idField="ad_id",varList_pre=varList,2000,2014,colYears=c("demend_y","apprend_y","regend_y"),
# interpYears=c("Slope","Road_dist","Riv_Dist","UF","Elevation","urbtravtim","terrai_are","Pop_","MeanT_","MeanP_","MaxT_","MaxP_","MinP_","MinT_","ntl_","HubDist","HubName","reu_id"))
# psm_Long$Year <- as.numeric(psm_Long$Year)
#
# write.csv(psm_Long,file="/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
# psm_Long= read.csv("/Users/rbtrichler/Documents/AidData/KFW Brazil Eval/KFW_Points/ProcessedData/psm_Long.csv")
|
# sandwich_text(): wraps each element of the first argument with the given
# text (length-1 wrapper goes on both sides; length-2 wrapper is
# c(prefix, suffix)), as demonstrated by the expectations below.
test_that("sandwich_text() works", {
# length-1 wrapper applied to both sides
expect_equal(sandwich_text("a", "h"), "hah")
# length-2 wrapper: first element prefix, second suffix
expect_equal(sandwich_text("a", c("b", "h")), "bah")
expect_equal(
sandwich_text("testing a sentence", c("I'm ", ".")),
"I'm testing a sentence."
)
# vectorised over the first argument
expect_equal(
sandwich_text(c("ally", "ail"), "s"),
c("sallys", "sails")
)
# collapse= joins the wrapped elements into one string
expect_equal(
sandwich_text(c("es", "a", "oo"), "t", collapse = ", "),
"test, tat, toot"
)
# non-character wrapper is rejected
expect_error(sandwich_text("a", 1))
})
# length_sort() tests -----------------------------------------------------
# length_sort(): sorts by element (character) length; ties keep their
# original relative order.
test_that("length_sort() works", {
x <- c("ccc", "aaaa", "eee", "b", "DDD")
expect_equal(length_sort(x), c("b", "ccc", "eee", "DDD", "aaaa"))
expect_equal(
length_sort(x, decreasing = TRUE),
c("aaaa", "ccc", "eee", "DDD", "b")
)
})
# Numbers sort by the length of their printed value (10 before 100);
# NA placement follows order()'s na.last semantics.
test_that("length_sort() args inherited from order work", {
x <- c(1:9, NA, 100, 10)
expect_equal(length_sort(x), c(1:9, 10, 100, NA))
expect_equal(length_sort(x, decreasing = TRUE), c(100, 10, 1:9, NA))
expect_equal(length_sort(x, na.last = FALSE), c(NA, 1:9, 10, 100))
expect_equal(length_sort(x, na.last = NA), c(1:9, 10, 100))
})
# by_name = TRUE sorts by the length of the names attribute instead of the
# values; names travel with their values.
test_that("length_sort() by_name argument works", {
x <- c(bb = 333, ccc = 1, a = 22)
expect_equal(length_sort(x), c(ccc = 1, a = 22, bb = 333))
expect_equal(
length_sort(x, by_name = TRUE),
c(a = 22, bb = 333, ccc = 1)
)
expect_equal(
length_sort(x, by_name = TRUE, decreasing = TRUE),
c(ccc = 1, bb = 333, a = 22)
)
})
# length_order() tests ----------------------------------------------------
# length_order(): orders data-frame rows by the length of the values in the
# given column(s), shortest first.
test_that("length_order() works", {
x <- tibble::tibble(
x = 1:3,
y = c("b", "aa", "c"),
z = c("bb", "a", "c")
)
# single column: order by nchar(y) -> rows 1 ("b"), 3 ("c"), 2 ("aa")
expect_equal(length_order(x, y), x[c(1, 3, 2), ])
# multiple columns: combined length of y and z per row
expect_equal(length_order(x, c(y, z)), x[c(3, 1, 2), ])
expect_equal(length_order(x, c(y, z), decreasing = TRUE), x[c(2, 1, 3), ])
})
# all_duplicated() tests --------------------------------------------------
# all_duplicated(): unlike base duplicated(), flags ALL occurrences of a
# duplicated value, including the first one.
# Shared fixtures: NAs at both ends plus a repeated 1.
na_dup <- c(NA, 1, 1:7, NA)
df_dup <- data.frame(
x = c(NA, 1, 1:7, NA),
y = c(NA, 1, 1:7, NA)
)
test_that("all_duplicated() works", {
# all-unique input -> all FALSE
expect_equal(all_duplicated(1:10), rep(FALSE, 10))
# every occurrence of the repeated 1 is flagged, first included
expect_equal(
all_duplicated(c(1, 1:8, 1)),
c(TRUE, TRUE, rep(FALSE, 7), TRUE)
)
# NAs count as duplicates of each other by default
expect_equal(
all_duplicated(na_dup),
c(rep(TRUE, 3), rep(FALSE, 6), TRUE)
)
# data frames are compared row-wise
expect_equal(
all_duplicated(df_dup),
c(rep(TRUE, 3), rep(FALSE, 6), TRUE)
)
})
test_that("all_duplicated(incomparables = NA) works", {
expect_equal(
all_duplicated(1:10, incomparables = NA),
rep(FALSE, 10)
)
# with incomparables = NA the NAs are never flagged; only the repeated 1 is
expect_equal(
all_duplicated(na_dup, incomparables = NA),
c(FALSE, rep(TRUE, 2), rep(FALSE, 6), FALSE)
)
# not implemented for data.frames
# expect_equal(
# all_duplicated(df_dup, incomparables = NA),
# c(FALSE, rep(TRUE, 2), rep(FALSE, 6), FALSE)
# )
})
| /tests/testthat/test-utils.R | permissive | DiseaseOntology/DO.utils | R | false | false | 3,065 | r | test_that("sandwich_text() works", {
expect_equal(sandwich_text("a", "h"), "hah")
expect_equal(sandwich_text("a", c("b", "h")), "bah")
expect_equal(
sandwich_text("testing a sentence", c("I'm ", ".")),
"I'm testing a sentence."
)
expect_equal(
sandwich_text(c("ally", "ail"), "s"),
c("sallys", "sails")
)
expect_equal(
sandwich_text(c("es", "a", "oo"), "t", collapse = ", "),
"test, tat, toot"
)
expect_error(sandwich_text("a", 1))
})
# length_sort() tests -----------------------------------------------------
# length_sort(): sorts by element (character) length; ties keep their
# original relative order.
test_that("length_sort() works", {
x <- c("ccc", "aaaa", "eee", "b", "DDD")
expect_equal(length_sort(x), c("b", "ccc", "eee", "DDD", "aaaa"))
expect_equal(
length_sort(x, decreasing = TRUE),
c("aaaa", "ccc", "eee", "DDD", "b")
)
})
# Numbers sort by the length of their printed value; NA placement follows
# order()'s na.last semantics.
test_that("length_sort() args inherited from order work", {
x <- c(1:9, NA, 100, 10)
expect_equal(length_sort(x), c(1:9, 10, 100, NA))
expect_equal(length_sort(x, decreasing = TRUE), c(100, 10, 1:9, NA))
expect_equal(length_sort(x, na.last = FALSE), c(NA, 1:9, 10, 100))
expect_equal(length_sort(x, na.last = NA), c(1:9, 10, 100))
})
# by_name = TRUE sorts by the length of the names attribute instead.
test_that("length_sort() by_name argument works", {
x <- c(bb = 333, ccc = 1, a = 22)
expect_equal(length_sort(x), c(ccc = 1, a = 22, bb = 333))
expect_equal(
length_sort(x, by_name = TRUE),
c(a = 22, bb = 333, ccc = 1)
)
expect_equal(
length_sort(x, by_name = TRUE, decreasing = TRUE),
c(ccc = 1, bb = 333, a = 22)
)
})
# length_order() tests ----------------------------------------------------
# length_order(): orders data-frame rows by the length of the values in the
# given column(s), shortest first.
test_that("length_order() works", {
x <- tibble::tibble(
x = 1:3,
y = c("b", "aa", "c"),
z = c("bb", "a", "c")
)
expect_equal(length_order(x, y), x[c(1, 3, 2), ])
expect_equal(length_order(x, c(y, z)), x[c(3, 1, 2), ])
expect_equal(length_order(x, c(y, z), decreasing = TRUE), x[c(2, 1, 3), ])
})
# all_duplicated() tests --------------------------------------------------
# all_duplicated(): flags ALL occurrences of a duplicated value, including
# the first one. Shared fixtures: NAs at both ends plus a repeated 1.
na_dup <- c(NA, 1, 1:7, NA)
df_dup <- data.frame(
x = c(NA, 1, 1:7, NA),
y = c(NA, 1, 1:7, NA)
)
test_that("all_duplicated() works", {
expect_equal(all_duplicated(1:10), rep(FALSE, 10))
expect_equal(
all_duplicated(c(1, 1:8, 1)),
c(TRUE, TRUE, rep(FALSE, 7), TRUE)
)
# NAs count as duplicates of each other by default
expect_equal(
all_duplicated(na_dup),
c(rep(TRUE, 3), rep(FALSE, 6), TRUE)
)
# data frames are compared row-wise
expect_equal(
all_duplicated(df_dup),
c(rep(TRUE, 3), rep(FALSE, 6), TRUE)
)
})
test_that("all_duplicated(incomparables = NA) works", {
expect_equal(
all_duplicated(1:10, incomparables = NA),
rep(FALSE, 10)
)
# with incomparables = NA the NAs are never flagged; only the repeated 1 is
expect_equal(
all_duplicated(na_dup, incomparables = NA),
c(FALSE, rep(TRUE, 2), rep(FALSE, 6), FALSE)
)
# not implemented for data.frames
# expect_equal(
# all_duplicated(df_dup, incomparables = NA),
# c(FALSE, rep(TRUE, 2), rep(FALSE, 6), FALSE)
# )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iModel-dmod.R
\name{dmod}
\alias{dmod}
\alias{print.dModel}
\alias{fitted.dModel}
\alias{residuals.dModel}
\alias{triangulate.dModel}
\title{Log--linear model}
\usage{
dmod(formula, data, marginal = NULL, interactions = NULL, fit = TRUE,
details = 0, ...)
}
\arguments{
\item{formula}{Model specification in one of the following forms: 1) a
right-hand sided formula, 2) as a list of generators, 3) an undirected
graph (represented either as a graphNEL object or as an adjacency
matrix). Notice that there are certain model specification shortcuts,
see Section 'details' below.}
\item{data}{Either a table or a dataframe. In the latter case, the dataframe
will be coerced to a table. See 'details' below.}
\item{marginal}{Should only a subset of the variables be used in connection
with the model specification shortcuts}
\item{interactions}{A number given the highest order interactions in the
model, see Section 'details' below.}
\item{fit}{Should the model be fitted.}
\item{details}{Control the amount of output; for debugging purposes.}
\item{...}{Additional arguments; currently not used.}
}
\value{
An object of class \code{dModel}.
}
\description{
Specification of log--linear (graphical) model. The 'd' in the name
\code{dmod} refers to that it is a (graphical) model for 'd'iscrete
variables
}
\details{
The independence model can be specified as \code{~.^1} and the
saturated model as \code{~.^.}. Setting e.g. \code{interactions=3}
implies that there will be at most three factor interactions in the
model.
Data can be specified as a table of counts or as a dataframe. If data is a
dataframe then it will be converted to a table (using \code{xtabs()}). This
means that if the dataframe contains numeric values then the you can get a
very sparse and high dimensional table. When a dataframe contains numeric
values it may be worthwhile to discretize data using the \code{cut()}
function.
The \code{marginal} argument can be used for specifying the independence or
saturated models for only a subset of the variables. When \code{marginal} is
given the corresponding marginal table of data is formed and used in the
analysis (notice that this is different from the behaviour of
\code{loglin()}, which uses the full table).
The \code{triangulate()} method for discrete models (dModel objects)
triangulates the dependence graph of the model.
}
\examples{
## Graphical log-linear model
data(reinis)
dm1 <- dmod(~ .^., reinis)
dm2 <- backward(dm1, k=2)
dm3 <- backward(dm1, k=2, fixin=list(c("family", "phys", "systol")))
## At most 3-factor interactions
dm1<-dmod(~ .^., data=reinis, interactions=3)
}
\seealso{
\code{\link{cmod}}, \code{\link{mmod}}
}
\author{
Søren Højsgaard, \email{sorenh@math.aau.dk}
}
\keyword{models}
| /man/dmod.Rd | no_license | boennecd/gRim | R | false | true | 2,843 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iModel-dmod.R
\name{dmod}
\alias{dmod}
\alias{print.dModel}
\alias{fitted.dModel}
\alias{residuals.dModel}
\alias{triangulate.dModel}
\title{Log--linear model}
\usage{
dmod(formula, data, marginal = NULL, interactions = NULL, fit = TRUE,
details = 0, ...)
}
\arguments{
\item{formula}{Model specification in one of the following forms: 1) a
right-hand sided formula, 2) as a list of generators, 3) an undirected
graph (represented either as a graphNEL object or as an adjacency
matrix). Notice that there are certain model specification shortcuts,
see Section 'details' below.}
\item{data}{Either a table or a dataframe. In the latter case, the dataframe
will be coerced to a table. See 'details' below.}
\item{marginal}{Should only a subset of the variables be used in connection
with the model specification shortcuts}
\item{interactions}{A number given the highest order interactions in the
model, see Section 'details' below.}
\item{fit}{Should the model be fitted.}
\item{details}{Control the amount of output; for debugging purposes.}
\item{...}{Additional arguments; currently not used.}
}
\value{
An object of class \code{dModel}.
}
\description{
Specification of log--linear (graphical) model. The 'd' in the name
\code{dmod} refers to that it is a (graphical) model for 'd'iscrete
variables
}
\details{
The independence model can be specified as \code{~.^1} and the
saturated model as \code{~.^.}. Setting e.g. \code{interactions=3}
implies that there will be at most three factor interactions in the
model.
Data can be specified as a table of counts or as a dataframe. If data is a
dataframe then it will be converted to a table (using \code{xtabs()}). This
means that if the dataframe contains numeric values then the you can get a
very sparse and high dimensional table. When a dataframe contains numeric
values it may be worthwhile to discretize data using the \code{cut()}
function.
The \code{marginal} argument can be used for specifying the independence or
saturated models for only a subset of the variables. When \code{marginal} is
given the corresponding marginal table of data is formed and used in the
analysis (notice that this is different from the behaviour of
\code{loglin()}, which uses the full table).
The \code{triangulate()} method for discrete models (dModel objects)
triangulates the dependence graph of the model.
}
\examples{
## Graphical log-linear model
data(reinis)
dm1 <- dmod(~ .^., reinis)
dm2 <- backward(dm1, k=2)
dm3 <- backward(dm1, k=2, fixin=list(c("family", "phys", "systol")))
## At most 3-factor interactions
dm1<-dmod(~ .^., data=reinis, interactions=3)
}
\seealso{
\code{\link{cmod}}, \code{\link{mmod}}
}
\author{
Søren Højsgaard, \email{sorenh@math.aau.dk}
}
\keyword{models}
|
\name{Sigmak}
\alias{Sigmak}
\docType{data}
\title{
Sigmak: a list of 4 x 4 matrices over the iris measurements
}
\description{
A list of three 4 x 4 numeric matrices, each with rows and columns named
after the four iris measurements (Sepal.Length, Sepal.Width, Petal.Length,
Petal.Width). Presumably the per-component covariance matrices from an EM
fit; confirm against the code that produced them.
}
\usage{data("Sigmak")}
\format{
The format is:
List of 3
$ : num [1:4, 1:4] 0.6539 -0.0311 1.1941 0.4776 -0.0311 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
$ : num [1:4, 1:4] 0.6898 -0.0488 1.2416 0.5 -0.0488 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
$ : num [1:4, 1:4] 0.7 -0.0461 1.3149 0.5371 -0.0461 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(Sigmak)
## maybe str(Sigmak) ; plot(Sigmak) ...
}
\keyword{datasets}
| /man/sigmak_1.Rd | no_license | SaraTouzani/EM | R | false | false | 1,297 | rd | \name{Sigmak}
\alias{Sigmak}
\docType{data}
\title{
Sigmak: a list of 4 x 4 matrices over the iris measurements
}
\description{
A list of three 4 x 4 numeric matrices, each with rows and columns named
after the four iris measurements (Sepal.Length, Sepal.Width, Petal.Length,
Petal.Width). Presumably the per-component covariance matrices from an EM
fit; confirm against the code that produced them.
}
\usage{data("Sigmak")}
\format{
The format is:
List of 3
$ : num [1:4, 1:4] 0.6539 -0.0311 1.1941 0.4776 -0.0311 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
$ : num [1:4, 1:4] 0.6898 -0.0488 1.2416 0.5 -0.0488 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
$ : num [1:4, 1:4] 0.7 -0.0461 1.3149 0.5371 -0.0461 ...
..- attr(*, "dimnames")=List of 2
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
.. ..$ : chr [1:4] "Sepal.Length" "Sepal.Width" "Petal.Length" "Petal.Width"
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(Sigmak)
## maybe str(Sigmak) ; plot(Sigmak) ...
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/company-finanicial-statements.R
\name{fmp_cash_flow}
\alias{fmp_cash_flow}
\title{Company Cash Flow}
\usage{
fmp_cash_flow(symbol, quarterly = FALSE, as_reported = FALSE)
}
\arguments{
\item{symbol}{\code{character}. A vector of stock symbols.}
\item{quarterly}{\code{logical}. If \code{TRUE} return quarterly. If \code{FALSE}
return annual. Default is \code{FALSE}}
\item{as_reported}{\code{logical}. If \code{TRUE} return data formatted as reported. Default
is \code{FALSE}}
}
\value{
a \link[tibble:tibble-package]{tibble} of relevant financial information
}
\description{
Company Cash Flow
}
\examples{
\donttest{
my_stocks <- c('AAPL', 'GE')
d <- fmp_cash_flow(my_stocks, quarterly = TRUE)
}
}
\seealso{
Other \verb{Company Summaries}:
\code{\link{fmp_balance_sheet}()},
\code{\link{fmp_company_outlook}()},
\code{\link{fmp_dcf}()},
\code{\link{fmp_dividends}()},
\code{\link{fmp_enterprise_value}()},
\code{\link{fmp_financial_growth}()},
\code{\link{fmp_full_financial}()},
\code{\link{fmp_income}()},
\code{\link{fmp_institutional_holders}()},
\code{\link{fmp_key_executives}()},
\code{\link{fmp_key_metrics}()},
\code{\link{fmp_market_cap}()},
\code{\link{fmp_profile}()},
\code{\link{fmp_rating}()},
\code{\link{fmp_ratios}()},
\code{\link{fmp_shares_float}()},
\code{\link{fmp_splits}()}
}
\concept{\verb{Company Summaries}}
| /man/fmp_cash_flow.Rd | permissive | jpiburn/fmpapi | R | false | true | 1,419 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/company-finanicial-statements.R
\name{fmp_cash_flow}
\alias{fmp_cash_flow}
\title{Company Cash Flow}
\usage{
fmp_cash_flow(symbol, quarterly = FALSE, as_reported = FALSE)
}
\arguments{
\item{symbol}{\code{character}. A vector of stock symbols.}
\item{quarterly}{\code{logical}. If \code{TRUE} return quarterly. If \code{FALSE}
return annual. Default is \code{FALSE}}
\item{as_reported}{\code{logical}. If \code{TRUE} return data formatted as reported. Default
is \code{FALSE}}
}
\value{
a \link[tibble:tibble-package]{tibble} of relevant financial information
}
\description{
Company Cash Flow
}
\examples{
\donttest{
my_stocks <- c('AAPL', 'GE')
d <- fmp_cash_flow(my_stocks, quarterly = TRUE)
}
}
\seealso{
Other \verb{Company Summaries}:
\code{\link{fmp_balance_sheet}()},
\code{\link{fmp_company_outlook}()},
\code{\link{fmp_dcf}()},
\code{\link{fmp_dividends}()},
\code{\link{fmp_enterprise_value}()},
\code{\link{fmp_financial_growth}()},
\code{\link{fmp_full_financial}()},
\code{\link{fmp_income}()},
\code{\link{fmp_institutional_holders}()},
\code{\link{fmp_key_executives}()},
\code{\link{fmp_key_metrics}()},
\code{\link{fmp_market_cap}()},
\code{\link{fmp_profile}()},
\code{\link{fmp_rating}()},
\code{\link{fmp_ratios}()},
\code{\link{fmp_shares_float}()},
\code{\link{fmp_splits}()}
}
\concept{\verb{Company Summaries}}
|
#' Get Balance of Account or Space
#'
#' Queries the lemon.markets trading API and returns the cash balance,
#' either for the whole account or for a single space.
#'
#' @param space_id character; Optional space ID for which balance shall be retrieved. See also \code{list_spaces()}
#' @examples
#' \dontrun{
#' balance()
#' balance(space_id = "e44907b7-d131-4ec9-9647-a2649480003d")
#' }
#' @export
balance <- function(space_id = NULL) {
  stopifnot(length(space_id) <= 1)
  # Account-wide state lives under /state/; a space has its own endpoint.
  account_wide <- is.null(space_id)
  if (account_wide) {
    url <- sprintf("https://%s.lemon.markets/rest/v1/state/",
                   trading_url())
  } else {
    url <- sprintf("https://%s.lemon.markets/rest/v1/spaces/%s/state/",
                   trading_url(),
                   space_id)
  }
  resp <- request_lemon(url)
  payload <- content(resp)
  # The account response nests the balance one level deeper than a space does.
  bal <- if (account_wide) payload$state$balance else payload$balance
  as.numeric(bal)
}
| /R/balance.R | permissive | quantargo/lemonmarkets | R | false | false | 877 | r | #' Get Balance of Account or Space
#'
#' @param space_id character; Optional space ID for which balance shall be retrieved. See also \code{list_spaces()}
#' @examples
#' \dontrun{
#' balance()
#' balance(space_id = "e44907b7-d131-4ec9-9647-a2649480003d")
#' }
#' @export
balance <- function(space_id = NULL) {
  # Retrieve the cash balance, either account-wide (space_id = NULL) or for
  # one specific space.  At most one space ID may be supplied.
  stopifnot(length(space_id) <= 1)
  if (is.null(space_id)) {
    resp <- request_lemon(sprintf("https://%s.lemon.markets/rest/v1/state/",
                                  trading_url()))
    # The account endpoint wraps the balance inside a `state` element.
    as.numeric(content(resp)$state$balance)
  } else {
    resp <- request_lemon(sprintf("https://%s.lemon.markets/rest/v1/spaces/%s/state/",
                                  trading_url(),
                                  space_id))
    as.numeric(content(resp)$balance)
  }
}
|
#' Sample number of transitions between observations for a CTMC
#'
#' Uses properties of homogeneous Continuous time Markov Chains (CTMCs) to
#' sample the number of transitions between known endpoints and times.
#' Sampling uses uniformization: conditional on the endpoints, the count of
#' (virtual) jumps mixes Poisson weights with powers of the uniformized
#' discrete-time transition matrix, and is drawn here by brute-force
#' inversion of that discrete CDF.
#'
#' @param d0 index of depth bin in which the CTMC segment starts
#' @param df index of depth bin in which the CTMC segment ends
#' @param s0 dive stage in which the CTMC begins
#' @param sf dive stage in which the CTMC ends
#' @param t0 time at which \code{d0} is observed
#' @param tf time at which \code{df} is observed
#' @param t.stages stage transition times for the dive
#' @param rate.unif uniformization rate, for standardizing transition
#' rates between states
#' @param P.raw list of continuous time probability transition matrices, and
#' components.
#' @param P.tx list of discrete time probability transition matrices
#' @param n.bins number of rows in the \code{depths} matrix
#' @param max.tx maximum number of transitions between observations that will
#' be allowed during imputation
#'
#' @return numeric vector with one entry per dive stage spanned by the
#'   interval \code{[t0, tf]}, giving the sampled number of transitions
#'   within each stage
#'
#' @importFrom Matrix sparseVector Diagonal
#' @importFrom stats runif dpois
#' @importFrom expm expAtv
#'
# @example examples/dsdive.impute.sample_n.R
#'
dsdive.impute.sample_n = function(d0, df, s0, sf, t0, tf, t.stages, rate.unif,
                                  P.raw, P.tx, n.bins, max.tx) {
  # initialize output
  s.range = s0:sf
  n.stages = length(s.range)
  n = numeric(n.stages)
  # initialize initial state distribution
  # (point mass on the observed starting depth bin d0)
  u0 = sparseVector(x = 1, i = d0, length = n.bins)
  # determine window of time spent in each stage
  # NOTE(review): assumes t.stages[s-1]/t.stages[s] bracket stage s, with NA
  # entries (terminal stage) absorbed by na.rm -- confirm against callers
  dt.stages = sapply(s.range, function(s) {
    min(tf, t.stages[s], na.rm = TRUE) - max(t0, t.stages[s-1])
  })
  # sample number of transitions in each stage
  for(i in 1:n.stages) {
    # extract current stage
    s = s.range[i]
    # diffuse bridging probabilities through other stages
    # uf starts as a point mass on the final bin df and is pulled backward
    # through every stage *after* stage i, so that u0 %*% (stage-i kernel)
    # %*% uf yields the bridged endpoint probability
    uf = sparseVector(x = 1, i = df, length = n.bins)
    if(i < n.stages) {
      for(j in (i+1):n.stages) {
        s2 = s.range[j]
        # use the precomputed observation-step matrix when the stage window
        # matches its time step; otherwise evaluate expm(A*t) %*% v directly
        if(dt.stages[j] == P.raw[[s2]]$obstx.tstep) {
          uf = P.raw[[s2]]$obstx.mat %*% uf
        } else {
          uf = expAtv(A = as.matrix(P.raw[[s2]]$A),
                      t = dt.stages[j],
                      v = as.numeric(uf))[[1]]
        }
      }
    }
    #
    # compute denominator by assembling u0, uf, and within-stage transitions
    #
    if(dt.stages[i] == P.raw[[s]]$obstx.tstep) {
      utx = P.raw[[s]]$obstx.mat %*% uf
    } else {
      utx = expAtv(A = as.matrix(P.raw[[s]]$A),
                   t = dt.stages[i],
                   v = as.numeric(uf))[[1]]
    }
    # C is the total bridging probability for this stage; it normalizes the
    # conditional distribution of the transition count
    C = as.numeric(u0 %*% utx)
    #
    # sample n[i] via inverse-transform method
    #
    # inverse-transform sampling variate
    uC = runif(1) * C
    # brute-force CDF inversion
    pC = 0
    for(n.step in 0:max.tx) {
      # diffuse u0 wrt. another transition
      # (u0 accumulates powers of the uniformized transition matrix P.tx[[s]])
      if(n.step > 0) {
        u0 = u0 %*% P.tx[[s]]
      }
      # probability of reaching df after n.step transitions
      p.df = as.numeric(u0 %*% uf)
      # aggregate probability mass
      # (Poisson weight from uniformization at rate rate.unif)
      pC = pC + p.df * dpois(x = n.step, lambda = rate.unif * dt.stages[i])
      # stop if minimal mass is exceeded
      if(pC >= uC) {
        n[i] = n.step
        break
      }
      if(n.step==max.tx) {
        msg = paste('Inverse-transform sampling failed; upper bound (max.tx)',
                    'reached before inversion completed for stage', s,
                    'Excess mass:', (uC-pC)/C, sep = ' ')
        warning(msg)
      }
    }
  }
  n
} | /R/dsdive.impute.sample_n.R | no_license | jmhewitt/dsdive | R | false | false | 3,611 | r | #' Sample number of transitions between observations for a CTMC
#'
#' Uses properties of homogeneous Continuous time Markov Chains (CTMCs) to
#' sample the number of transitions between known endpoints and times.
#'
#' @param d0 index of depth bin in which the CTMC segment starts
#' @param df index of depth bin in which the CTMC segment ends
#' @param s0 dive stage in which the CTMC begins
#' @param sf dive estage in which the CTMC ends
#' @param t0 time at which \code{d0} is observed
#' @param tf time at which \code{df} is observed
#' @param t.stages stage transition times for the dive
#' @param rate.unif uniformization rate, for standardizing transition
#' rates between states
#' @param P.raw list of continuous time probability transition matrices, and
#' components.
#' @param P.tx list of discrete time probability transition matrices
#' @param n.bins number of rows in the \code{depths} matrix
#' @param max.tx maximum number of transitions between observations that will
#' be allowed during imputation
#'
#' @importFrom Matrix sparseVector Diagonal
#' @importFrom stats runif dpois
#' @importFrom expm expAtv
#'
# @example examples/dsdive.impute.sample_n.R
#'
dsdive.impute.sample_n = function(d0, df, s0, sf, t0, tf, t.stages, rate.unif,
P.raw, P.tx, n.bins, max.tx) {
# initialize output
s.range = s0:sf
n.stages = length(s.range)
n = numeric(n.stages)
# initialize initial state distribution
u0 = sparseVector(x = 1, i = d0, length = n.bins)
# determine window of time spent in each stage
dt.stages = sapply(s.range, function(s) {
min(tf, t.stages[s], na.rm = TRUE) - max(t0, t.stages[s-1])
})
# sample number of transitions in each stage
for(i in 1:n.stages) {
# extract current stage
s = s.range[i]
# diffuse bridging probabilities through other stages
uf = sparseVector(x = 1, i = df, length = n.bins)
if(i < n.stages) {
for(j in (i+1):n.stages) {
s2 = s.range[j]
if(dt.stages[j] == P.raw[[s2]]$obstx.tstep) {
uf = P.raw[[s2]]$obstx.mat %*% uf
} else {
uf = expAtv(A = as.matrix(P.raw[[s2]]$A),
t = dt.stages[j],
v = as.numeric(uf))[[1]]
}
}
}
#
# compute denominator by assembling u0, uf, and within-stage transitions
#
if(dt.stages[i] == P.raw[[s]]$obstx.tstep) {
utx = P.raw[[s]]$obstx.mat %*% uf
} else {
utx = expAtv(A = as.matrix(P.raw[[s]]$A),
t = dt.stages[i],
v = as.numeric(uf))[[1]]
}
C = as.numeric(u0 %*% utx)
#
# sample n[i] via inverse-transform method
#
# inverse-transform sampling variate
uC = runif(1) * C
# brute-force CDF inversion
pC = 0
for(n.step in 0:max.tx) {
# diffuse u0 wrt. another transition
if(n.step > 0) {
u0 = u0 %*% P.tx[[s]]
}
# probability of reaching df after n.step transitions
p.df = as.numeric(u0 %*% uf)
# aggregate probability mass
pC = pC + p.df * dpois(x = n.step, lambda = rate.unif * dt.stages[i])
# stop if minimal mass is exceeded
if(pC >= uC) {
n[i] = n.step
break
}
if(n.step==max.tx) {
msg = paste('Inverse-transform sampling failed; upper bound (max.tx)',
'reached before inversion completed for stage', s,
'Excess mass:', (uC-pC)/C, sep = ' ')
warning(msg)
}
}
}
n
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rescaling.R
\name{rescaling}
\alias{rescaling}
\alias{setmax}
\alias{setmean}
\alias{setmin}
\alias{setrange}
\alias{setsd}
\title{Rescaling Scores}
\usage{
setrange(x, y, to = range(y))
setmin(x, y, to = min(y))
setmax(x, y, to = max(y))
setmean(x, y, to = mean(y))
setsd(x, y, to = sd(y))
}
\arguments{
\item{x}{vector of scores to be modified.}
\item{y}{optional vector of scores from which new parameters will be taken.}
\item{to}{numeric value(s) that parameters in \code{x} will be set to,
defaulting to the \code{min}, \code{max}, \code{range}, \code{mean}, or
\code{sd} of \code{y}.}
}
\value{
Returns a vector of \code{length(x)} scores, with new parameters.
}
\description{
\code{setrange} truncates a quantitative variable to have a set minimum
and maximum. \code{setmin} truncates only to the minimum, and \code{setmax}
truncates only to the maximum. \code{setmean} and \code{setsd} set the
mean and standard deviation.
}
| /man/rescaling.Rd | no_license | jaydennord/epmr | R | false | true | 1,019 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rescaling.R
\name{rescaling}
\alias{rescaling}
\alias{setmax}
\alias{setmean}
\alias{setmin}
\alias{setrange}
\alias{setsd}
\title{Rescaling Scores}
\usage{
setrange(x, y, to = range(y))
setmin(x, y, to = min(y))
setmax(x, y, to = max(y))
setmean(x, y, to = mean(y))
setsd(x, y, to = sd(y))
}
\arguments{
\item{x}{vector of scores to be modified.}
\item{y}{optional vector of scores from which new parameters will be taken.}
\item{to}{numeric value(s) that parameters in \code{x} will be set to,
defaulting to the \code{min}, \code{max}, \code{range}, \code{mean}, or
\code{sd} of \code{y}.}
}
\value{
Returns a vector of \code{length(x)} scores, with new parameters.
}
\description{
\code{setrange} truncates a quantitative variable to have a set minimum
and maximum. \code{setmin} truncates only to the minimum, and \code{setmax}
truncates only to the maximum. \code{setmean} and \code{setsd} set the
mean and standard deviation.
}
|
# This code plots for each of the set of 1000 trees:
# * the percentage of node splits with high posterior probability (> 0.95)
# * the number of nodes consistently recovered across the set of 1000 trees,
#   which is the number of node splits found in more than 950 trees.
# Inputs are CSV summaries produced by the MEDUSA/multiMEDUSA analyses;
# column V1 is the tree-set index and V2 the summary statistic.
library(ggplot2)
multimedusa <- read.csv("ancillary/supp_mat_14-consistently_recovered_splits_multimedusa_variable_topology.csv", header=FALSE)
medusa <- read.csv("ancillary/supp_mat_15-percentage_nodes_high_post_prob.csv", header=FALSE)
#png(filename="figures/plot_medusa_multimedusa_tests.png")
# Base-graphics overlay: multiMEDUSA counts (red, left axis) and MEDUSA
# percentages (blue, right axis) drawn on the same panel via par(new=TRUE).
par(bty="l", cex.main=2, cex.lab=2)
par(mar=c(5,7,4,6)+0.1)
plot(multimedusa$V1, multimedusa$V2, col="red", type="l", xlab="set of 1000 trees",
     ylab="number of consistently\n recovered nodes")
par(new=TRUE)
plot(medusa$V1, medusa$V2, col="blue", type="l", xaxt="n", yaxt="n", xlab="", ylab="")
axis(4)
mtext("percentage of nodes with\n high posterior probability", side=4, line=3, cex=2)
legend("topleft", legend=c("MEDUSA","multiMEDUSA"), lty=1, pch=1, col=c("blue","red"), inset=0.2)
#dev.off()
## plot percentage of good nodes versus consistently recovered nodes
# NOTE(review): par(new=TRUE) before opening a fresh svg() device appears to
# be a no-op for the ggplot below -- confirm it can be dropped.
par(new=TRUE)
svg(filename="ancillary/fig05.svg")
data <- as.data.frame(cbind(medusa$V2, multimedusa$V2))
p <- qplot(data$V1, data$V2, geom="point")
p + geom_smooth(method="lm", se=FALSE, aes(data$V1)) +
    scale_y_continuous(breaks=c(5, 7.5, 10, 12.5, 15),
                       labels=c("5", "7.5", "10", "12.5", "15")) +
    labs(title="Effect of percentage of \n nodes with high posterior probability on multiMEDUSA results") +
    xlab("percentage of nodes with post. prob. > 0.95") +
    ylab("number of nodes consistenly recovered from the \n multiMEDUSA analyses") +
    theme_bw()
dev.off()
## plot width of confidence intervals for estimated ages versus consistently recovered nodes
multimedusa <- read.csv("output/multimedusa_output.csv", header=FALSE)
par(new=TRUE)
#png(filename="figures/plot_multimedusa_tests_on_confidence_intervals.png")
p <- qplot(multimedusa$V1, multimedusa$V2, geom="line", size=I(1),
      main="Effect of confidence interval width of estimated ages
      on multiMEDUSA results",
      xlab="sets of trees with decreasing width of confidence intervals",
      ylab="number of nodes consistenly recovered from the
      multiMEDUSA analyses")
p + scale_y_continuous(breaks=c(5, 7.5, 10, 12.5, 15),
                       labels=c("5", "7.5", "10", "12.5", "15")) + theme_bw()
#dev.off()
| /code/plot_medusa_tests.R | no_license | carlosp420/nymphalidae_diversification | R | false | false | 2,513 | r | # This code plots for each of the set of 1000 trees:
# * the percentage of node splits with high posterior probability (> 0.95)
# * the number of nodes consistently recovered across the set of 1000 trees, which
# is the number of node splits found in more than 950 trees.
library(ggplot2)
multimedusa <- read.csv("ancillary/supp_mat_14-consistently_recovered_splits_multimedusa_variable_topology.csv", header=FALSE)
medusa <- read.csv("ancillary/supp_mat_15-percentage_nodes_high_post_prob.csv", header=FALSE)
#png(filename="figures/plot_medusa_multimedusa_tests.png")
par(bty="l", cex.main=2, cex.lab=2)
par(mar=c(5,7,4,6)+0.1)
plot(multimedusa$V1, multimedusa$V2, col="red", type="l", xlab="set of 1000 trees",
ylab="number of consistently\n recovered nodes")
par(new=TRUE)
plot(medusa$V1, medusa$V2, col="blue", type="l", xaxt="n", yaxt="n", xlab="", ylab="")
axis(4)
mtext("percentage of nodes with\n high posterior probability", side=4, line=3, cex=2)
legend("topleft", legend=c("MEDUSA","multiMEDUSA"), lty=1, pch=1, col=c("blue","red"), inset=0.2)
#dev.off()
## plot percentage of good nodes versus consistently recovered nodes
par(new=TRUE)
svg(filename="ancillary/fig05.svg")
data <- as.data.frame(cbind(medusa$V2, multimedusa$V2))
p <- qplot(data$V1, data$V2, geom="point")
p + geom_smooth(method="lm", se=FALSE, aes(data$V1)) +
scale_y_continuous(breaks=c(5, 7.5, 10, 12.5, 15),
labels=c("5", "7.5", "10", "12.5", "15")) +
labs(title="Effect of percentage of \n nodes with high posterior probability on multiMEDUSA results") +
xlab("percentage of nodes with post. prob. > 0.95") +
ylab("number of nodes consistenly recovered from the \n multiMEDUSA analyses") +
theme_bw()
dev.off()
## plot width of confidence intervals for estimated ages versus consistently recovered nodes
multimedusa <- read.csv("output/multimedusa_output.csv", header=FALSE)
par(new=TRUE)
#png(filename="figures/plot_multimedusa_tests_on_confidence_intervals.png")
p <- qplot(multimedusa$V1, multimedusa$V2, geom="line", size=I(1),
main="Effect of confidence interval width of estimated ages
on multiMEDUSA results",
xlab="sets of trees with decreasing width of confidence intervals",
ylab="number of nodes consistenly recovered from the
multiMEDUSA analyses")
p + scale_y_continuous(breaks=c(5, 7.5, 10, 12.5, 15),
labels=c("5", "7.5", "10", "12.5", "15")) + theme_bw()
#dev.off()
|
# Regression test for dmt::fit.dependency.model with unconstrained projections:
# Wx != Wy
# W free (no regularization)
# OK, except some non-optimal convergence with full covariances,
# which corresponds to pcca. Check as soon as possible.
library(dmt)
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/fit.dependency.model.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/internals.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/initialize2.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/costs.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/get.W.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/phi.updates.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/dependency.score.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/optimize.parameters.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pfa.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/ppca.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pcca.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pcca.with.isotropic.margins.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/M.set.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/set.beta.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/W.cca.EM.R")
# Infinite prior variance on the Wx/Wy similarity term, i.e. no penalty
# tying Wx to Wy (W is free).
priors <- list(Nm.wx.wy.sigma = Inf)
# TODO confirm also with list() and list(Nm.wx.wy.sigma = NULL)
N <- 100      # samples per simulated toy data set
zDim <- 1     # default latent dimension (overwritten in the loop below)
xDim <- 8     # dimensionality of X
yDim <- 6     # dimensionality of Y
###############################################################
# For each marginal covariance structure,
# test model performance
cors.list <- list()
zdims <- seq(1, min(xDim, yDim), 2) # test with various latent variable dimensionalities
# TODO: other marginal covariance structures for Wx = Wy; W>=0
#for (marginalCovariances in c("full")) {
for (marginalCovariances in c("isotropic", "diagonal", "identical isotropic", "full")) {
  cors <- c()
  for (zDim in zdims) {
    print(paste(marginalCovariances, " / zDim: ", zDim))
    # simulate ground-truth data under the requested covariance structure...
    toy <- generate.toydata(N = N, zDim = zDim, xDim = xDim, yDim = yDim,
                            marginal.covariances = marginalCovariances,
                            priors = priors)
    # ...then refit the dependency model on that data
    res <- fit.dependency.model(toy$X, toy$Y, zDimension = zDim,
                                marginalCovariances = marginalCovariances,
                                priors = priors, matched = FALSE, verbose = TRUE)
    # agreement between estimated and true parameters (Wx, Wy, phix, phiy)
    vec <- compare.estimate.and.truth(res, toy)
    cors <- rbind(cors, vec)
  }
  colnames(cors) <- c("wx", "wy", "phix", "phiy")
  rownames(cors) <- as.character(zdims)
  cors.list[[marginalCovariances]] <- cors
}
print(cors.list)
| /devel/dmt/www/tests/complete/tests/depmod.R | permissive | antagomir/DMT | R | false | false | 2,394 | r | # Wx != Wy
# W free (no regularization)
# OK, except some non-optimal convergence with full covariances,
# which corresponds to pcca. Check as soon as possible.
library(dmt)
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/fit.dependency.model.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/internals.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/initialize2.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/costs.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/get.W.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/phi.updates.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/dependency.score.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/optimize.parameters.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pfa.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/ppca.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pcca.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/pcca.with.isotropic.margins.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/M.set.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/set.beta.R")
#source("~/local/Rpackages/dmt/SVN/dmt/pkg/R/W.cca.EM.R")
priors <- list(Nm.wx.wy.sigma = Inf)
# TODO confirm also with list() and list(Nm.wx.wy.sigma = NULL)
N <- 100
zDim <- 1
xDim <- 8
yDim <- 6
###############################################################
# For each marginal covariance structure,
# test model performance
cors.list <- list()
zdims <- seq(1, min(xDim, yDim), 2) # test with various latent variable dimensionalities
# TODO: other marginal covariance structures for Wx = Wy; W>=0
#for (marginalCovariances in c("full")) {
for (marginalCovariances in c("isotropic", "diagonal", "identical isotropic", "full")) {
cors <- c()
for (zDim in zdims) {
print(paste(marginalCovariances, " / zDim: ", zDim))
toy <- generate.toydata(N = N, zDim = zDim, xDim = xDim, yDim = yDim,
marginal.covariances = marginalCovariances,
priors = priors)
res <- fit.dependency.model(toy$X, toy$Y, zDimension = zDim,
marginalCovariances = marginalCovariances,
priors = priors, matched = FALSE, verbose = TRUE)
vec <- compare.estimate.and.truth(res, toy)
cors <- rbind(cors, vec)
}
colnames(cors) <- c("wx", "wy", "phix", "phiy")
rownames(cors) <- as.character(zdims)
cors.list[[marginalCovariances]] <- cors
}
print(cors.list)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pprof.R
\name{random_port}
\alias{random_port}
\title{Choose a random free TCP port.}
\usage{
random_port(lower = 49152L, upper = 65535L)
}
\arguments{
\item{lower}{Integer of length 1, lower bound of the port number.}
\item{upper}{Integer of length 1, upper bound of the port number.}
}
\value{
Port number, positive integer of length 1.
}
\description{
Choose a random free TCP port.
}
\details{
This function is a simple wrapper around
\code{parallelly::freePort()} with the default port range
covering ephemeral ports only.
}
\examples{
random_port()
}
| /man/random_port.Rd | no_license | cran/proffer | R | false | true | 636 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pprof.R
\name{random_port}
\alias{random_port}
\title{Choose a random free TCP port.}
\usage{
random_port(lower = 49152L, upper = 65535L)
}
\arguments{
\item{lower}{Integer of length 1, lower bound of the port number.}
\item{upper}{Integer of length 1, upper bound of the port number.}
}
\value{
Port number, positive integer of length 1.
}
\description{
Choose a random free TCP port.
}
\details{
This function is a simple wrapper around
\code{parallelly::freePort()} with the default port range
covering ephemeral ports only.
}
\examples{
random_port()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosquito_biology.R
\name{calculate_carrying_capacity}
\alias{calculate_carrying_capacity}
\title{Calculate the vector carrying capacity}
\usage{
calculate_carrying_capacity(parameters)
}
\arguments{
\item{parameters}{model parameters}
}
\description{
taken from
"Modelling the impact of vector control interventions on Anopheles gambiae
population dynamics"
}
| /man/calculate_carrying_capacity.Rd | permissive | EllieSherrardSmith/malariasimulation | R | false | true | 438 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mosquito_biology.R
\name{calculate_carrying_capacity}
\alias{calculate_carrying_capacity}
\title{Calculate the vector carrying capacity}
\usage{
calculate_carrying_capacity(parameters)
}
\arguments{
\item{parameters}{model parameters}
}
\description{
taken from
"Modelling the impact of vector control interventions on Anopheles gambiae
population dynamics"
}
|
/Exploratory_Data_Analysis/Project_2/plot4.R | no_license | freestander/coursera-data-science-specialization | R | false | false | 1,092 | r | ||
listings <- read.csv("D:/R Excel Sessions/Assignments/Association Rules/listings.csv")
head(listings)
listings$Price_band <- cut(listings$price,breaks = c(0,1000,3000,5000,7000,10000),labels = c("Less_than_1k","1k-3k","3k-5k","5k-7k",">7k"))
levels(listings$Price_band)
View(listings)
library(arules)
colnames(listings)
listings_new <- listings[,-c(1:5,7:8,10,14:15)]
str(listings)
colnames(listings_new)
#Preparing a band for minimum_nights
listings_new$minimum_nights[listings_new$minimum_nights > 11] <- "More_10_nights"
View(listings_new)
#Factorization
listings_new$minimum_nights <- as.factor(listings_new$minimum_nights)
listings_new$number_of_reviews <- as.factor(listings_new$number_of_reviews)
listings_new$availability_365 <- as.factor((listings_new$availability_365))
str(listings_new)
levels(listings_new$Price_band)
attach(listing)
arules::inspect(rules)
rules.sorted <- sort(rules,by="lift")
arules::inspect(rules.sorted)
rules<-apriori(listings_new,parameter=list(support=.2,confidence=.1))
arules::inspect(rules)
rules<-apriori(listings_new,parameter=list(support=.2,confidence=.1),appearance = list(rhs=c("Price_band=Less_than_1k","Price_band=1k-3k","Price_band=3k-5k","Price_band=5k-7k","Price_band=>7k"),default="lhs"),control=list(verbose=F))
arules::inspect(rules)
| /Association_Listings.R | no_license | Smitag2526/Data-Science | R | false | false | 1,364 | r |
listings <- read.csv("D:/R Excel Sessions/Assignments/Association Rules/listings.csv")
head(listings)
listings$Price_band <- cut(listings$price,breaks = c(0,1000,3000,5000,7000,10000),labels = c("Less_than_1k","1k-3k","3k-5k","5k-7k",">7k"))
levels(listings$Price_band)
View(listings)
library(arules)
colnames(listings)
listings_new <- listings[,-c(1:5,7:8,10,14:15)]
str(listings)
colnames(listings_new)
#Preparing a band for minimum_nights
listings_new$minimum_nights[listings_new$minimum_nights > 11] <- "More_10_nights"
View(listings_new)
#Factorization
listings_new$minimum_nights <- as.factor(listings_new$minimum_nights)
listings_new$number_of_reviews <- as.factor(listings_new$number_of_reviews)
listings_new$availability_365 <- as.factor((listings_new$availability_365))
str(listings_new)
levels(listings_new$Price_band)
attach(listing)
arules::inspect(rules)
rules.sorted <- sort(rules,by="lift")
arules::inspect(rules.sorted)
rules<-apriori(listings_new,parameter=list(support=.2,confidence=.1))
arules::inspect(rules)
rules<-apriori(listings_new,parameter=list(support=.2,confidence=.1),appearance = list(rhs=c("Price_band=Less_than_1k","Price_band=1k-3k","Price_band=3k-5k","Price_band=5k-7k","Price_band=>7k"),default="lhs"),control=list(verbose=F))
arules::inspect(rules)
|
#######################################################################
#######################################################################
#######################################################################
# Design of experiment
lvls <- list(NA)
fixlev<-c(0,0.5) # levels for not interesting parameters
seqlev<-c(1, 2, 3) # levels for interesting parameters
lvls[[1]] <- c(0) # violation
lvls[[2]] <- seqlev
lvls[[3]] <- c(0)
lvls[[4]] <- 0.5
lvls[[5]] <- seqlev
lvls[[6]] <- 0
lvls[[7]] <- c(0) # violation
lvls[[8]] <- c(0)
lvls[[9]] <- c(0)
lvls[[10]] <- c(0)
lvls[[11]] <- seqlev
interceptc<-(0) # intercept for C1 and C2
intercepty<-(-1) # intercept for Y1 and Y2
intercepth<-(-1) #intercept for H
interceptz<-0.5
interceptx<-0.5
### factorial design
dsgn <- as.matrix(expand.grid(lvls[[1]], lvls[[2]], lvls[[3]], lvls[[4]], lvls[[5]], lvls[[6]], lvls[[7]], lvls[[8]], lvls[[9]], lvls[[10]], lvls[[11]]))
dim(dsgn)
dim(dsgn)[1]
# only null
#dsgn<-dsgn[dsgn[,5]==0|dsgn[,6]==0,]
diff_theta<-rep(0,dim(dsgn)[1])
########################################################################
### CALCULATE TRUE VALUES OF diff
matt <- matrix(0, dim(dsgn)[1], 2)
true_gammas<-list()
for (i in c(1:dim(dsgn)[1])) {
truetheta11 <- 0
truetheta00 <- 0
true_gammas[[i]]<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(true_gammas[[i]])<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
myH<-0
myZ<-0
for(theta in c(0,1)){
kk <- 1
individual <- matrix(0, 8, 8)
if(theta==0){ myX1<-myX2<-0}
if(theta==1){ myX1<-myX2<-1}
for (c2i in 0:1) {
for (y1i in 0:1) {
for (c1i in 0:1) {
individual[kk, 1:3] <- c(y1i, c1i, c2i)
# P(C1=1 or =0)
pc1<-1/(1+exp(-(interceptc )))
aa <- (as.numeric(c1i == 1) * (pc1) + as.numeric(c1i == 0) * (1-pc1))
true_gammas[[i]][true_gammas[[i]]$cond=="C1" & true_gammas[[i]]$C1== c1i,]$gamma<-aa
# P(Y1=1 or =0|C1,X1)
py1<-1/(1+exp(-(intercepty + dsgn[i, 5]*c1i + dsgn[i, 3]*myX1 + dsgn[i, 8]*myH)))
bb <- as.numeric(y1i == 1) * (py1) + as.numeric(y1i == 0) * (1 - py1)
true_gammas[[i]][true_gammas[[i]]$cond=="Y1" &
true_gammas[[i]]$Y1== y1i & true_gammas[[i]]$X1==myX1 & true_gammas[[i]]$C1==c1i,]$gamma<-bb
# P(C2=1 or =0|C1,X1)
pc2<-(1/(1+exp(-(interceptc + dsgn[i, 4]*c1i + dsgn[i, 6]*myX1))))
cc <- as.numeric(c2i == 1) * (pc2) + as.numeric(c2i == 0)*(1-pc2)
true_gammas[[i]][true_gammas[[i]]$cond=="C2" &
true_gammas[[i]]$C1== c1i & true_gammas[[i]]$C2== c2i & true_gammas[[i]]$X1== myX1,]$gamma<-cc
# P(Y2=1|C2,X2,Y1)
dd <- (as.numeric(y1i == 1) * 1 + as.numeric(y1i == 0) *
( (1/(1+exp(-(intercepty+dsgn[i, 9]*myZ + dsgn[i, 5]*c2i + dsgn[i, 3]*myX2 + dsgn[i, 8]*myH)))) ))
true_gammas[[i]][true_gammas[[i]]$cond=="Y2" &
true_gammas[[i]]$Y2== 1 & true_gammas[[i]]$C2== c2i & true_gammas[[i]]$X2== myX2 &
true_gammas[[i]]$Y1== y1i,]$gamma<-dd
individual[kk, 5:8] <- c(aa, bb, cc, dd)
# Product
individual[kk, 4] <- aa * bb * cc * dd
if(theta==1){ truetheta11 <- truetheta11 + individual[kk, 4] }
if(theta==1){ truetheta00 <- truetheta00 + individual[kk, 4] }
kk <- kk + 1
}
}
}
matt[i, ] <- c(truetheta00, truetheta11)}
# For H marginals
for (h1i in 0:1) {
# P(H=1 or =0)
ph1<-1/(1+exp(-(intercepth )))
hh <- (as.numeric(h1i == 1) * (ph1) + as.numeric(h1i == 0) * (1-ph1))
true_gammas[[i]][true_gammas[[i]]$cond=="H" & true_gammas[[i]]$H== h1i,]$gamma<-hh
}
}
diff_theta<-round(matt[,2]-matt[,1],5)
# confirming that every event has a plaussible chance of occuring:
for(i in 1:dim(dsgn)[1]) {
print(dim(true_gammas[[i]][true_gammas[[i]]$gamma>0.95 | true_gammas[[i]]$gamma<0.05,])[1]-32)
if((dim(true_gammas[[i]][true_gammas[[i]]$gamma>0.95 | true_gammas[[i]]$gamma<0.05,])[1]-32)>0){print(dsgn[i,])}
}
########################################################################
# Functions
########################################################################
# Posterior draw of P(C2 = c2 | C1 in c1, X1 in x1, H in h), read from the
# global `simdata` data frame.  Successes/trials in the conditioning set are
# folded into a Beta(ss + 1, nn - ss + 1) posterior, i.e. a uniform
# Beta(1, 1) prior on the probability.  `nm` = number of draws returned.
C2probB <- function(h=c(0,1), c2=c(1), c1=c(0,1), x1=c(0,1), nm = 1) {
  # successes (C2 = 1) and total rows matching the conditioning set
  ss <- sum(simdata$C2[simdata$H %in% h & simdata$C1 %in% c1 & simdata$X1 %in% x1])
  nn <- length(simdata$C2[simdata$H %in% h & simdata$C1 %in% c1 & simdata$X1 %in% x1])
  pp <- (rbeta(nm, (ss + 1), (nn - ss + 1)))
  # return the draw for c2 = 1, or its complement for c2 = 0
  if (c2 %in% 0) {return(1 - as.numeric(pp))}
  if (c2 %in% 1) {return(as.numeric(pp))}}
# Posterior draw of P(Y2 = y2 | C2, X2, Y1, C1, X1, H) from the global
# `simdata` data frame, using a Beta(ss + 1, nn - ss + 1) posterior
# (uniform Beta(1, 1) prior).  `nm` = number of Monte Carlo draws returned.
Y2probB<-function(h=c(0,1), y2=c(1),c2=c(0,1),x2=c(0,1),y1=c(0,1), c1=c(0,1), x1=c(0,1), nm=1){
  # number of Y2 = 1 rows in the conditioning set
  ss<-sum(simdata$Y2[simdata$H %in% h &
                       simdata$C2%in% c2 & simdata$X2%in% x2 &
                       simdata$Y1%in% y1 & simdata$C1%in% c1 & simdata$X1%in% x1])
  # total number of rows in the conditioning set
  nn<-length(simdata$Y2[simdata$H %in% h &
                          simdata$C2%in% c2 & simdata$X2%in% x2 &
                          simdata$Y1%in% y1 & simdata$C1%in% c1 & simdata$X1%in% x1])
  pp<-(rbeta(nm,(ss+1),(nn-ss+1)))
  # return the draw for y2 = 1, or its complement for y2 = 0
  if(y2%in%0){return(1-as.numeric(pp))}
  if(y2%in%1){return(as.numeric(pp))}}
# Posterior draw of P(Y1 = y1 | C1 in c1, X1 in x1, H in h) from the global
# `simdata` data frame, under a uniform Beta(1, 1) prior.
# `nm` = number of Monte Carlo draws returned.
Y1probB<-function(h=c(0,1), y1=1,c1=c(0,1),x1=c(0,1),nm=1){
  # successes (Y1 = 1) and total rows matching the conditioning set
  ss<-sum(simdata$Y1[simdata$H %in% h & simdata$X1%in%x1 & simdata$C1%in%c1])
  nn<-length(simdata$Y1[simdata$H %in% h & simdata$X1%in%x1 & simdata$C1%in%c1])
  pp<-(rbeta(nm,(ss+1),(nn-ss+1)))
  # return the draw for y1 = 1, or its complement for y1 = 0
  if(y1%in%0){return(1-as.numeric(pp))}
  if(y1%in%1){return(as.numeric(pp))}}
# Posterior draw of P(C1 = c1 | H in h) from the global `simdata` data
# frame, under a uniform Beta(1, 1) prior.  `nm` = number of draws.
C1probB <- function(h = c(0, 1), c1 = 1, nm = 1) {
  subset_c1 <- simdata$C1[simdata$H %in% h]
  n_success <- sum(subset_c1)
  n_total <- length(subset_c1)
  draws <- as.numeric(rbeta(nm, n_success + 1, n_total - n_success + 1))
  if (c1 %in% 0) {
    return(1 - draws)
  }
  if (c1 %in% 1) {
    return(draws)
  }
}
# Posterior draw of the marginal P(H = h) from the global `simdata` data
# frame, under a uniform Beta(1, 1) prior.  `nm` = number of draws.
HprobB <- function(h = 1, nm = 1) {
  n_success <- sum(simdata$H)
  n_total <- length(simdata$H)
  draws <- as.numeric(rbeta(nm, n_success + 1, n_total - n_success + 1))
  if (h == 0) {
    return(1 - draws)
  }
  if (h == 1) {
    return(draws)
  }
}
########################################################################
# SIMULATIONS STUDY
########################################################################
########################################################################
ppp <- 1
kkk <- 1
est <- matrix(0, dim(dsgn)[1], 2)
result <- matrix(0, dim(dsgn)[1], 2)
numsim <- 100 ### number of datasets generated under each scenario
n <- 10000 ### size of each dataset
set.seed(13) ### for reproducibility
nB<-45 ### number of MC samples
Bayesresult <- matrix(0, dim(dsgn)[1] * numsim, 8)
########################################################################
simdatasum<-matrix(0,dim(dsgn)[1],8)
colnames(simdatasum)<-c( "H", "Z", "C1", "X1", "C2", "Y1", "X2", "Y2")
my_gammas_wY1_wH<-list()
my_gammas_wY1_H<-list()
my_gammas_Y1_wH<-list()
my_gammas_Y1_H<-list()
## Outer loop: one simulation scenario per row of dsgn.
for (i in c(1:dim(dsgn)[1])) {
#for (i in 1) {
## Per-scenario storage: one column of MC-averaged gamma draws per dataset,
## for each of the four estimator variants (with/without Y1, with/without H).
my_gammas_wY1_wH[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_wY1_H[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_Y1_wH[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_Y1_H[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
for (j in 1:numsim) { ### loop over datasets
print(c(i, j))
print(kkk/(dim(dsgn)[1] * numsim))
## Simulate one dataset of size n from the logistic structural model;
## dsgn[i, ] supplies the scenario-specific coefficients.
H <- rbinom(n, size = 1, prob = (1/(1+exp(-(intercepth)))) )
Z <- rbinom(n, 1, (1/(1+exp(-(interceptz + dsgn[i, 10]*H)))) )
C1 <- rbinom(n, size = 1, prob = (1/(1+exp(-(interceptc)))) )
X1<- rbinom(n, 1, (1/(1+exp(-(interceptx + dsgn[i, 1]*C1 + dsgn[i, 2]*H)))) )
C2 <-rbinom(n, 1, (1/(1+exp(-(interceptc + dsgn[i, 4]*C1 + dsgn[i, 6]*X1)))) )
Y1 <-rbinom(n, 1, (1/(1+exp(-(intercepty + dsgn[i, 5]*C1 + dsgn[i, 3]*X1 + dsgn[i, 8]*H)))) )
X2 <-rbinom(n, 1, (1/(1+exp(-(interceptx + dsgn[i, 1]*C2 + dsgn[i, 2]*H + dsgn[i, 7]*Y1 + dsgn[i, 9]*Z + dsgn[i, 11]*X1)))) )
Y2<-rbinom(n, 1, (1/(1+exp(-(intercepty + dsgn[i, 9]*Z + dsgn[i, 5]*C2 + dsgn[i, 3]*X2 + dsgn[i, 8]*H)))) )
## Monotonicity constraint: an outcome at time 1 persists at time 2.
Y2[Y1 == 1]<-1
simdata <- data.frame(H,Z,C1,X1,C2,Y1,X2,Y2)
simdatasum[i,]<-colMeans(simdata)
########################################################################
print("BAYESIAN APPROACH: WITHOUT Y1, WITHOUT H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
## Lookup table of posterior conditional-probability draws for this variant.
gammas_wY1_wH<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_wY1_wH)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
## Start of MC
gammas<-matrix(0,dim(gammas_wY1_wH)[1], nB)
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i <-x2i
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma<-C1probB(c1=c1i)
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i &
gammas_wY1_wH$X1== x1i,]$gamma<-C2probB(c1=c1i,c2=c2i,x1=x1i)
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i &
gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma<-Y2probB(y2=y2i, c1=c1i, c2=c2i, x1=x1i, x2=x2i)
}
}
}
## G-computation sums: theta(1) sets X1 = X2 = 1, theta(0) sets X1 = X2 = 0.
sumtheta11B <- 0
sumtheta00B <- 0
x1i<-1
x2i<-1
y2i<-1
for (x1i in 0:1) {
x2i<-x1i
for (c2i in 0:1) {
for (c1i in 0:1) {
if(x1i==1){ sumtheta11B <- sumtheta11B +
unique(gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma)}
if(x1i==0){ sumtheta00B <- sumtheta00B +
unique(gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma)}
}
}
}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_wY1_wH$gamma } ## End of MC
my_gammas_wY1_wH[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_wY1_wH <- sumtheta00Bvec
sumtheta11Bvec_wY1_wH <- sumtheta11Bvec
#################################################################
print("BAYESIAN APPROACH WITH Y1, WITHOUT H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_Y1_wH<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_Y1_wH)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_Y1_wH)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (y1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma<-C1probB(c1=c1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma<-Y1probB(y1=y1i, c1=c1i, x1=x1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma<-C2probB(c2=c2i, c1=c1i, x1=x1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma<-Y2probB(y2=y2i, c2=c2i, x2=x2i, y1=y1i)
}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(y1i in 0:1){
for(c1i in 0:1){
if(x1i==1){
sumtheta11B<-sumtheta11B+
unique(gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma)
}
if(x1i==0){
sumtheta00B<-sumtheta00B+
unique(gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma)
}
}}}}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_Y1_wH$gamma} ## End of MC
my_gammas_Y1_wH[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_Y1_wH <- sumtheta00Bvec
sumtheta11Bvec_Y1_wH <- sumtheta11Bvec
Bpval_Y1_wH <- 1 - mean((sumtheta11Bvec_Y1_wH - sumtheta00Bvec_Y1_wH) > 0)
#########################################################################
print("BAYESIAN APPROACH WITHOUT Y1 WITH H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_wY1_H<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_wY1_H)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_wY1_H)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (h1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma<-HprobB(h1i)
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma<-C1probB(h=h1i ,c1=c1i)
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma<-C2probB(h=h1i, c2=c2i, c1=c1i, x1=x1i)
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma<-Y2probB(h=h1i, y2=y2i, c1=c1i, c2=c2i, x1=x1i, x2=x2i)
}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(h1i in 0:1){
for(c1i in 0:1){
if(x1i==1){ sumtheta11B<-sumtheta11B+
unique(gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma)
}
if(x1i==0){ sumtheta00B<-sumtheta00B+
unique(gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma)
}
}}}}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_wY1_H$gamma } ## End of MC
my_gammas_wY1_H[[i]][,j] <- rowMeans(gammas[,1:nB])
sumtheta00Bvec_wY1_H <- sumtheta00Bvec
sumtheta11Bvec_wY1_H <- sumtheta11Bvec
##############################################################
###############################################################
# ##
print("BAYESIAN APPROACH WITH Y1 WITH H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_Y1_H<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_Y1_H)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_Y1_H)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (y1i in 0:1) {
for (h1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma<-HprobB(h1i)
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$C1== c1i & gammas_Y1_H$H== h1i,]$gamma<-C1probB(h=h1i ,c1=c1i)
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma<-Y1probB(h=h1i, y1=y1i, c1=c1i, x1=x1i)
gammas_Y1_H[gammas_Y1_H$cond=="C2" & gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma<-C2probB(h=h1i, c2=c2i, c1=c1i, x1=x1i)
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma<-Y2probB(h=h1i, y2=y2i, c2=c2i, x2=x2i, y1=y1i)
}}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(y1i in 0:1){
for(c1i in 0:1){
for (h1i in 0:1) {
if(x1i==1){ sumtheta11B<-sumtheta11B+
unique(gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$C1== c1i & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C2" & gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma)
}
if(x1i==0){ sumtheta00B<-sumtheta00B+
unique(gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$H== h1i & gammas_Y1_H$C1== c1i ,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C2"& gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma)
}}}}}
}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_Y1_H$gamma } ## End of MC
my_gammas_Y1_H[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_Y1_H <- sumtheta00Bvec
sumtheta11Bvec_Y1_H <- sumtheta11Bvec
#########################################
#mod1<-(glm(Y2 ~ H*X1*X2*Y1*C1*C2, data = simdata, family = "binomial"))
#mod2<-(glm(Y2 ~ H*Y1*C1*C2, data = simdata, family = "binomial"))
#library(lmtest)
#pval<-(lrtest(mod2,mod1))["Pr(>Chisq)"]
#print(pval)
#one-sided p-values:
## Posterior probability that theta(1) <= theta(0), used as a one-sided p-value.
Bpval_wY1_wH <- 1 - mean((sumtheta11Bvec_wY1_wH - sumtheta00Bvec_wY1_wH) > 0)
Bpval_Y1_wH <- 1 - mean((sumtheta11Bvec_Y1_wH - sumtheta00Bvec_Y1_wH) > 0)
Bpval_wY1_H <- 1 - mean((sumtheta11Bvec_wY1_H - sumtheta00Bvec_wY1_H) > 0)
Bpval_Y1_H <- 1 - mean((sumtheta11Bvec_Y1_H - sumtheta00Bvec_Y1_H) > 0)
## Benchmark: one-sided Wald p-value from a marginal logistic regression of Y2 on H.
Hpval <- (1-pnorm((summary(glm(Y2 ~ H, data = simdata, family = "binomial"))$coefficients[2, 3])))
Bayesresult[kkk, ] <- c(i, j, diff_theta[i], Bpval_Y1_H, Bpval_wY1_H, Bpval_Y1_wH, Bpval_wY1_wH, Hpval)
print(Bayesresult[kkk, ])
kkk <- kkk + 1
}
## Average the estimated gammas over all simulated datasets for this scenario.
true_gammas[[i]]$simgamma_wY1_wH<-rowMeans(my_gammas_wY1_wH[[i]][,1:j])
true_gammas[[i]]$simgamma_Y1_wH<-rowMeans(my_gammas_Y1_wH[[i]][,1:j])
true_gammas[[i]]$simgamma_wY1_H<-rowMeans(my_gammas_wY1_H[[i]][,1:j])
true_gammas[[i]]$simgamma_Y1_H<-rowMeans(my_gammas_Y1_H[[i]][,1:j])
}
########################################################################
## Are all gamma parameters correctly estimated?
## For each scenario, add a Y2avg column: the true P(Y2 = 1 | ...) gammas
## averaged over the empirical distribution of Y1 (taken from the last
## simulated dataset) within each C1 stratum.
for (i in seq_along(true_gammas)) {
true_gammas[[i]]$Y2avg <- 0
for (c1val in 0:1) {
  ## Empirical weights P(Y1 = 0) and P(Y1 = 1) within this C1 stratum.
  y1_wts <- c(table(simdata[simdata$C1 == c1val, ]$Y1) /
              sum(table(simdata[simdata$C1 == c1val, ]$Y1)))
  gam <- true_gammas[[i]]
  ## Gammas for Y1 == 0 are stratum-specific; the Y1 == 1 gammas are taken
  ## unfiltered on C1, mirroring the original code.
  ## NOTE(review): confirm the unfiltered Y1 == 1 selection is intended
  ## (P(Y2 = 1 | Y1 = 1) is constant, so the rows should not vary with C1).
  g_y1_0 <- gam[gam$cond == "Y2" & gam$Y1 == 0 & gam$C1 == c1val, ]$gamma
  g_y1_1 <- gam[gam$cond == "Y2" & gam$Y1 == 1, ]$gamma
  ## Fix: index the condition column with [[i]] (was true_gammas[[1]], an
  ## apparent typo; harmless only because all list elements share the same
  ## row layout).
  true_gammas[[i]][gam$cond == "Y2" & gam$C1 == c1val, ]$Y2avg <-
    c(y1_wts %*% rbind(g_y1_0, g_y1_1))
}
}
# For each row of `mat`, compare the final column (a simulation-based
# estimate) against both the true `gamma` value and the Y1-averaged
# `Y2avg` value, and report the smaller absolute discrepancy, rounded
# to two decimal places, as a one-column data frame named "error".
checkmat<-function(mat){
  est <- mat[, dim(mat)[2]]
  dev_from_gamma <- abs(est - mat$gamma)
  dev_from_avg <- abs(est - mat$Y2avg)
  per_row_min <- round(pmin(dev_from_gamma, dev_from_avg), 2)
  data.frame("error" = per_row_min)
}
## Build a table of estimation errors for one estimator column (`colname`):
## for each scenario, drop NA rows, deduplicate on the estimated-gamma
## column, and append one error_<i> column per scenario computed by
## checkmat() (min of |estimate - gamma| and |estimate - Y2avg|).
## NOTE(review): the loop is hard-coded to the first 10 scenarios; the
## commented-out line suggests looping over all of true_gammas -- confirm
## whether the restriction is intentional.
errorcol<-function( colname="simgamma_wY1_wH"){
for(i in 1:10){
# for(i in 1:length(true_gammas)){
dat<-na.omit(true_gammas[[i]][,c("H", "C1", "C2", "X1", "X2", "Y1", "Y2", "Y2avg","cond", "gamma", colname)])
if(i==1){dat_check<-data.frame(dat[!duplicated(dat[,c(colname)]),])
e<-checkmat (dat[!duplicated(dat[,c(colname)]),])
colnames(e)<-paste("error",i,sep="_")
dat_check <-data.frame(dat_check,e)
}
if(i>1){
e<-checkmat (dat[!duplicated(dat[,c(colname)]),])
colnames(e)<-paste("error",i,sep="_")
dat_check <-data.frame(dat_check,e)}}
## Return everything except the estimator column itself.
return(dat_check[,c(!colnames(dat_check)==colname)])}
########################################################################
## Collect the error tables for all four estimator variants.
errorList<-list(E_wY1_wH=errorcol("simgamma_wY1_wH"),
E_Y1_wH=errorcol("simgamma_Y1_wH"),
E_Y1_H=errorcol("simgamma_Y1_H"),
E_wY1_H=errorcol("simgamma_wY1_H"))
########################################################################
alpha<-0.1
power <- matrix(0, dim(dsgn)[1], 7)
for (i in (dim(dsgn)[1]:1)) {
power[i, ] <- c(i,
mean(Bayesresult[Bayesresult[, 1] == i, ][, 3]),
mean(Bayesresult[Bayesresult[, 1] == i, ][, 4] <= alpha),
mean(Bayesresult[Bayesresult[, 1] == i, ][, 5] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 6] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 7] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 8] <= alpha))
}
colnames(power)<-c("i","diff_theta","with Y1 with H","without Y1 with H","with Y1 without H","without Y1 without H", "IV")
power
save(errorList, file="~/Documents/gcomp/Rcode/results/errorList_n10000.rda")
save(true_gammas, file="~/Documents/gcomp/Rcode/results/true_gammas_n10000.rda")
save(Bayesresult, file="~/Documents/gcomp/Rcode/results/Bayesresult_n10000.rda")
save(power, file="~/Documents/gcomp/Rcode/results/power_n10000.rda")
| /Rcode/complete2.R | no_license | harlanhappydog/gcomp | R | false | false | 23,592 | r | #######################################################################
#######################################################################
#######################################################################
# Design of experiment
lvls <- list(NA)
fixlev<-c(0,0.5) # levels for not interesting parameters
seqlev<-c(1, 2, 3) # levels for interesting parameters
lvls[[1]] <- c(0) # violation
lvls[[2]] <- seqlev
lvls[[3]] <- c(0)
lvls[[4]] <- 0.5
lvls[[5]] <- seqlev
lvls[[6]] <- 0
lvls[[7]] <- c(0) # violation
lvls[[8]] <- c(0)
lvls[[9]] <- c(0)
lvls[[10]] <- c(0)
lvls[[11]] <- seqlev
interceptc<-(0) # intercept for C1 and C2
intercepty<-(-1) # intercept for Y1 and Y2
intercepth<-(-1) #intercept for H
interceptz<-0.5
interceptx<-0.5
### factorial design
dsgn <- as.matrix(expand.grid(lvls[[1]], lvls[[2]], lvls[[3]], lvls[[4]], lvls[[5]], lvls[[6]], lvls[[7]], lvls[[8]], lvls[[9]], lvls[[10]], lvls[[11]]))
dim(dsgn)
dim(dsgn)[1]
# only null
#dsgn<-dsgn[dsgn[,5]==0|dsgn[,6]==0,]
diff_theta<-rep(0,dim(dsgn)[1])
########################################################################
### CALCULATE TRUE VALUES OF diff
## For every scenario i, compute the true counterfactual probabilities
## theta(x) = P(Y2 = 1 | do(X1 = X2 = x)) by g-computation: sum the product
## P(C1) * P(Y1 | C1, X1) * P(C2 | C1, X1) * P(Y2 | C2, X2, Y1) over all
## (c1, y1, c2) configurations, with H and Z held at 0 (myH, myZ).
## The exact conditional probabilities are also recorded in true_gammas[[i]].
matt <- matrix(0, dim(dsgn)[1], 2)
true_gammas<-list()
for (i in c(1:dim(dsgn)[1])) {
truetheta11 <- 0
truetheta00 <- 0
true_gammas[[i]]<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(true_gammas[[i]])<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
myH<-0
myZ<-0
for(theta in c(0,1)){
kk <- 1
individual <- matrix(0, 8, 8)
if(theta==0){ myX1<-myX2<-0}
if(theta==1){ myX1<-myX2<-1}
for (c2i in 0:1) {
for (y1i in 0:1) {
for (c1i in 0:1) {
individual[kk, 1:3] <- c(y1i, c1i, c2i)
# P(C1=1 or =0)
pc1<-1/(1+exp(-(interceptc )))
aa <- (as.numeric(c1i == 1) * (pc1) + as.numeric(c1i == 0) * (1-pc1))
true_gammas[[i]][true_gammas[[i]]$cond=="C1" & true_gammas[[i]]$C1== c1i,]$gamma<-aa
# P(Y1=1 or =0|C1,X1)
py1<-1/(1+exp(-(intercepty + dsgn[i, 5]*c1i + dsgn[i, 3]*myX1 + dsgn[i, 8]*myH)))
bb <- as.numeric(y1i == 1) * (py1) + as.numeric(y1i == 0) * (1 - py1)
true_gammas[[i]][true_gammas[[i]]$cond=="Y1" &
true_gammas[[i]]$Y1== y1i & true_gammas[[i]]$X1==myX1 & true_gammas[[i]]$C1==c1i,]$gamma<-bb
# P(C2=1 or =0|C1,X1)
pc2<-(1/(1+exp(-(interceptc + dsgn[i, 4]*c1i + dsgn[i, 6]*myX1))))
cc <- as.numeric(c2i == 1) * (pc2) + as.numeric(c2i == 0)*(1-pc2)
true_gammas[[i]][true_gammas[[i]]$cond=="C2" &
true_gammas[[i]]$C1== c1i & true_gammas[[i]]$C2== c2i & true_gammas[[i]]$X1== myX1,]$gamma<-cc
# P(Y2=1|C2,X2,Y1): deterministically 1 when Y1 = 1 (monotone outcome).
dd <- (as.numeric(y1i == 1) * 1 + as.numeric(y1i == 0) *
( (1/(1+exp(-(intercepty+dsgn[i, 9]*myZ + dsgn[i, 5]*c2i + dsgn[i, 3]*myX2 + dsgn[i, 8]*myH)))) ))
true_gammas[[i]][true_gammas[[i]]$cond=="Y2" &
true_gammas[[i]]$Y2== 1 & true_gammas[[i]]$C2== c2i & true_gammas[[i]]$X2== myX2 &
true_gammas[[i]]$Y1== y1i,]$gamma<-dd
individual[kk, 5:8] <- c(aa, bb, cc, dd)
# Product
individual[kk, 4] <- aa * bb * cc * dd
if(theta==1){ truetheta11 <- truetheta11 + individual[kk, 4] }
## BUG FIX: this guard previously tested theta==1 again, so truetheta00
## duplicated truetheta11 (never accumulating under theta = 0), which
## forced diff_theta to be identically 0 across all scenarios.
if(theta==0){ truetheta00 <- truetheta00 + individual[kk, 4] }
kk <- kk + 1
}
}
}
matt[i, ] <- c(truetheta00, truetheta11)}
# For H marginals
for (h1i in 0:1) {
# P(H=1 or =0)
ph1<-1/(1+exp(-(intercepth )))
hh <- (as.numeric(h1i == 1) * (ph1) + as.numeric(h1i == 0) * (1-ph1))
true_gammas[[i]][true_gammas[[i]]$cond=="H" & true_gammas[[i]]$H== h1i,]$gamma<-hh
}
}
## Scenario-level true causal effect: theta(1) - theta(0), rounded to 5 dp.
diff_theta<-round(matt[,2]-matt[,1],5)
# confirming that every event has a plausible chance of occurring:
## NOTE(review): the constant 32 appears to offset rows whose gamma was left
## NA (NA comparisons add rows under data.frame logical subsetting) -- confirm.
## A positive printed value flags a scenario with near-degenerate (> 0.95 or
## < 0.05) event probabilities, whose design row is then printed.
for(i in 1:dim(dsgn)[1]) {
print(dim(true_gammas[[i]][true_gammas[[i]]$gamma>0.95 | true_gammas[[i]]$gamma<0.05,])[1]-32)
if((dim(true_gammas[[i]][true_gammas[[i]]$gamma>0.95 | true_gammas[[i]]$gamma<0.05,])[1]-32)>0){print(dsgn[i,])}
}
########################################################################
# Functions
########################################################################
# Posterior draw for P(C2 = c2 | H in h, C1 in c1, X1 in x1) under a
# uniform Beta(1, 1) prior, using the global `simdata`. Returns `nm`
# draws in [0, 1].
C2probB <- function(h=c(0,1), c2=c(1), c1=c(0,1), x1=c(0,1), nm = 1) {
  keep <- simdata$H %in% h & simdata$C1 %in% c1 & simdata$X1 %in% x1
  c2_obs <- simdata$C2[keep]
  successes <- sum(c2_obs)
  trials <- length(c2_obs)
  # Beta(1, 1) prior + binomial likelihood -> Beta posterior draw(s).
  draw <- as.numeric(rbeta(nm, successes + 1, trials - successes + 1))
  if (c2 %in% 0) {
    return(1 - draw)
  }
  if (c2 %in% 1) {
    return(draw)
  }
}
# Posterior draw for P(Y2 = y2 | H, C2, X2, Y1, C1, X1 in the requested
# strata) under a uniform Beta(1, 1) prior, using the global `simdata`.
# Returns `nm` draws in [0, 1].
Y2probB<-function(h=c(0,1), y2=c(1),c2=c(0,1),x2=c(0,1),y1=c(0,1), c1=c(0,1), x1=c(0,1), nm=1){
  # Rows matching all six conditioning strata.
  keep <- simdata$H %in% h &
    simdata$C2 %in% c2 & simdata$X2 %in% x2 &
    simdata$Y1 %in% y1 & simdata$C1 %in% c1 & simdata$X1 %in% x1
  y2_obs <- simdata$Y2[keep]
  successes <- sum(y2_obs)
  trials <- length(y2_obs)
  # Beta(1, 1) prior + binomial likelihood -> Beta posterior draw(s).
  draw <- as.numeric(rbeta(nm, successes + 1, trials - successes + 1))
  if (y2 %in% 0) {
    return(1 - draw)
  }
  if (y2 %in% 1) {
    return(draw)
  }
}
# Posterior draw for P(Y1 = y1 | H in h, C1 in c1, X1 in x1) under a
# uniform Beta(1, 1) prior, using the global `simdata`. Returns `nm`
# draws in [0, 1].
Y1probB<-function(h=c(0,1), y1=1,c1=c(0,1),x1=c(0,1),nm=1){
  keep <- simdata$H %in% h & simdata$X1 %in% x1 & simdata$C1 %in% c1
  y_obs <- simdata$Y1[keep]
  successes <- sum(y_obs)
  trials <- length(y_obs)
  # Beta(1, 1) prior + binomial likelihood -> Beta posterior draw(s).
  draw <- as.numeric(rbeta(nm, successes + 1, trials - successes + 1))
  if (y1 %in% 0) {
    return(1 - draw)
  }
  if (y1 %in% 1) {
    return(draw)
  }
}
# Posterior draw for P(C1 = c1 | H in h) under a uniform Beta(1, 1) prior,
# computed from the global `simdata`. Returns `nm` draws in [0, 1].
C1probB <- function(h=c(0,1), c1=1, nm = 1) {
  c1_obs <- simdata$C1[simdata$H %in% h]
  successes <- sum(c1_obs)
  trials <- length(c1_obs)
  # Beta(1, 1) prior + binomial likelihood -> Beta posterior draw(s).
  draw <- as.numeric(rbeta(nm, successes + 1, trials - successes + 1))
  if (c1 %in% 0) {
    return(1 - draw)
  }
  if (c1 %in% 1) {
    return(draw)
  }
}
# Posterior draw for the marginal P(H = h) under a uniform Beta(1, 1)
# prior, computed from the global `simdata`. Returns `nm` draws in [0, 1].
HprobB <- function(h=1, nm = 1) {
  n_ones <- sum(simdata$H)
  n_total <- length(simdata$H)
  # Beta(1, 1) prior + binomial likelihood -> Beta posterior draw(s).
  draw <- as.numeric(rbeta(nm, n_ones + 1, n_total - n_ones + 1))
  if (h == 0) {
    return(1 - draw)
  }
  if (h == 1) {
    return(draw)
  }
}
########################################################################
# SIMULATIONS STUDY
########################################################################
########################################################################
## Simulation bookkeeping and result containers.
## kkk is the running row index into Bayesresult across all (scenario, dataset) pairs.
ppp <- 1
kkk <- 1
## NOTE(review): `ppp`, `est` and `result` are initialised here but never
## used in the visible part of the script -- possibly leftovers; confirm
## before removing.
est <- matrix(0, dim(dsgn)[1], 2)
result <- matrix(0, dim(dsgn)[1], 2)
numsim <- 100 ### number of datasets generated under each scenario
n <- 10000 ### size of each dataset
set.seed(13) ### for reproducibility
nB<-45 ### number of MC samples
## One row per (scenario, dataset): i, j, diff_theta, four posterior
## one-sided p-values, and the IV/regression benchmark p-value.
Bayesresult <- matrix(0, dim(dsgn)[1] * numsim, 8)
########################################################################
## Column means of the most recent dataset simulated under each scenario.
simdatasum<-matrix(0,dim(dsgn)[1],8)
colnames(simdatasum)<-c( "H", "Z", "C1", "X1", "C2", "Y1", "X2", "Y2")
## Per-scenario containers for posterior-mean gamma estimates, one per
## estimator variant (with/without Y1, with/without H); filled in the loop below.
my_gammas_wY1_wH<-list()
my_gammas_wY1_H<-list()
my_gammas_Y1_wH<-list()
my_gammas_Y1_H<-list()
## Outer loop: one simulation scenario per row of dsgn.
for (i in c(1:dim(dsgn)[1])) {
#for (i in 1) {
## Per-scenario storage: one column of MC-averaged gamma draws per dataset,
## for each of the four estimator variants (with/without Y1, with/without H).
my_gammas_wY1_wH[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_wY1_H[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_Y1_wH[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
my_gammas_Y1_H[[i]]<-matrix(0, dim(true_gammas[[1]])[1],numsim)
for (j in 1:numsim) { ### loop over datasets
print(c(i, j))
print(kkk/(dim(dsgn)[1] * numsim))
## Simulate one dataset of size n from the logistic structural model;
## dsgn[i, ] supplies the scenario-specific coefficients.
H <- rbinom(n, size = 1, prob = (1/(1+exp(-(intercepth)))) )
Z <- rbinom(n, 1, (1/(1+exp(-(interceptz + dsgn[i, 10]*H)))) )
C1 <- rbinom(n, size = 1, prob = (1/(1+exp(-(interceptc)))) )
X1<- rbinom(n, 1, (1/(1+exp(-(interceptx + dsgn[i, 1]*C1 + dsgn[i, 2]*H)))) )
C2 <-rbinom(n, 1, (1/(1+exp(-(interceptc + dsgn[i, 4]*C1 + dsgn[i, 6]*X1)))) )
Y1 <-rbinom(n, 1, (1/(1+exp(-(intercepty + dsgn[i, 5]*C1 + dsgn[i, 3]*X1 + dsgn[i, 8]*H)))) )
X2 <-rbinom(n, 1, (1/(1+exp(-(interceptx + dsgn[i, 1]*C2 + dsgn[i, 2]*H + dsgn[i, 7]*Y1 + dsgn[i, 9]*Z + dsgn[i, 11]*X1)))) )
Y2<-rbinom(n, 1, (1/(1+exp(-(intercepty + dsgn[i, 9]*Z + dsgn[i, 5]*C2 + dsgn[i, 3]*X2 + dsgn[i, 8]*H)))) )
## Monotonicity constraint: an outcome at time 1 persists at time 2.
Y2[Y1 == 1]<-1
simdata <- data.frame(H,Z,C1,X1,C2,Y1,X2,Y2)
simdatasum[i,]<-colMeans(simdata)
########################################################################
print("BAYESIAN APPROACH: WITHOUT Y1, WITHOUT H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
## Lookup table of posterior conditional-probability draws for this variant.
gammas_wY1_wH<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_wY1_wH)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
## Start of MC
gammas<-matrix(0,dim(gammas_wY1_wH)[1], nB)
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i <-x2i
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma<-C1probB(c1=c1i)
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i &
gammas_wY1_wH$X1== x1i,]$gamma<-C2probB(c1=c1i,c2=c2i,x1=x1i)
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i &
gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma<-Y2probB(y2=y2i, c1=c1i, c2=c2i, x1=x1i, x2=x2i)
}
}
}
## G-computation sums: theta(1) sets X1 = X2 = 1, theta(0) sets X1 = X2 = 0.
sumtheta11B <- 0
sumtheta00B <- 0
x1i<-1
x2i<-1
y2i<-1
for (x1i in 0:1) {
x2i<-x1i
for (c2i in 0:1) {
for (c1i in 0:1) {
if(x1i==1){ sumtheta11B <- sumtheta11B +
unique(gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma)}
if(x1i==0){ sumtheta00B <- sumtheta00B +
unique(gammas_wY1_wH[gammas_wY1_wH$cond=="C1" & gammas_wY1_wH$C1== c1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="C2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i,]$gamma*
gammas_wY1_wH[gammas_wY1_wH$cond=="Y2" & gammas_wY1_wH$C1== c1i & gammas_wY1_wH$C2== c2i & gammas_wY1_wH$X1== x1i & gammas_wY1_wH$X2== x2i & gammas_wY1_wH$Y2== y2i,]$gamma)}
}
}
}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_wY1_wH$gamma } ## End of MC
my_gammas_wY1_wH[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_wY1_wH <- sumtheta00Bvec
sumtheta11Bvec_wY1_wH <- sumtheta11Bvec
#################################################################
print("BAYESIAN APPROACH WITH Y1, WITHOUT H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_Y1_wH<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_Y1_wH)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_Y1_wH)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (y1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma<-C1probB(c1=c1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma<-Y1probB(y1=y1i, c1=c1i, x1=x1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma<-C2probB(c2=c2i, c1=c1i, x1=x1i)
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma<-Y2probB(y2=y2i, c2=c2i, x2=x2i, y1=y1i)
}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(y1i in 0:1){
for(c1i in 0:1){
if(x1i==1){
sumtheta11B<-sumtheta11B+
unique(gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma)
}
if(x1i==0){
sumtheta00B<-sumtheta00B+
unique(gammas_Y1_wH[gammas_Y1_wH$cond=="C1" & gammas_Y1_wH$C1== c1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y1" & gammas_Y1_wH$Y1== y1i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="C2" & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$C1== c1i & gammas_Y1_wH$X1== x1i,]$gamma*
gammas_Y1_wH[gammas_Y1_wH$cond=="Y2" & gammas_Y1_wH$Y2== y2i & gammas_Y1_wH$C2== c2i & gammas_Y1_wH$X2== x2i & gammas_Y1_wH$Y1== y1i,]$gamma)
}
}}}}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_Y1_wH$gamma} ## End of MC
my_gammas_Y1_wH[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_Y1_wH <- sumtheta00Bvec
sumtheta11Bvec_Y1_wH <- sumtheta11Bvec
Bpval_Y1_wH <- 1 - mean((sumtheta11Bvec_Y1_wH - sumtheta00Bvec_Y1_wH) > 0)
#########################################################################
print("BAYESIAN APPROACH WITHOUT Y1 WITH H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_wY1_H<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_wY1_H)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_wY1_H)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (h1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma<-HprobB(h1i)
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma<-C1probB(h=h1i ,c1=c1i)
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma<-C2probB(h=h1i, c2=c2i, c1=c1i, x1=x1i)
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma<-Y2probB(h=h1i, y2=y2i, c1=c1i, c2=c2i, x1=x1i, x2=x2i)
}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(h1i in 0:1){
for(c1i in 0:1){
if(x1i==1){ sumtheta11B<-sumtheta11B+
unique(gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma)
}
if(x1i==0){ sumtheta00B<-sumtheta00B+
unique(gammas_wY1_H[gammas_wY1_H$cond=="H" & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C1" & gammas_wY1_H$C1== c1i & gammas_wY1_H$H== h1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="C2" & gammas_wY1_H$H== h1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$X1== x1i,]$gamma*
gammas_wY1_H[gammas_wY1_H$cond=="Y2" & gammas_wY1_H$H== h1i & gammas_wY1_H$Y2== y2i & gammas_wY1_H$C1== c1i & gammas_wY1_H$C2== c2i & gammas_wY1_H$X2== x2i & gammas_wY1_H$X1== x1i,]$gamma)
}
}}}}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_wY1_H$gamma } ## End of MC
my_gammas_wY1_H[[i]][,j] <- rowMeans(gammas[,1:nB])
sumtheta00Bvec_wY1_H <- sumtheta00Bvec
sumtheta11Bvec_wY1_H <- sumtheta11Bvec
##############################################################
###############################################################
# ##
print("BAYESIAN APPROACH WITH Y1 WITH H (uniform priors):")
sumtheta11Bvec<-rep(0,nB)
sumtheta00Bvec<-rep(0,nB)
gammas_Y1_H<-data.frame(expand.grid(c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(0,1),c(1), c("H","C1","C2","Y1","Y2"), c(NA)))
colnames(gammas_Y1_H)<-c("H","C1","C2","X1","X2","Y1","Y2", "cond", "gamma")
gammas<-matrix(0,dim(gammas_Y1_H)[1], nB)
## Start of MC
for(sw in 1:nB){
y2i<-1
for (x2i in 0:1) {
x1i<-x2i
for (y1i in 0:1) {
for (h1i in 0:1) {
for (c2i in 0:1) {
for (c1i in 0:1) {
gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma<-HprobB(h1i)
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$C1== c1i & gammas_Y1_H$H== h1i,]$gamma<-C1probB(h=h1i ,c1=c1i)
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma<-Y1probB(h=h1i, y1=y1i, c1=c1i, x1=x1i)
gammas_Y1_H[gammas_Y1_H$cond=="C2" & gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma<-C2probB(h=h1i, c2=c2i, c1=c1i, x1=x1i)
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma<-Y2probB(h=h1i, y2=y2i, c2=c2i, x2=x2i, y1=y1i)
}}}}}
sumtheta11B<-0
sumtheta00B<-0
y2i<-1
for(x1i in 0:1){
x2i<-x1i
for(c2i in 0:1){
for(y1i in 0:1){
for(c1i in 0:1){
for (h1i in 0:1) {
if(x1i==1){ sumtheta11B<-sumtheta11B+
unique(gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$C1== c1i & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C2" & gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma)
}
if(x1i==0){ sumtheta00B<-sumtheta00B+
unique(gammas_Y1_H[gammas_Y1_H$cond=="H" & gammas_Y1_H$H== h1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C1" & gammas_Y1_H$H== h1i & gammas_Y1_H$C1== c1i ,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="C2"& gammas_Y1_H$H== h1i & gammas_Y1_H$C2== c2i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y1" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y1== y1i & gammas_Y1_H$C1== c1i & gammas_Y1_H$X1== x1i,]$gamma*
gammas_Y1_H[gammas_Y1_H$cond=="Y2" & gammas_Y1_H$H== h1i & gammas_Y1_H$Y2== y2i & gammas_Y1_H$C2== c2i & gammas_Y1_H$X2== x2i & gammas_Y1_H$Y1== y1i,]$gamma)
}}}}}
}
sumtheta11Bvec[sw]<-sumtheta11B
sumtheta00Bvec[sw]<-sumtheta00B
gammas[,sw]<-gammas_Y1_H$gamma } ## End of MC
my_gammas_Y1_H[[i]][,j]<-rowMeans(gammas[,1:nB])
sumtheta00Bvec_Y1_H <- sumtheta00Bvec
sumtheta11Bvec_Y1_H <- sumtheta11Bvec
#########################################
#mod1<-(glm(Y2 ~ H*X1*X2*Y1*C1*C2, data = simdata, family = "binomial"))
#mod2<-(glm(Y2 ~ H*Y1*C1*C2, data = simdata, family = "binomial"))
#library(lmtest)
#pval<-(lrtest(mod2,mod1))["Pr(>Chisq)"]
#print(pval)
#one-sided p-values:
## Posterior probability that theta(1) <= theta(0), used as a one-sided p-value.
Bpval_wY1_wH <- 1 - mean((sumtheta11Bvec_wY1_wH - sumtheta00Bvec_wY1_wH) > 0)
Bpval_Y1_wH <- 1 - mean((sumtheta11Bvec_Y1_wH - sumtheta00Bvec_Y1_wH) > 0)
Bpval_wY1_H <- 1 - mean((sumtheta11Bvec_wY1_H - sumtheta00Bvec_wY1_H) > 0)
Bpval_Y1_H <- 1 - mean((sumtheta11Bvec_Y1_H - sumtheta00Bvec_Y1_H) > 0)
## Benchmark: one-sided Wald p-value from a marginal logistic regression of Y2 on H.
Hpval <- (1-pnorm((summary(glm(Y2 ~ H, data = simdata, family = "binomial"))$coefficients[2, 3])))
Bayesresult[kkk, ] <- c(i, j, diff_theta[i], Bpval_Y1_H, Bpval_wY1_H, Bpval_Y1_wH, Bpval_wY1_wH, Hpval)
print(Bayesresult[kkk, ])
kkk <- kkk + 1
}
## Average the estimated gammas over all simulated datasets for this scenario.
true_gammas[[i]]$simgamma_wY1_wH<-rowMeans(my_gammas_wY1_wH[[i]][,1:j])
true_gammas[[i]]$simgamma_Y1_wH<-rowMeans(my_gammas_Y1_wH[[i]][,1:j])
true_gammas[[i]]$simgamma_wY1_H<-rowMeans(my_gammas_wY1_H[[i]][,1:j])
true_gammas[[i]]$simgamma_Y1_H<-rowMeans(my_gammas_Y1_H[[i]][,1:j])
}
########################################################################
## Are all gamma parameters correctly estimated?
# Average the Y2 gammas over the empirical distribution of Y1 (within C1
# strata) so simulated and true parameters can be compared on one scale.
for(i in seq_along(true_gammas)){
true_gammas[[i]]$Y2avg<-0
# BUG FIX: the row filter previously indexed true_gammas[[1]]$cond instead of
# true_gammas[[i]]$cond; every other reference in this loop uses [[i]].
true_gammas[[i]][true_gammas[[i]]$cond=="Y2" & true_gammas[[i]]$C1==0,]$Y2avg<-c(
c(table(simdata[simdata$C1==0,]$Y1)/sum(table(simdata[simdata$C1==0,]$Y1)))%*%rbind(true_gammas[[i]][true_gammas[[i]]$cond=="Y2" & true_gammas[[i]]$Y1==0 & true_gammas[[i]]$C1==0,]$gamma,true_gammas[[i]][true_gammas[[i]]$cond=="Y2" &true_gammas[[i]]$Y1==1,]$gamma))
# NOTE(review): the second rbind() row filters only on Y1==1 (no C1 term),
# unlike the Y1==0 row -- presumably intentional, but worth confirming.
true_gammas[[i]][true_gammas[[i]]$cond=="Y2" & true_gammas[[i]]$C1==1,]$Y2avg<-c(
c(table(simdata[simdata$C1==1,]$Y1)/sum(table(simdata[simdata$C1==1,]$Y1)))%*%rbind(true_gammas[[i]][true_gammas[[i]]$cond=="Y2" & true_gammas[[i]]$Y1==0 & true_gammas[[i]]$C1==1,]$gamma,true_gammas[[i]][true_gammas[[i]]$cond=="Y2" &true_gammas[[i]]$Y1==1,]$gamma))
}
# For each row, compare the last column of `mat` (an estimated gamma) against
# both reference columns (`gamma` and `Y2avg`) and report the smaller absolute
# deviation, rounded to 2 decimals, as a one-column data frame named "error".
checkmat<-function(mat){
estimate <- mat[, ncol(mat)]
smallest_dev <- pmin(abs(estimate - mat$gamma), abs(estimate - mat$Y2avg))
data.frame(error = round(smallest_dev, 2))
}
# Build a wide table of per-row estimation errors for one simulated-gamma
# column: for each design i, de-duplicate the rows of true_gammas[[i]] on
# `colname`, score them with checkmat(), and cbind the resulting error
# column (error_i) onto the accumulated table.
# Relies on the global `true_gammas` list and the checkmat() helper.
# NOTE(review): the loop is hard-coded to 1:10 (the generic
# 1:length(true_gammas) version is commented out below) -- confirm that
# exactly 10 designs are always present before reusing this.
errorcol<-function( colname="simgamma_wY1_wH"){
for(i in 1:10){
# for(i in 1:length(true_gammas)){
# keep only complete rows for the columns we score
dat<-na.omit(true_gammas[[i]][,c("H", "C1", "C2", "X1", "X2", "Y1", "Y2", "Y2avg","cond", "gamma", colname)])
# first iteration initializes dat_check with the de-duplicated rows
if(i==1){dat_check<-data.frame(dat[!duplicated(dat[,c(colname)]),])
e<-checkmat (dat[!duplicated(dat[,c(colname)]),])
colnames(e)<-paste("error",i,sep="_")
dat_check <-data.frame(dat_check,e)
}
# subsequent iterations only append their error_i column
if(i>1){
e<-checkmat (dat[!duplicated(dat[,c(colname)]),])
colnames(e)<-paste("error",i,sep="_")
dat_check <-data.frame(dat_check,e)}}
# drop the estimate column itself; keep identifiers + error_1..error_10
return(dat_check[,c(!colnames(dat_check)==colname)])}
########################################################################
# Estimation-error tables, one per model variant (with/without Y1, H)
errorList<-list(E_wY1_wH=errorcol("simgamma_wY1_wH"),
E_Y1_wH=errorcol("simgamma_Y1_wH"),
E_Y1_H=errorcol("simgamma_Y1_H"),
E_wY1_H=errorcol("simgamma_wY1_H"))
########################################################################
# Power summary: for each design row i, the mean diff_theta and the
# rejection rate (one-sided p-value <= alpha) of each of the five tests
# stored in Bayesresult columns 4..8.
alpha<-0.1
power <- matrix(0, dim(dsgn)[1], 7)
for (i in (dim(dsgn)[1]:1)) {
power[i, ] <- c(i,
mean(Bayesresult[Bayesresult[, 1] == i, ][, 3]),
mean(Bayesresult[Bayesresult[, 1] == i, ][, 4] <= alpha),
mean(Bayesresult[Bayesresult[, 1] == i, ][, 5] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 6] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 7] <= alpha),
mean(Bayesresult[Bayesresult[,1] == i, ][, 8] <= alpha))
}
colnames(power)<-c("i","diff_theta","with Y1 with H","without Y1 with H","with Y1 without H","without Y1 without H", "IV")
power
# Persist all result objects; paths are machine-specific (home directory).
save(errorList, file="~/Documents/gcomp/Rcode/results/errorList_n10000.rda")
save(true_gammas, file="~/Documents/gcomp/Rcode/results/true_gammas_n10000.rda")
save(Bayesresult, file="~/Documents/gcomp/Rcode/results/Bayesresult_n10000.rda")
save(power, file="~/Documents/gcomp/Rcode/results/power_n10000.rda")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rna_visualize.R
\name{rna_visualize}
\alias{rna_visualize}
\title{RNAseq Visualize}
\usage{
rna_visualize(rna_data, method = "hist", lib = "ggplot")
}
\description{
Visualize
https://www.rdocumentation.org/packages/edgeR/versions/3.14.0/topics/plotSmear
}
\examples{
rna_visualize(rna_data, method="hist", lib="base")
rna_visualize(rna_data, method="hist", lib="ggplot")
rna_visualize(rna_data, method="boxplot", lib="base")
rna_visualize(rna_data, method="boxplot", lib="ggplot")
rna_visualize(rna_data, method="density", lib="base")
rna_visualize(rna_data, method="density", lib="ggplot")
rna_visualize(rna_data, method="cluster-gene", lib="base")
rna_visualize(rna_data, method="cluster-gene", lib="ggplot")
rna_visualize(rna_data, method="cluster-expt", lib="base")
rna_visualize(rna_data, method="cluster-expt", lib="ggplot")
rna_visualize(rna_data, method="MA", lib="base")
rna_visualize(rna_data, method="MA", lib="ggplot")
rna_visualize(rna_data, method="smear", lib="base")
rna_visualize(rna_data, method="smear", lib="ggplot")
rna_visualize(rna_data, method="MDS", lib="base")
rna_visualize(rna_data, method="MDS", lib="ggplot")
rna_visualize(rna_data, method="PCA", lib="base")
rna_visualize(rna_data, method="PCA", lib="ggplot")
rna_visualize(rna_data, method="BCV", lib="base")
rna_visualize(rna_data, method="BCV", lib="ggplot")
rna_visualize(rna_data, method="dispersion", lib="base")
rna_visualize(rna_data, method="dispersion", lib="ggplot")
rna_visualize(rna_data, method="volcano", lib="base")
rna_visualize(rna_data, method="volcano", lib="ggplot")
}
| /man/rna_visualize.Rd | permissive | htnani/rnaseq.work | R | false | true | 1,687 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rna_visualize.R
\name{rna_visualize}
\alias{rna_visualize}
\title{RNAseq Visualize}
\usage{
rna_visualize(rna_data, method = "hist", lib = "ggplot")
}
\description{
Visualize
https://www.rdocumentation.org/packages/edgeR/versions/3.14.0/topics/plotSmear
}
\examples{
rna_visualize(rna_data, method="hist", lib="base")
rna_visualize(rna_data, method="hist", lib="ggplot")
rna_visualize(rna_data, method="boxplot", lib="base")
rna_visualize(rna_data, method="boxplot", lib="ggplot")
rna_visualize(rna_data, method="density", lib="base")
rna_visualize(rna_data, method="density", lib="ggplot")
rna_visualize(rna_data, method="cluster-gene", lib="base")
rna_visualize(rna_data, method="cluster-gene", lib="ggplot")
rna_visualize(rna_data, method="cluster-expt", lib="base")
rna_visualize(rna_data, method="cluster-expt", lib="ggplot")
rna_visualize(rna_data, method="MA", lib="base")
rna_visualize(rna_data, method="MA", lib="ggplot")
rna_visualize(rna_data, method="smear", lib="base")
rna_visualize(rna_data, method="smear", lib="ggplot")
rna_visualize(rna_data, method="MDS", lib="base")
rna_visualize(rna_data, method="MDS", lib="ggplot")
rna_visualize(rna_data, method="PCA", lib="base")
rna_visualize(rna_data, method="PCA", lib="ggplot")
rna_visualize(rna_data, method="BCV", lib="base")
rna_visualize(rna_data, method="BCV", lib="ggplot")
rna_visualize(rna_data, method="dispersion", lib="base")
rna_visualize(rna_data, method="dispersion", lib="ggplot")
rna_visualize(rna_data, method="volcano", lib="base")
rna_visualize(rna_data, method="volcano", lib="ggplot")
}
|
################################ Permanent ################################
############################ Vulnerable plants ############################
# Sweep wLr over a narrow grid, run the below-/above-threshold optimizers
# (optbelowf/optabovef come from the sourced Functions.r) at each grid point,
# and plot both optimizer locations against the identity line.
options(digits=20)
source("Permanent/Vulnerable plants/Functions.r")
# Results
ca <- 400
k <- 0.025
MAP <- 216
x <- seq(0.218, 0.219, by=0.0002)
# preallocate result vectors, one slot per grid point
resLmax <- vector(mode="numeric", length=length(x))
resHmax <- vector(mode="numeric", length=length(x))
resLvalue <- vector(mode="numeric", length=length(x))
resHvalue <- vector(mode="numeric", length=length(x))
# seq_along(x) instead of 1:length(x): safe even for a zero-length grid
for (i in seq_along(x)){
  resL <- optbelowf(x[i])
  resLmax[i] <- resL$maximum
  resLvalue[i] <- resL$objective
  resH <- optabovef(x[i])
  resHmax[i] <- resH$maximum
  resHvalue[i] <- resH$objective
  message(x[i])  # progress indicator
}
res <- as.data.frame(cbind(x, resLmax, resHmax, resLvalue, resHvalue))
colnames(res) <- c("wLr", "resLmax", "resHmax", "resLvalue", "resHvalue")
#write.csv(res, "Permanent/Vulnerable plants/LHS maxmum & RHS maximum.csv", row.names = FALSE)
# Figure
# NOTE(review): windows() is Windows-only; dev.new() would be portable.
windows(8, 6)
par(mgp=c(2.2, 1, 0), xaxs="i", yaxs="i", mar=c(4, 4, 3, 2), mfrow=c(1,1))
plot(res$wLr, res$resLmax, xlim=c(head(res$wLr, 1), tail(res$wLr, 1)), lwd=2, ylim=c(head(res$wLr, 1), tail(res$wLr, 1)), type="l",
     xlab=expression(italic(w[Lr])), ylab=expression(italic(w[Li])),
     cex.lab=1.3, col="blue")
points(res$wLr, res$resHmax, type="l", cex.lab=1.3, col="red", lwd=2)
abline(a=0, b=1)
| /Permanent/Vulnerable Plants/Figure - LHS maximum vs. RHS maximum.r | no_license | YaojieLu/Xylem-damage | R | false | false | 1,388 | r |
################################ Permanent ################################
############################ Vulnerable plants ############################
options(digits=20)
source("Permanent/Vulnerable plants/Functions.r")
# Results
ca <- 400
k <- 0.025
MAP <- 216
x <- seq(0.218, 0.219, by=0.0002)
resLmax <- vector(mode="numeric", length=length(x))
resHmax <- vector(mode="numeric", length=length(x))
resLvalue <- vector(mode="numeric", length=length(x))
resHvalue <- vector(mode="numeric", length=length(x))
for (i in 1:length(x)){
resL <- optbelowf(x[i])
resLmax[i] <- resL$maximum
resLvalue[i] <- resL$objective
resH <- optabovef(x[i])
resHmax[i] <- resH$maximum
resHvalue[i] <- resH$objective
message(x[i])
}
res <- as.data.frame(cbind(x, resLmax, resHmax, resLvalue, resHvalue))
colnames(res) <- c("wLr", "resLmax", "resHmax", "resLvalue", "resHvalue")
#write.csv(res, "Permanent/Vulnerable plants/LHS maxmum & RHS maximum.csv", row.names = FALSE)
# Figure
windows(8, 6)
par(mgp=c(2.2, 1, 0), xaxs="i", yaxs="i", mar=c(4, 4, 3, 2), mfrow=c(1,1))
plot(res$wLr, res$resLmax, xlim=c(head(res$wLr, 1), tail(res$wLr, 1)), lwd=2, ylim=c(head(res$wLr, 1), tail(res$wLr, 1)), type="l",
xlab=expression(italic(w[Lr])), ylab=expression(italic(w[Li])),
cex.lab=1.3, col="blue")
points(res$wLr, res$resHmax, type="l", cex.lab=1.3, col="red", lwd=2)
abline(a=0, b=1)
|
# download nlcd chunk only if it doesn't already exist
# Download one NLCD chunk file into ./nlcd_fst/, skipping the download when
# the file already exists on disk. Called for its side effect; returns NULL
# invisibly.
download_nlcd_chunk <- function(nlcd_chunk_number) {
  dir.create("./nlcd_fst/", showWarnings = FALSE)
  nlcd_file <- glue::glue("./nlcd_fst/nlcd_chunk_{nlcd_chunk_number}.fst")
  if (file.exists(nlcd_file)) {
    message(glue::glue("{nlcd_file} already exists"))
    # FIX: was `invisible(return(NULL))` -- return() exits before invisible()
    # can apply; the correct idiom is return(invisible(NULL)).
    return(invisible(NULL))
  }
  message(glue::glue("downloading s3://geomarker/nlcd/nlcd_chunk_{nlcd_chunk_number}.fst to {nlcd_file}"))
  download.file(
    url = glue::glue(
      "https://geomarker.s3.us-east-2.amazonaws.com/",
      "nlcd/nlcd_fst/", "nlcd_chunk_{nlcd_chunk_number}.fst"
    ),
    destfile = nlcd_file,
    # fst files are binary; without mode = "wb" the download is corrupted
    # on Windows (the default mode is text there)
    mode = "wb"
  )
}
# get raw nlcd values for specific cell number, year, and product
# Return the raw NLCD values for a single cell number across the requested
# years and products, reading one row from the on-disk fst chunk file
# (downloading the chunk first if needed).
#
# nlcd_cell_number: scalar NLCD grid-cell id (a longer vector is truncated
#   to its first element with a warning)
# year: which NLCD release years to pull columns for
# product: which NLCD products to pull columns for
# Returns a one-row tibble with one column per product/year combination.
get_nlcd_data <- function(nlcd_cell_number,
                          year = c(2001, 2006, 2011, 2016),
                          product = c("nlcd", "impervious", "imperviousdescriptor")) {
  if (length(nlcd_cell_number) > 1) {
    warning("nlcd_cell is longer than one; processing only the first value")
    # BUG FIX: the truncated value was previously assigned to an unused local
    # `nlcd_cell`, so the full vector silently leaked into the code below.
    nlcd_cell_number <- nlcd_cell_number[1]
  }
  # cells are stored 1e7 per chunk file; the row within the chunk is the
  # remainder of the cell id
  nlcd_chunk <- nlcd_cell_number %/% 1e+07
  nlcd_row <- nlcd_cell_number %% 1e+07
  # NOTE(review): a cell id that is an exact multiple of 1e7 gives
  # nlcd_row == 0, which read_fst cannot read -- confirm ids are 1-based
  # within each chunk.
  nlcd_file <- glue::glue("./nlcd_fst/nlcd_chunk_{nlcd_chunk}.fst")
  nlcd_columns <- unlist(purrr::map(year, ~ glue::glue("{product}_{.}")))
  if (!file.exists(nlcd_file)) download_nlcd_chunk(nlcd_chunk)
  # read just the single row / requested columns from the fst chunk
  out <- fst::read_fst(
    path = nlcd_file,
    from = nlcd_row,
    to = nlcd_row,
    columns = nlcd_columns
  )
  tibble::as_tibble(out)
}
# download all chunks needed for nlcd multiple cell numbers ahead of time
download_nlcd_chunks <- function(nlcd_cell_numbers) {
nlcd_chunks_needed <- unique(nlcd_cell_numbers %/% 1e+07)
message("downloading ", length(nlcd_chunks_needed), " total chunk files to ./nlcd_fst/")
purrr::walk(nlcd_chunks_needed, download_nlcd_chunk)
} | /get_nlcd_data.R | no_license | andrew-vancil/ecat_depindex | R | false | false | 1,846 | r | # download nlcd chunk only if it doesn't already exist
# Download one NLCD chunk file into ./nlcd_fst/, skipping the download when
# the file already exists on disk. Called for its side effect; returns NULL
# invisibly.
download_nlcd_chunk <- function(nlcd_chunk_number) {
  dir.create("./nlcd_fst/", showWarnings = FALSE)
  nlcd_file <- glue::glue("./nlcd_fst/nlcd_chunk_{nlcd_chunk_number}.fst")
  if (file.exists(nlcd_file)) {
    message(glue::glue("{nlcd_file} already exists"))
    # FIX: was `invisible(return(NULL))` -- return() exits before invisible()
    # can apply; the correct idiom is return(invisible(NULL)).
    return(invisible(NULL))
  }
  message(glue::glue("downloading s3://geomarker/nlcd/nlcd_chunk_{nlcd_chunk_number}.fst to {nlcd_file}"))
  download.file(
    url = glue::glue(
      "https://geomarker.s3.us-east-2.amazonaws.com/",
      "nlcd/nlcd_fst/", "nlcd_chunk_{nlcd_chunk_number}.fst"
    ),
    destfile = nlcd_file,
    # fst files are binary; without mode = "wb" the download is corrupted
    # on Windows (the default mode is text there)
    mode = "wb"
  )
}
# get raw nlcd values for specific cell number, year, and product
# Return the raw NLCD values for a single cell number across the requested
# years and products, reading one row from the on-disk fst chunk file
# (downloading the chunk first if needed).
#
# nlcd_cell_number: scalar NLCD grid-cell id (a longer vector is truncated
#   to its first element with a warning)
# year: which NLCD release years to pull columns for
# product: which NLCD products to pull columns for
# Returns a one-row tibble with one column per product/year combination.
get_nlcd_data <- function(nlcd_cell_number,
                          year = c(2001, 2006, 2011, 2016),
                          product = c("nlcd", "impervious", "imperviousdescriptor")) {
  if (length(nlcd_cell_number) > 1) {
    warning("nlcd_cell is longer than one; processing only the first value")
    # BUG FIX: the truncated value was previously assigned to an unused local
    # `nlcd_cell`, so the full vector silently leaked into the code below.
    nlcd_cell_number <- nlcd_cell_number[1]
  }
  # cells are stored 1e7 per chunk file; the row within the chunk is the
  # remainder of the cell id
  nlcd_chunk <- nlcd_cell_number %/% 1e+07
  nlcd_row <- nlcd_cell_number %% 1e+07
  # NOTE(review): a cell id that is an exact multiple of 1e7 gives
  # nlcd_row == 0, which read_fst cannot read -- confirm ids are 1-based
  # within each chunk.
  nlcd_file <- glue::glue("./nlcd_fst/nlcd_chunk_{nlcd_chunk}.fst")
  nlcd_columns <- unlist(purrr::map(year, ~ glue::glue("{product}_{.}")))
  if (!file.exists(nlcd_file)) download_nlcd_chunk(nlcd_chunk)
  # read just the single row / requested columns from the fst chunk
  out <- fst::read_fst(
    path = nlcd_file,
    from = nlcd_row,
    to = nlcd_row,
    columns = nlcd_columns
  )
  tibble::as_tibble(out)
}
# download all chunks needed for nlcd multiple cell numbers ahead of time
# Pre-fetch every chunk file needed to cover the supplied NLCD cell numbers.
download_nlcd_chunks <- function(nlcd_cell_numbers) {
  # each chunk file covers 1e7 consecutive cells; integer division maps a
  # cell id to its chunk number
  chunks_needed <- unique(nlcd_cell_numbers %/% 1e+07)
  message("downloading ", length(chunks_needed), " total chunk files to ./nlcd_fst/")
  for (chunk in chunks_needed) {
    download_nlcd_chunk(chunk)
  }
  invisible(chunks_needed)
}
## R code implementing plotting tasks as part of creating Plot 1
## Coursera Assignment: Course Project 1 in Exploratory Data Analysis
## NOTE(review): setwd() hard-codes a machine-specific path; remove or
## parameterize before sharing/running elsewhere.
setwd("E:/Coursera/C4-EDA/ex1") ## changes working directory
## rest of code assumes file "household_power_consumption.txt" is in working directory
## Investigate the data set
sample<-read.table("household_power_consumption.txt",nrows=10)
str(sample)
## load the data set into data frame with appropriate column types
## "?" and "" in the raw file are treated as missing values
types<-c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,colClasses=types,na.strings=c("?",""))
str(data)
names(data)
## extract subset of data matching dates specified in exercise
qdata<-subset(data,data$Date %in% c("1/2/2007","2/2/2007"))
## convert Date and Time columns to Date and POSIXlt types
## (there is probably a neater way do to this than this)
date<-as.Date(qdata$Date,format="%d/%m/%Y")
time<-strptime(paste(qdata$Date,qdata$Time),format="%d/%m/%Y %H:%M:%S")
qdata$Time<-time
qdata$Date<-date
## Plot 1 a Histogram of Global Active Power to window
hist(qdata$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
## Plot 1 a Histogram of Global Active Power to png file
png(file="plot1.png",width = 480, height = 480)
hist(qdata$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off() | /plot1.R | no_license | dplcoursera2016/ExData_Plotting1 | R | false | false | 1,473 | r | ## R code implementing plotting tasks as part of creating Plot 1
## Coursera Assignment: Course Project 1 in Exploratory Data Analysis
setwd("E:/Coursera/C4-EDA/ex1") ## changes working directory
## rest of code assumes file "household_power_consumption.txt" is in working directory
## Investigate the data set
sample<-read.table("household_power_consumption.txt",nrows=10)
str(sample)
## load the data set into data frame with appropriate column types
types<-c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric")
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,colClasses=types,na.strings=c("?",""))
str(data)
names(data)
## extract subset of data matching dates specified in exercise
qdata<-subset(data,data$Date %in% c("1/2/2007","2/2/2007"))
## convert Date and Time columns to Date and POSIXlt types
## (there is probably a neater way do to this than this)
date<-as.Date(qdata$Date,format="%d/%m/%Y")
time<-strptime(paste(qdata$Date,qdata$Time),format="%d/%m/%Y %H:%M:%S")
qdata$Time<-time
qdata$Date<-date
## Plot 1 a Histogram of Global Active Power to window
hist(qdata$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
## Plot 1 a Histogram of Global Active Power to png file
png(file="plot1.png",width = 480, height = 480)
hist(qdata$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off() |
# Time Series
#dataset
AirPassengers
class(AirPassengers)
JohnsonJohnson
nhtemp
Nile
sunspots
ds = list(AirPassengers,JohnsonJohnson,nhtemp,Nile,sunspots)
sapply(ds, class)
# Sales TS Data
sales = c(18, 33, 41, 7, 34, 35, 24, 25, 24, 21, 25, 20,
22, 31, 40, 29, 25, 21, 22, 54, 31, 25, 26, 35)
tsales = ts(sales, start=c(2003, 1), frequency=12)
tsales
plot(tsales)
start(tsales)
end(tsales)
frequency(tsales)
(tsales.subset = window(tsales, start=c(2003, 5), end=c(2004, 6)))
tsales.subset
#SMA
Nile
library(forecast)
opar = par(no.readonly = T)
par(mfrow=c(2,2))
(ylim = range(Nile))
plot(Nile, main='Original TS')
head(Nile)
head(ma(Nile,3))
mean(Nile[1:3])
(1120+1160+963)/3
plot(ma(Nile,3), main='SMA k=3', ylim=ylim)
plot(ma(Nile,7), main='SMA k=7', ylim=ylim)
plot(ma(Nile,15),main='SMA k=15', ylim=ylim)
par(opar)
# Listing 15.4 - Simple exponential smoothing
library(forecast)
nhtemp
par(mfrow=c(1,1))
plot(nhtemp)
(fitse = ets(nhtemp, model='ANN'))
(fitse2 = ses(nhtemp))
forecast(fitse,3)
plot(forecast(fitse,c(3)))
accuracy(fitse)
# Holt exponential smoothing.
# Model sketch: TS = level + slope * t + irregular
# (FIX: this formula was previously a bare code line; it referenced
# undefined objects `level`, `slope`, `t`, `irregular` and stopped the
# script with an error, so it is kept as a comment.)
plot(AirPassengers)
#log model to use additive model
plot(log(AirPassengers))
(fithe = ets(log(AirPassengers), model='AAA'))
(pred = forecast(fithe, 5))
plot(pred, main='Forecast for Air Travel', ylab='Log (Air Passengers)', xlab='Time')
#since log was used, use exp to get predicted values
pred$mean
(pred$mean = exp(pred$mean))
(pred$lower = exp(pred$lower))
(pred$upper = exp(pred$upper))
(p = cbind(pred$mean, pred$lower, pred$upper))
# BUG FIX: removed a duplicated `(pred$mean = exp(pred$mean))` that was
# here -- it exponentiated the already back-transformed point forecasts a
# second time, corrupting pred$mean.
# Holt-Winters exponential smoothing of nhtemp.
# Model sketch: TS = level + slope * t + s(t) + irregular
# (FIX: this formula was previously a bare code line referencing undefined
# objects and would error at runtime; kept as a comment instead.)
# NOTE(review): with beta=FALSE and gamma=FALSE this actually fits simple
# exponential smoothing, not full Holt-Winters -- confirm intent.
fit <- HoltWinters(nhtemp, beta=FALSE, gamma=FALSE)
fit
forecast(fit, 1)
plot(forecast(fit, 1), xlab="Year", ylab=expression(paste("Temperature (", degree*F,")",)), main="New Haven Annual Mean Temperature")
accuracy(fit)
# Listing 15.5 - Exponential smoothing with level, slope, and seasonal components
fit <- HoltWinters(log(AirPassengers))
fit
accuracy(fit)
pred <- forecast(fit, 5)
pred
plot(pred, main="Forecast for Air Travel",
ylab="Log(AirPassengers)", xlab="Time")
pred$mean <- exp(pred$mean)
pred$lower <- exp(pred$lower)
pred$upper <- exp(pred$upper)
p <- cbind(pred$mean, pred$lower, pred$upper)
dimnames(p)[[2]] <- c("mean", "Lo 80", "Lo 95", "Hi 80", "Hi 95")
p
# Listing 15.6 - Automatic exponential forecasting with ets()
library(forecast)
fit <- ets(JohnsonJohnson)
fit
plot(forecast(fit), main="Johnson and Johnson Forecasts",
ylab="Quarterly Earnings (Dollars)", xlab="Time")
# Listing 15.7 - Transforming the time series and assessing stationarity
library(forecast)
library(tseries)
plot(Nile)
ndiffs(Nile)
dNile <- diff(Nile)
plot(dNile)
adf.test(dNile) | /TS/ts3.R | no_license | GopalKrishna-P/analytics | R | false | false | 2,769 | r | # Time Series
#dataset
AirPassengers
class(AirPassengers)
JohnsonJohnson
nhtemp
Nile
sunspots
ds = list(AirPassengers,JohnsonJohnson,nhtemp,Nile,sunspots)
sapply(ds, class)
# Sales TS Data
sales = c(18, 33, 41, 7, 34, 35, 24, 25, 24, 21, 25, 20,
22, 31, 40, 29, 25, 21, 22, 54, 31, 25, 26, 35)
tsales = ts(sales, start=c(2003, 1), frequency=12)
tsales
plot(tsales)
start(tsales)
end(tsales)
frequency(tsales)
(tsales.subset = window(tsales, start=c(2003, 5), end=c(2004, 6)))
tsales.subset
#SMA
Nile
library(forecast)
opar = par(no.readonly = T)
par(mfrow=c(2,2))
(ylim = range(Nile))
plot(Nile, main='Original TS')
head(Nile)
head(ma(Nile,3))
mean(Nile[1:3])
(1120+1160+963)/3
plot(ma(Nile,3), main='SMA k=3', ylim=ylim)
plot(ma(Nile,7), main='SMA k=7', ylim=ylim)
plot(ma(Nile,15),main='SMA k=15', ylim=ylim)
par(opar)
# Listing 15.4 - Simple exponential smoothing
library(forecast)
nhtemp
par(mfrow=c(1,1))
plot(nhtemp)
(fitse = ets(nhtemp, model='ANN'))
(fitse2 = ses(nhtemp))
forecast(fitse,3)
plot(forecast(fitse,c(3)))
accuracy(fitse)
#Holt Exponential Smoothening
TS = level + slope * t + irregular
plot(AirPassengers)
#log model to use additive model
plot(log(AirPassengers))
(fithe = ets(log(AirPassengers), model='AAA'))
(pred = forecast(fithe, 5))
plot(pred, main='Forecast for Air Travel', ylab='Log (Air Passengers)', xlab='Time')
#since log was used, use exp to get predicted values
pred$mean
(pred$mean = exp(pred$mean))
(pred$lower = exp(pred$lower))
(pred$upper = exp(pred$upper))
(p = cbind(pred$mean, pred$lower, pred$upper))
(pred$mean = exp(pred$mean))
#Holt Winters Exponential Smoothening
TS = level + slope * t + s(t) + irregular
fit <- HoltWinters(nhtemp, beta=FALSE, gamma=FALSE)
fit
forecast(fit, 1)
plot(forecast(fit, 1), xlab="Year", ylab=expression(paste("Temperature (", degree*F,")",)), main="New Haven Annual Mean Temperature")
accuracy(fit)
# Listing 15.5 - Exponential smoothing with level, slope, and seasonal components
fit <- HoltWinters(log(AirPassengers))
fit
accuracy(fit)
pred <- forecast(fit, 5)
pred
plot(pred, main="Forecast for Air Travel",
ylab="Log(AirPassengers)", xlab="Time")
pred$mean <- exp(pred$mean)
pred$lower <- exp(pred$lower)
pred$upper <- exp(pred$upper)
p <- cbind(pred$mean, pred$lower, pred$upper)
dimnames(p)[[2]] <- c("mean", "Lo 80", "Lo 95", "Hi 80", "Hi 95")
p
# Listing 15.6 - Automatic exponential forecasting with ets()
library(forecast)
fit <- ets(JohnsonJohnson)
fit
plot(forecast(fit), main="Johnson and Johnson Forecasts",
ylab="Quarterly Earnings (Dollars)", xlab="Time")
# Listing 15.7 - Transforming the time series and assessing stationarity
library(forecast)
library(tseries)
plot(Nile)
ndiffs(Nile)
dNile <- diff(Nile)
plot(dNile)
adf.test(dNile) |
# 6 | Sum square difference
# https://projecteuler.net/problem=6
#
# Difference between the square of the sum and the sum of the squares of
# the first n natural numbers; e.g. n = 10 gives 3025 - 385 = 2640.
# Generalized from the original hard-coded n = 100 so any n can be queried;
# the final expression still evaluates/prints the n = 100 answer.
sum_square_difference <- function(n = 100) {
  s <- seq_len(n)  # seq_len() handles n = 0 correctly (1:0 would not)
  sum(s)^2 - sum(s^2)
}
sum_square_difference(100)
| /R/p6.R | no_license | rebordao/project-euler | R | false | false | 553 | r | # 6 | Sum square difference
# https://projecteuler.net/problem=6
#
# The sum of the squares of the first ten natural numbers is,
# 1^2 + 2^2 + ... + 10^2 = 385
#
# The square of the sum of the first ten natural numbers is,
# (1 + 2 + ... + 10)^ 2 = 3025
# Hence the difference between the sum of the squares of the first
# ten natural numbers and the square of the sum is 3025 − 385 = 2640.
# Find the difference between the sum of the squares of the first one
# hundred natural numbers and the square of the sum.
sum(1:100)^ 2 - sum((1:100)^ 2)
|
###########################################################################
# summary.pmc.ppc #
# #
# The purpose of the summary.pmc.ppc function is to summarize an object #
# of class pmc.ppc (posterior predictive check). #
###########################################################################
# Summarize a pmc.ppc (posterior predictive check) object: builds a
# per-record summary table, an optional discrepancy statistic, and BPIC,
# printing the results unless Quiet=TRUE and returning them invisibly.
#
# object      : an object of class pmc.ppc containing y, yhat, Deviance
# Categorical : FALSE -> continuous summary; TRUE -> categorical summary
# Rows        : which records to display/return (default: all)
# Discrep     : name of the discrepancy statistic to compute (see branches)
# d           : tuning constant used by some discrepancy statistics
# Quiet       : suppress printed output when TRUE
summary.pmc.ppc <- function(object=NULL, Categorical=FALSE, Rows=NULL,
     Discrep=NULL, d=0, Quiet=FALSE, ...)
     {
     if(is.null(object)) stop("The object argument is NULL.")
     y <- object$y
     yhat <- object$yhat
     Deviance <- object$Deviance
     # default to all records; fall back to all records on an invalid request
     if(is.null(Rows)) Rows <- 1:length(y)
     if(any(Rows > length(y)) || any(Rows <= 0)) {
          warning("Invalid Rows argument; All rows included.")
          Rows <- 1:length(y)}
     ### Create Continuous Summary Table
     if(Categorical == FALSE) {
          # one row per observation: predictive mean/sd, 95% interval,
          # median, and predictive quantile PQ = Pr(yhat >= y)
          Summ <- matrix(NA, length(y), 8, dimnames=list(1:length(y),
               c("y","Mean","SD","LB","Median","UB","PQ","Discrep")))
          Summ[,1] <- y
          Summ[,2] <- round(rowMeans(yhat),3)
          Summ[,3] <- round(apply(yhat, 1, sd),3)
          for (i in 1:length(y))
               {
               Summ[i,4] <- round(quantile(yhat[i,], probs=0.025,
                    na.rm=TRUE),3)
               Summ[i,5] <- round(quantile(yhat[i,], probs=0.500,
                    na.rm=TRUE),3)
               Summ[i,6] <- round(quantile(yhat[i,], probs=0.975,
                    na.rm=TRUE),3)
               Summ[i,7] <- round(mean(yhat[i,] >= y[i], na.rm=TRUE),3)
               }
          ### Discrepancy Statistics
          # Concordance: proportion of records whose PQ lies inside the
          # central 95% predictive interval
          Concordance <- 1 - mean({{Summ[,7] < 0.025} | {Summ[,7] > 0.975}},
               na.rm=TRUE)
          if(identical(yhat,y)) Concordance <- 1
          Discrepancy.Statistic <- 0
          # each branch below fills Summ[,8] with a record-level discrepancy
          # and aggregates it; only the branch matching Discrep runs
          if(!is.null(Discrep) && {Discrep == "Chi-Square"}) {
               Summ[,8] <- round((y - rowMeans(yhat))^2 /
                    apply(yhat,1,var),3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "Chi-Square2"}) {
               chisq.obs <- chisq.rep <- yhat
               E.y <- E.yrep <- rowMeans(yhat, na.rm=TRUE)
               for (i in 1:nrow(yhat)) {
                    chisq.obs[i,] <- (y[i] - E.y[i])^2 / E.y[i]
                    chisq.rep[i,] <- (yhat[i,] - E.yrep[i])^2 / E.yrep[i]
                    }
               Summ[,8] <- round(rowMeans(chisq.rep > chisq.obs,
                    na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean((Summ[,8] < 0.025) |
                    (Summ[,8] > 0.975), na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "DW"}) {
               Summ[,8] <- round((rowMeans(y - yhat, na.rm=TRUE) -
                    c(0, diff(rowMeans(y - yhat, na.rm=TRUE))))^2 /
                    rowMeans(y - yhat, na.rm=TRUE)^2, 3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE), 3)}
          if(!is.null(Discrep) && {Discrep == "Kurtosis"}) {
               # local helper: excess kurtosis of one record's replicates
               kurtosis <- function(x) {
                    m4 <- mean((x-mean(x, na.rm=TRUE))^4, na.rm=TRUE)
                    kurt <- m4/(sd(x, na.rm=TRUE)^4)-3
                    return(kurt)}
               for (i in 1:length(y)) {Summ[i,8] <- round(kurtosis(yhat[i,]),3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "L.criterion"}) {
               Summ[,8] <- round(sqrt(apply(yhat,1,var) +
                    (y - rowMeans(yhat))^2),3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "MASE"}) {
               Summ[,8] <- round(abs(rowMeans(y - yhat, na.rm=TRUE) /
                    mean(abs(diff(y)), na.rm=TRUE)), 3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "MSE"}) {
               Summ[,8] <- round(rowMeans((y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "PPL"}) {
               # posterior predictive loss; d weights fit vs. variance
               Summ[,8] <- round(apply(yhat,1,var) + (d/(d+1)) *
                    (rowMeans(yhat) - y)^2,3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "Quadratic Loss"}) {
               Summ[,8] <- round(rowMeans((y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "Quadratic Utility"}) {
               Summ[,8] <- round(rowMeans(-1*(y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "RMSE"}) {
               Summ[,8] <- round(sqrt(rowMeans((y - yhat)^2, na.rm=TRUE)),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "Skewness"}) {
               # local helper: skewness of one record's replicates
               skewness <- function(x) {
                    m3 <- mean((x-mean(x, na.rm=TRUE))^3, na.rm=TRUE)
                    skew <- m3/(sd(x, na.rm=TRUE)^3)
                    return(skew)}
               for (i in 1:length(y)) {Summ[i,8] <- round(skewness(yhat[i,]),3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "max(yhat[i,]) > max(y)"}) {
               for (i in 1:length(y)) {Summ[i,8] <- max(yhat[i,]) > max(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "mean(yhat[i,]) > mean(y)"}) {
               for (i in 1:length(y)) {Summ[i,8] <- mean(yhat[i,]) > mean(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "mean(yhat[i,] > d)"}) {
               for (i in 1:length(y)) {Summ[i,8] <- mean(yhat[i,] > d)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "mean(yhat[i,] > mean(y))"}) {
               for (i in 1:length(y)) {Summ[i,8] <- mean(yhat[i,] > mean(y))}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "min(yhat[i,]) < min(y)"}) {
               for (i in 1:length(y)) {Summ[i,8] <- min(yhat[i,]) < min(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "round(yhat[i,]) = d"}) {
               for (i in 1:length(y)) {
                    Summ[i,8] <- round(mean(round(yhat[i,]) == d,
                         na.rm=TRUE), 3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && {Discrep == "sd(yhat[i,]) > sd(y)"}) {
               for (i in 1:length(y)) {Summ[i,8] <- sd(yhat[i,]) > sd(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          # BPIC = Dbar + 2*pD with pD estimated as var(Deviance)/2
          Dbar <- round(mean(Deviance, na.rm=TRUE),3)
          pD <- round(var(Deviance, na.rm=TRUE)/2, 3)
          BPIC <- Dbar + 2*pD
          bpic <- matrix(c(Dbar, pD, BPIC), 1, 3)
          colnames(bpic) <- c("Dbar","pD","BPIC"); rownames(bpic) <- ""
          # L-criterion: record-level sqrt(predictive var + squared error),
          # summed; S.L is its standard deviation across records
          L <- round(sqrt(apply(yhat,1,var) + (y - rowMeans(yhat))^2), 3)
          S.L <- round(sd(L, na.rm=TRUE),3); L <- round(sum(L, na.rm=TRUE),3)
          ### Create Output
          Summ.out <- list(BPIC=bpic,
               Concordance=Concordance,
               Discrepancy.Statistic=round(Discrepancy.Statistic,5),
               L.criterion=L,
               S.L=S.L,
               Summary=Summ[Rows,])
          if(Quiet == FALSE) {
               cat("Bayesian Predictive Information Criterion:\n")
               print(bpic)
               cat("Concordance: ", Concordance, "\n")
               cat("Discrepancy Statistic: ",
                    round(Discrepancy.Statistic,5), "\n")
               cat("L-criterion: ", L, ", S.L: ", S.L, sep="", "\n")
               cat("Records: \n")
               print(Summ[Rows,])}
          }
     ### Create Categorical Summary Table
     else {
          # one column per observed category with its predictive
          # probability, plus Lift relative to the base rate and an
          # optional discrepancy column
          catcounts <- table(y)
          sumnames <- rep(NA, length(catcounts)+3)
          sumnames[1] <- "y"
          for (i in 1:length(catcounts)) {
               sumnames[i+1] <- paste("p(yhat=",names(catcounts)[i],")",sep="")}
          sumnames[length(sumnames)-1] <- "Lift"
          sumnames[length(sumnames)] <- "Discrep"
          Summ <- matrix(NA, length(y), length(sumnames),
               dimnames=list(1:length(y), sumnames))
          Summ[,1] <- y
          for (i in 1:length(catcounts)) {
               Summ[,i+1] <- rowSums(yhat == as.numeric(names(catcounts)[i])) /
                    ncol(yhat)}
          Summ[,{ncol(Summ)-1}] <- 1
          # Lift = p(yhat = observed y) / base rate of that category, minus 1
          for (i in 1:length(y)) {
               Summ[i,{ncol(Summ)-1}] <- Summ[i,
                    grep(Summ[i,1],names(catcounts))+1] /
                    {as.vector(catcounts[grep(Summ[i,1],names(catcounts))]) /
                    sum(catcounts)} - 1}
          ### Discrepancy Statistics
          Mean.Lift <- round(mean(Summ[,{ncol(Summ)-1}]),3)
          Discrepancy.Statistic <- 0
          if(!is.null(Discrep) && {Discrep == "p(yhat[i,] != y[i])"}) {
               for (i in 1:length(y)) {Summ[i,ncol(Summ)] <- 1 -
                    Summ[i, grep(Summ[i,1],names(catcounts))+1]}
               Discrepancy.Statistic <- round(mean(Summ[,ncol(Summ)],
                    na.rm=TRUE),3)}
          # BPIC = Dbar + 2*pD with pD estimated as var(Deviance)/2
          Dbar <- round(mean(Deviance, na.rm=TRUE),3)
          pD <- round(var(Deviance, na.rm=TRUE)/2, 3)
          BPIC <- Dbar + 2*pD
          bpic <- matrix(c(Dbar, pD, BPIC), 1, 3)
          colnames(bpic) <- c("Dbar","pD","BPIC"); rownames(bpic) <- ""
          ### Create Output
          Summ.out <- list(BPIC=bpic,
               Mean.Lift=Mean.Lift,
               Discrepancy.Statistic=round(Discrepancy.Statistic,5),
               Summary=Summ[Rows,])
          if(Quiet == FALSE) {
               cat("Bayesian Predictive Information Criterion:\n")
               print(bpic)
               cat("Mean Lift: ", Mean.Lift, "\n")
               cat("Discrepancy Statistic: ",
                    round(Discrepancy.Statistic,5), "\n")
               cat("Records: \n")
               print(Summ[Rows,])}
          }
     # return the assembled summary invisibly; printing is governed by Quiet
     return(invisible(Summ.out))
     }
#End
| /R/summary.pmc.ppc.R | permissive | benmarwick/LaplacesDemon | R | false | false | 10,771 | r | ###########################################################################
# summary.pmc.ppc                                                         #
#                                                                         #
# The purpose of the summary.pmc.ppc function is to summarize an object  #
# of class pmc.ppc (posterior predictive check).                          #
#                                                                         #
# object ....... list with components y (the data), yhat (a matrix with  #
#                one row per record of y and one column per posterior    #
#                sample of the replicates), and Deviance (posterior      #
#                samples of the deviance).                               #
# Categorical .. when TRUE, y is treated as categorical and per-record   #
#                category probabilities are summarized; otherwise y is   #
#                treated as continuous.                                  #
# Rows ......... record indices to display (default: all records).       #
# Discrep ...... optional name of a discrepancy statistic.               #
# d ............ constant used by some discrepancy statistics.           #
# Quiet ........ when TRUE, nothing is printed; the summary list is      #
#                still returned invisibly.                               #
###########################################################################
summary.pmc.ppc <- function(object=NULL, Categorical=FALSE, Rows=NULL,
     Discrep=NULL, d=0, Quiet=FALSE, ...)
     {
     if(is.null(object)) stop("The object argument is NULL.")
     y <- object$y
     yhat <- object$yhat
     Deviance <- object$Deviance
     ### Validate Rows; fall back to all records on invalid input
     if(is.null(Rows)) Rows <- seq_along(y)
     if(any(Rows > length(y)) || any(Rows <= 0)) {
          warning("Invalid Rows argument; All rows included.")
          Rows <- seq_along(y)}
     ### Create Continuous Summary Table
     if(Categorical == FALSE) {
          Summ <- matrix(NA, length(y), 8, dimnames=list(seq_along(y),
               c("y","Mean","SD","LB","Median","UB","PQ","Discrep")))
          Summ[,1] <- y
          Summ[,2] <- round(rowMeans(yhat),3)
          Summ[,3] <- round(apply(yhat, 1, sd),3)
          for (i in seq_along(y)) {
               ### Central 95% posterior predictive interval and median
               Summ[i,4] <- round(quantile(yhat[i,], probs=0.025,
                    na.rm=TRUE),3)
               Summ[i,5] <- round(quantile(yhat[i,], probs=0.500,
                    na.rm=TRUE),3)
               Summ[i,6] <- round(quantile(yhat[i,], probs=0.975,
                    na.rm=TRUE),3)
               ### Predictive quantile: Pr(yhat[i,] >= y[i])
               Summ[i,7] <- round(mean(yhat[i,] >= y[i], na.rm=TRUE),3)
               }
          ### Discrepancy Statistics
          ### Concordance: proportion of records whose predictive quantile
          ### falls inside the central 95% interval
          Concordance <- 1 - mean((Summ[,7] < 0.025) | (Summ[,7] > 0.975),
               na.rm=TRUE)
          if(identical(yhat,y)) Concordance <- 1
          Discrepancy.Statistic <- 0
          if(!is.null(Discrep) && Discrep == "Chi-Square") {
               Summ[,8] <- round((y - rowMeans(yhat))^2 /
                    apply(yhat,1,var),3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "Chi-Square2") {
               chisq.obs <- chisq.rep <- yhat
               E.y <- E.yrep <- rowMeans(yhat, na.rm=TRUE)
               for (i in seq_len(nrow(yhat))) {
                    chisq.obs[i,] <- (y[i] - E.y[i])^2 / E.y[i]
                    chisq.rep[i,] <- (yhat[i,] - E.yrep[i])^2 / E.yrep[i]
                    }
               Summ[,8] <- round(rowMeans(chisq.rep > chisq.obs,
                    na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean((Summ[,8] < 0.025) |
                    (Summ[,8] > 0.975), na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "DW") {
               Summ[,8] <- round((rowMeans(y - yhat, na.rm=TRUE) -
                    c(0, diff(rowMeans(y - yhat, na.rm=TRUE))))^2 /
                    rowMeans(y - yhat, na.rm=TRUE)^2, 3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE), 3)}
          if(!is.null(Discrep) && Discrep == "Kurtosis") {
               ### Excess kurtosis of each record's replicates
               kurtosis <- function(x) {
                    m4 <- mean((x-mean(x, na.rm=TRUE))^4, na.rm=TRUE)
                    kurt <- m4/(sd(x, na.rm=TRUE)^4)-3
                    return(kurt)}
               for (i in seq_along(y)) {Summ[i,8] <- round(kurtosis(yhat[i,]),3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "L.criterion") {
               Summ[,8] <- round(sqrt(apply(yhat,1,var) +
                    (y - rowMeans(yhat))^2),3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "MASE") {
               Summ[,8] <- round(abs(rowMeans(y - yhat, na.rm=TRUE) /
                    mean(abs(diff(y)), na.rm=TRUE)), 3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "MSE") {
               Summ[,8] <- round(rowMeans((y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "PPL") {
               ### Posterior predictive loss; d weights the fit term
               Summ[,8] <- round(apply(yhat,1,var) + (d/(d+1)) *
                    (rowMeans(yhat) - y)^2,3)
               Discrepancy.Statistic <- round(sum(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "Quadratic Loss") {
               Summ[,8] <- round(rowMeans((y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "Quadratic Utility") {
               Summ[,8] <- round(rowMeans(-1*(y - yhat)^2, na.rm=TRUE),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "RMSE") {
               Summ[,8] <- round(sqrt(rowMeans((y - yhat)^2, na.rm=TRUE)),3)
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "Skewness") {
               skewness <- function(x) {
                    m3 <- mean((x-mean(x, na.rm=TRUE))^3, na.rm=TRUE)
                    skew <- m3/(sd(x, na.rm=TRUE)^3)
                    return(skew)}
               for (i in seq_along(y)) {Summ[i,8] <- round(skewness(yhat[i,]),3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "max(yhat[i,]) > max(y)") {
               for (i in seq_along(y)) {Summ[i,8] <- max(yhat[i,]) > max(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "mean(yhat[i,]) > mean(y)") {
               for (i in seq_along(y)) {Summ[i,8] <- mean(yhat[i,]) > mean(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "mean(yhat[i,] > d)") {
               for (i in seq_along(y)) {Summ[i,8] <- mean(yhat[i,] > d)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "mean(yhat[i,] > mean(y))") {
               for (i in seq_along(y)) {Summ[i,8] <- mean(yhat[i,] > mean(y))}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "min(yhat[i,]) < min(y)") {
               for (i in seq_along(y)) {Summ[i,8] <- min(yhat[i,]) < min(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "round(yhat[i,]) = d") {
               for (i in seq_along(y)) {
                    Summ[i,8] <- round(mean(round(yhat[i,]) == d,
                         na.rm=TRUE), 3)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          if(!is.null(Discrep) && Discrep == "sd(yhat[i,]) > sd(y)") {
               for (i in seq_along(y)) {Summ[i,8] <- sd(yhat[i,]) > sd(y)}
               Discrepancy.Statistic <- round(mean(Summ[,8], na.rm=TRUE),3)}
          ### BPIC = Dbar + 2*pD, with pD = half the posterior variance of
          ### the deviance
          Dbar <- round(mean(Deviance, na.rm=TRUE),3)
          pD <- round(var(Deviance, na.rm=TRUE)/2, 3)
          BPIC <- Dbar + 2*pD
          bpic <- matrix(c(Dbar, pD, BPIC), 1, 3)
          colnames(bpic) <- c("Dbar","pD","BPIC"); rownames(bpic) <- ""
          ### L-criterion: per-record sqrt(predictive variance + squared
          ### bias), summed, with S.L as its standard deviation
          L <- round(sqrt(apply(yhat,1,var) + (y - rowMeans(yhat))^2), 3)
          S.L <- round(sd(L, na.rm=TRUE),3); L <- round(sum(L, na.rm=TRUE),3)
          ### Create Output
          Summ.out <- list(BPIC=bpic,
               Concordance=Concordance,
               Discrepancy.Statistic=round(Discrepancy.Statistic,5),
               L.criterion=L,
               S.L=S.L,
               Summary=Summ[Rows,])
          if(Quiet == FALSE) {
               cat("Bayesian Predictive Information Criterion:\n")
               print(bpic)
               cat("Concordance: ", Concordance, "\n")
               cat("Discrepancy Statistic: ",
                    round(Discrepancy.Statistic,5), "\n")
               cat("L-criterion: ", L, ", S.L: ", S.L, sep="", "\n")
               cat("Records: \n")
               print(Summ[Rows,])}
          }
     ### Create Categorical Summary Table
     else {
          ### NOTE(review): categories are assumed to be numeric codes; the
          ### comparison below coerces category names with as.numeric(), so
          ### non-numeric labels would produce NA -- confirm upstream.
          catcounts <- table(y)
          sumnames <- rep(NA, length(catcounts)+3)
          sumnames[1] <- "y"
          for (i in seq_along(catcounts)) {
               sumnames[i+1] <- paste("p(yhat=",names(catcounts)[i],")",sep="")}
          sumnames[length(sumnames)-1] <- "Lift"
          sumnames[length(sumnames)] <- "Discrep"
          Summ <- matrix(NA, length(y), length(sumnames),
               dimnames=list(seq_along(y), sumnames))
          Summ[,1] <- y
          ### Per-record probability of each category among the replicates
          for (i in seq_along(catcounts)) {
               Summ[,i+1] <- rowSums(yhat == as.numeric(names(catcounts)[i])) /
                    ncol(yhat)}
          Summ[,ncol(Summ)-1] <- 1
          ### Lift: predicted probability of the observed category divided
          ### by its marginal frequency, minus 1. match() replaces the
          ### previous grep(), which treated the category value as a regular
          ### expression and returned multiple indices whenever one label
          ### was a substring of another (e.g. categories 1 and 10).
          for (i in seq_along(y)) {
               k <- match(Summ[i,1], names(catcounts))
               Summ[i,ncol(Summ)-1] <- Summ[i,k+1] /
                    (as.vector(catcounts[k]) / sum(catcounts)) - 1}
          ### Discrepancy Statistics
          Mean.Lift <- round(mean(Summ[,ncol(Summ)-1]),3)
          Discrepancy.Statistic <- 0
          if(!is.null(Discrep) && Discrep == "p(yhat[i,] != y[i])") {
               ### Probability of misclassification per record
               for (i in seq_along(y)) {Summ[i,ncol(Summ)] <- 1 -
                    Summ[i, match(Summ[i,1], names(catcounts))+1]}
               Discrepancy.Statistic <- round(mean(Summ[,ncol(Summ)],
                    na.rm=TRUE),3)}
          ### BPIC = Dbar + 2*pD, as in the continuous case
          Dbar <- round(mean(Deviance, na.rm=TRUE),3)
          pD <- round(var(Deviance, na.rm=TRUE)/2, 3)
          BPIC <- Dbar + 2*pD
          bpic <- matrix(c(Dbar, pD, BPIC), 1, 3)
          colnames(bpic) <- c("Dbar","pD","BPIC"); rownames(bpic) <- ""
          ### Create Output
          Summ.out <- list(BPIC=bpic,
               Mean.Lift=Mean.Lift,
               Discrepancy.Statistic=round(Discrepancy.Statistic,5),
               Summary=Summ[Rows,])
          if(Quiet == FALSE) {
               cat("Bayesian Predictive Information Criterion:\n")
               print(bpic)
               cat("Mean Lift: ", Mean.Lift, "\n")
               cat("Discrepancy Statistic: ",
                    round(Discrepancy.Statistic,5), "\n")
               cat("Records: \n")
               print(Summ[Rows,])}
          }
     return(invisible(Summ.out))
     }
#End
|
# ---- Description --------------------------------------------------------------------------------
# This program builds the loan data for the elasticity study, then computes and exports
# descriptive statistics for the validation output.
# ---- Preliminaries ------------------------------------------------------------------------------
# Load libraries (readr/dplyr/etc. via the tidyverse meta-package)
library(tidyverse)
# Load project helper functions (used below: yyyymm_dashes, cap_vals, lender_names,
# us_states -- presumably defined in these assist scripts; confirm)
source("./assist/functions.R")
source("./assist/descr_stats.R")
# Load constants
source("./assist/elast_constants.R")
# ---- Load data ----------------------------------------------------------------------------------
# Load the raw analysis sample; guess_max raised so readr infers column types
# from the first 100,000 rows instead of its smaller default.
elast_df = read_csv(
file = "../data-raw/Screening_Project/Analysis_Sample_2020_09_27.csv",
guess_max = 100000
)
# ---- Subset variables ---------------------------------------------------------------------------
# Keep only the fields used downstream and give them short analysis names in a
# single select(); `new = old` both selects and renames, so this is equivalent
# to the former select() followed by rename() and preserves the column order.
elast_df = elast_df %>%
  select(
    loan_id,
    # origination
    originator = originatorName,
    orig_date = originationDate,
    amount = originalLoanAmount,
    months = originalLoanTerm,
    rate = originalInterestRatePercentage,
    uwrite = underwritingIndicator,
    subvented,
    whouse = warehouse,
    scheduled_pmt = reportingPeriodScheduledPaymentAmount,
    # vehicle
    manufacturer = vehicleManufacturerName,
    new_used_code = vehicleNewUsedCode,
    model_year = vehicleModelYear,
    vehicle_type = vehicleTypeCode,
    value = vehicleValueAmount,
    # obligor
    cscore = obligorCreditScore,
    i_verif_code = obligorIncomeVerificationLevelCode,
    e_verif_code = obligorEmploymentVerificationCode,
    pmi = paymentToIncomePercentage,
    cosign = coObligorIndicator,
    state = obligorGeographicLocation,
    income = Income,
    # delinquency
    dpd60_12, dpd90_12,
    dpd60_18, dpd90_18,
    dpd60_24, dpd90_24,
    dpd60_30, dpd90_30,
    dpd60_36, dpd90_36,
    delinq_60_12 = severe_derog60_12,
    delinq_90_12 = severe_derog90_12,
    delinq_60_18 = severe_derog60_18,
    delinq_90_18 = severe_derog90_18,
    delinq_60_24 = severe_derog60_24,
    delinq_90_24 = severe_derog90_24,
    delinq_60_30 = severe_derog60_30,
    delinq_90_30 = severe_derog90_30,
    delinq_60_36 = severe_derog60_36,
    delinq_90_36 = severe_derog90_36,
    # filter fields (dropped again after cleaning)
    interestCalculationTypeCode,
    originalInterestRateTypeCode,
    paymentTypeCode,
    servicingAdvanceMethodCode
  )
# ---- Wrangle data -------------------------------------------------------------------------------
# Normalize dates and percentage-scaled fields, bound income, and derive
# indicator variables, in one piped chain of mutate() steps.
elast_df = elast_df %>%
  mutate(
    # Reformat the origination date (project helper)
    orig_date = yyyymm_dashes(orig_date)
  ) %>%
  mutate(
    # Values above 0.50 are treated as percents (e.g. 7.5) and rescaled to
    # proportions (0.075); values at or below 0.50 are left unchanged.
    rate = ifelse(rate > 0.50, rate / 100, rate),
    pmi = ifelse(pmi > 0.50, pmi / 100, pmi)
  ) %>%
  mutate(
    # Bound income to [2500, 250000] via the project helper cap_vals()
    income = cap_vals(income, 2500, 250000)
  ) %>%
  mutate(
    ltv = amount / value,
    new_vehicle = as.numeric(new_used_code == 1),
    i_verif = as.numeric(i_verif_code %in% c(3, 4, 5)),
    e_verif = as.numeric(e_verif_code %in% c(3, 4, 5)),
    rate_sub = as.numeric(subvented %in% c("1", "1,2", "2,1", "1,98")),
    cash_sub = as.numeric(subvented %in% c("2", "1,2", "2,1", "2,98"))
  )
# ---- Clean data ---------------------------------------------------------------------------------
elast_df = elast_df %>%
  # Baseline filters: drop records with missing or unknown (code 98)
  # disclosure codes, then discard the code columns, which are no longer
  # needed downstream.
  filter(
    !is.na(interestCalculationTypeCode), interestCalculationTypeCode != 98,
    !is.na(originalInterestRateTypeCode), originalInterestRateTypeCode != 98,
    !is.na(paymentTypeCode), paymentTypeCode != 98,
    !is.na(subvented), subvented != 98,
    !is.na(servicingAdvanceMethodCode), servicingAdvanceMethodCode != 98
  ) %>%
  select(
    -interestCalculationTypeCode,
    -originalInterestRateTypeCode,
    -paymentTypeCode,
    -servicingAdvanceMethodCode
  ) %>%
  # Data-quality screens on vehicle value, leverage, loan size, credit
  # score, rate/payment-to-income, and geography
  filter(
    value > 0,
    ltv < 2,
    amount < 250000,
    cscore >= 280,
    cscore <= 850,
    rate <= 0.30,
    pmi <= 0.30,
    state %in% us_states()
  )
# ---- Append lender names ------------------------------------------------------------------------
# Inner join keeps only loans whose originator ticker appears in the
# project's lender_names() lookup table.
elast_df = inner_join(
  x = elast_df,
  y = lender_names(),
  by = c("originator" = "ticker")
)
# ---- Descriptive statistics ---------------------------------------------------------------------
# Features summarized in the validation table (order preserved for output)
descr_features = c(
  "amount", "rate", "scheduled_pmt", "months",
  "ltv", "value", "model_year", "new_vehicle", "whouse",
  "cscore", "income", "pmi", "cosign", "uwrite",
  "rate_sub", "cash_sub", "i_verif", "e_verif"
)
descr_df = descr_stats(df = elast_df, features = descr_features)
# Export results
write_csv(
descr_df,
path = "../output/tables/validation/descr_stats.csv"
) | /validate/descr_stats.R | no_license | davidsovich/auto_abs | R | false | false | 5,634 | r | # ---- Description --------------------------------------------------------------------------------
# This program builds the loan data for the elasticity study.
# ---- Preliminaries ------------------------------------------------------------------------------
# Load libraries
library(tidyverse)
# Load functions
source("./assist/functions.R")
source("./assist/descr_stats.R")
# Load constants
source("./assist/elast_constants.R")
# ---- Load data ----------------------------------------------------------------------------------
# Load data
elast_df = read_csv(
file = "../data-raw/Screening_Project/Analysis_Sample_2020_09_27.csv",
guess_max = 100000
)
# ---- Subset variables ---------------------------------------------------------------------------
# Subset variables
elast_df = elast_df %>%
select(
loan_id,
# origination
originatorName,
originationDate,
originalLoanAmount,
originalLoanTerm,
originalInterestRatePercentage,
underwritingIndicator,
subvented,
warehouse,
reportingPeriodScheduledPaymentAmount,
# loan
vehicleManufacturerName,
vehicleNewUsedCode,
vehicleModelYear,
vehicleTypeCode,
vehicleValueAmount,
# obligor
obligorCreditScore,
obligorIncomeVerificationLevelCode,
obligorEmploymentVerificationCode,
paymentToIncomePercentage,
coObligorIndicator,
obligorGeographicLocation,
Income,
# delinquency
dpd60_12,
dpd90_12,
dpd60_18,
dpd90_18,
dpd60_24,
dpd90_24,
dpd60_30,
dpd90_30,
dpd60_36,
dpd90_36,
severe_derog60_12,
severe_derog90_12,
severe_derog60_18,
severe_derog90_18,
severe_derog60_24,
severe_derog90_24,
severe_derog60_30,
severe_derog90_30,
severe_derog60_36,
severe_derog90_36,
# filters
interestCalculationTypeCode,
originalInterestRateTypeCode,
paymentTypeCode,
servicingAdvanceMethodCode
)
# Rename variables
elast_df = elast_df %>%
rename(
# origination
originator = originatorName,
orig_date = originationDate,
amount = originalLoanAmount,
months = originalLoanTerm,
rate = originalInterestRatePercentage,
uwrite = underwritingIndicator,
whouse = warehouse,
scheduled_pmt = reportingPeriodScheduledPaymentAmount,
# loan
manufacturer = vehicleManufacturerName,
new_used_code = vehicleNewUsedCode,
model_year = vehicleModelYear,
vehicle_type = vehicleTypeCode,
value = vehicleValueAmount,
# obligor
cscore = obligorCreditScore,
income = Income,
i_verif_code = obligorIncomeVerificationLevelCode,
e_verif_code = obligorEmploymentVerificationCode,
pmi = paymentToIncomePercentage,
cosign = coObligorIndicator,
state = obligorGeographicLocation,
# delinquency
delinq_60_12 = severe_derog60_12,
delinq_90_12 = severe_derog90_12,
delinq_60_18 = severe_derog60_18,
delinq_90_18 = severe_derog90_18,
delinq_60_24 = severe_derog60_24,
delinq_90_24 = severe_derog90_24,
delinq_60_30 = severe_derog60_30,
delinq_90_30 = severe_derog90_30,
delinq_60_36 = severe_derog60_36,
delinq_90_36 = severe_derog90_36
)
# ---- Wrangle data -------------------------------------------------------------------------------
# Date changes
elast_df = elast_df %>%
mutate(
orig_date = yyyymm_dashes(orig_date)
)
# Rate bounds
elast_df = elast_df %>%
mutate(
rate = ifelse(rate > 0.50, rate / 100, rate),
pmi = ifelse(pmi > 0.50, pmi / 100, pmi)
)
# Income bounds
elast_df = elast_df %>%
mutate(
income = cap_vals(income, 2500, 250000)
)
# Other variables
elast_df = elast_df %>%
mutate(
ltv = amount / value,
new_vehicle = as.numeric(new_used_code == 1),
i_verif = as.numeric(i_verif_code %in% c(3, 4, 5)),
e_verif = as.numeric(e_verif_code %in% c(3, 4, 5)),
rate_sub = as.numeric(subvented %in% c("1", "1,2", "2,1", "1,98")),
cash_sub = as.numeric(subvented %in% c("2", "1,2", "2,1", "2,98"))
)
# ---- Clean data ---------------------------------------------------------------------------------
# Baseline filters
elast_df = elast_df %>%
filter(
interestCalculationTypeCode != 98,
!is.na(interestCalculationTypeCode),
originalInterestRateTypeCode != 98,
!is.na(originalInterestRateTypeCode),
paymentTypeCode != 98,
!is.na(paymentTypeCode),
subvented != 98,
!is.na(subvented),
servicingAdvanceMethodCode != 98,
!is.na(servicingAdvanceMethodCode)
) %>%
select(
-interestCalculationTypeCode,
-originalInterestRateTypeCode,
-paymentTypeCode,
-servicingAdvanceMethodCode
)
# Data quality
elast_df = elast_df %>%
filter(
value > 0,
ltv < 2,
amount < 250000,
cscore >= 280,
cscore <= 850,
rate <= 0.30,
pmi <= 0.30,
state %in% us_states()
)
# ---- Append lender names ------------------------------------------------------------------------
# Append names
elast_df = elast_df %>%
inner_join(
y = lender_names(),
by = c("originator" = "ticker")
)
# ---- Descriptive statistics ---------------------------------------------------------------------
# Wide window
descr_df = descr_stats(
df = elast_df,
features = c(
"amount",
"rate",
"scheduled_pmt",
"months",
"ltv",
"value",
"model_year",
"new_vehicle",
"whouse",
"cscore",
"income",
"pmi",
"cosign",
"uwrite",
"rate_sub",
"cash_sub",
"i_verif",
"e_verif"
)
)
# Export results
write_csv(
descr_df,
path = "../output/tables/validation/descr_stats.csv"
) |
#' @export
end_round = function(paths, hideouts = NULL){
  #' @title Manage list of possible hideouts
  #'
  #' @description Build (round 1) or prune (rounds 2 and 3) the vector of
  #' possible hideouts, using the final position of every possible path
  #' traveled so far.
  #'
  #' @param paths list of all possible paths already traveled
  #' @param hideouts optional vector of possible hideouts carried over from
  #' previous rounds; omit in round 1
  #'
  #' @return sorted vector of all possible hideouts
  #' @examples
  #' possibilities = start_round(64)
  #' possibilities = take_a_step(possibilities,roads)
  #' possibilities = take_a_step(possibilities,roads,blocked=list(c(63,82),c(63,65)))
  #' possibilities = inspect_space(possibilities,space = c(29,30), clue = FALSE)
  #' possibilities = inspect_space(possibilities,space = 49, clue = TRUE)
  #' hideouts = end_round(possibilities,hideouts=NULL)
  #' possibilities = start_round(67)
  #' possibilities = take_a_step(possibilities,roads)
  #' hideouts = end_round(possibilities,hideouts=hideouts)
  # The final entry of each path is a candidate hideout.
  final_spaces = unlist(lapply(paths, function(path) rev(path)[1]))
  final_spaces = unique(final_spaces)
  # Round 1: every distinct final position is possible.
  if (is.null(hideouts)) {
    return(sort(final_spaces))
  }
  # Later rounds: keep only previously possible hideouts that are still
  # reachable as a final position.
  sort(intersect(hideouts, final_spaces))
}
| /R/end_round.R | no_license | cran/whitechapelR | R | false | false | 1,311 | r | #' @export
end_round = function(paths,hideouts=NULL){
#' @title Manage list of possible hideouts
#'
#' @description Create or update a list of possible hideouts based on final positions from the list of possible paths traveled.
#'
#' @param paths list of all possible paths already traveled
#' @param hideouts optional vector of possible hideouts from previous rounds. Not used in round 1, only rounds 2 and 3
#'
#' @return list of all possible hideouts
#' @examples
#' possibilities = start_round(64)
#' possibilities = take_a_step(possibilities,roads)
#' possibilities = take_a_step(possibilities,roads,blocked=list(c(63,82),c(63,65)))
#' possibilities = inspect_space(possibilities,space = c(29,30), clue = FALSE)
#' possibilities = inspect_space(possibilities,space = 49, clue = TRUE)
#' hideouts = end_round(possibilities,hideouts=NULL)
#' possibilities = start_round(67)
#' possibilities = take_a_step(possibilities,roads)
#' hideouts = end_round(possibilities,hideouts=hideouts)
possible_hideouts = lapply(paths,function(x){
rev(x)[1]
})
possible_hideouts = unique(unlist(possible_hideouts))
if(is.null(hideouts)) return(sort(possible_hideouts))
hideouts = intersect(hideouts,possible_hideouts)
return(sort(hideouts))
}
|
# Multinomial (categorical) logit in Stan. Identification comes from fixing
# the first class's coefficient row at zero, so only K-1 rows are estimated.
# x is passed as an array of vectors, and the likelihood loops over the N
# observations. (Stan code kept verbatim; '#' comments are old Stan syntax.)
stan_catlogit_km1 = "
data {
int<lower=1> K; # number of classes
int<lower=1> N; # nrow of x
int<lower=1> D; # ncol of x
int<lower=1, upper=K> y[N]; # target as integer
vector[D] x[N]; # array of D
}
transformed data {
row_vector[D] zeros; # create reference level coefs of zero
zeros = rep_row_vector(0, D);
}
parameters {
matrix[K-1,D] beta_raw; # estimated coefs
}
transformed parameters{
matrix[K, D] beta;
beta = append_row(zeros, beta_raw);
}
model {
# prior
to_vector(beta_raw) ~ normal(0, 10);
# likelihood
for (n in 1:N)
y[n] ~ categorical_logit(beta * x[n]);
}"
# Same K-1 reference-class model, but x enters as a model matrix and the
# linear predictor L = beta * x' is computed once for all observations.
# Applies some vectorization for a speed gain, though it looks negligible.
stan_catlogit_km1_vec = "
data {
int<lower=1> K; # number of classes
int<lower=1> N; # nrow of x
int<lower=1> D; # ncol of x
int<lower=1, upper=K> y[N]; # target as integer
matrix[N,D] x; # model matrix
}
transformed data{
matrix[D,N] xt;
row_vector[D] zeros;
xt = x';
zeros = rep_row_vector(0, D);
}
parameters {
matrix[K-1,D] beta_raw;
}
transformed parameters{
matrix[K, D] beta;
beta = append_row(zeros, beta_raw);
}
model {
matrix[K,N] L; # Linear predictor
L = beta * xt;
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(L[,n]);
}
"
# Variant with coefficients stored column-wise (D x K), which keeps things in
# columnar format and avoids transposing the data; L = x * beta. Coefficient
# layout matches mlogit's ordering, hence the name.
stan_catlogit_km1_vec_mlogit = "
data {
int K;
int N;
int D;
int y[N];
matrix[N,D] x;
}
transformed data{
vector[D] zeros;
zeros = rep_vector(0, D);
}
parameters {
matrix[D,K-1] beta_raw;
}
transformed parameters{
matrix[D, K] beta;
beta = append_col(zeros, beta_raw);
}
model {
matrix[N, K] L; # Linear predictor
L = x * beta;
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(to_vector(L[n]));
}
"
# Sum-to-zero identification instead of a reference class: the first
# coefficient row is set to minus the column sums of the free rows.
# Somewhat slower than the reference-class parameterizations above.
stan_catlogit_km1_vec_constraint = "
data {
int K;
int N;
int D;
int y[N];
matrix[N,D] x;
}
transformed data{
matrix[D,N] xt;
xt = x';
}
parameters {
matrix[K-1,D] beta_raw;
}
transformed parameters{
matrix[K, D] beta;
for (d in 1:D) {
beta[1,d] = -sum(beta_raw[,d]);
beta[2:K,d] = beta_raw[,d];
}
}
model {
matrix[K,N] L;
L = beta * xt; # Linear predictor
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(L[,n]);
}
"
# this follows the stan manual example, but is slow, adds to code verbosity, and
# doesn't provide correct (though close) estimates. The scale parameters in
# particular are problematic.
# stan_catlogit_km1_vec_simplex = "
# data {
# int K;
# int N;
# int D;
# int y[N];
# matrix[N,D] x;
# }
#
# transformed data{
# matrix[D,N] xt;
# row_vector[D] zeros;
#
# xt = x';
# }
#
# parameters {
# simplex[K] beta_raw1;
# simplex[K] beta_raw2;
# simplex[K] beta_raw3;
# simplex[K] beta_raw4;
#
# vector<lower=0>[4] beta_scale;
# }
#
# transformed parameters{
# matrix[K, D] beta;
#
#
# beta[,1] = beta_scale[1] * (beta_raw1 - 1.0 / K);
# beta[,2] = beta_scale[2] * (beta_raw2 - 1.0 / K);
# beta[,3] = beta_scale[3] * (beta_raw3 - 1.0 / K);
# beta[,4] = beta_scale[4] * (beta_raw4 - 1.0 / K);
#
# }
#
# model {
# matrix[K,N] L;
#
# L = beta * xt;
#
# beta_raw1 ~ normal(0, 10);
# beta_raw2 ~ normal(0, 10);
# beta_raw3 ~ normal(0, 10);
# beta_raw4 ~ normal(0, 10);
#
# # beta_scale ~ student_t(3, 0, 100);
#
# for (n in 1:N)
# y[n] ~ categorical_logit(L[,n]);
# }
# "
# Import data and setup --------------------------------------------------
library(haven); library(tidyverse)
# hsbdemo: high-school program-choice data; labelled columns become factors,
# and "academic" is made the reference program so estimates are comparable
# with the Stan reference-class parameterization.
program = read_dta("https://stats.idre.ucla.edu/stat/data/hsbdemo.dta") %>%
as_factor() %>%
mutate(prog = relevel(prog, ref = "academic"))
library(mlogit)
head(program[,1:5])
# Reshape to mlogit's long format (one row per alternative per chooser).
# NOTE(review): 'data=' is left empty so the piped data frame fills the
# argument via the pipe's first-argument insertion -- confirm intentional.
programLong = program %>%
select(id, prog, ses, write) %>%
mlogit.data(data=, shape='wide', choice='prog', id.var='id')
head(programLong)
# Frequentist benchmark: multinomial logit of program on ses and write.
mlogit_mod = mlogit(prog ~ 1|ses + write, data=programLong)
# Reorder mlogit's alternative-interleaved coefficients to group them by
# outcome class, matching the row ordering of the Stan beta_raw estimates.
mlogit_coefs = coef(mlogit_mod)[c(1,3,5,7,2,4,6,8)]
X = model.matrix(prog ~ ses + write, data = program)
y = program$prog
# Sort observations by class (cosmetic; does not affect the estimates).
X = X[order(y),]
y = y[order(y)]
# N = sample size, x is the model matrix, y integer version of class outcome, k=
# number of classes, D is dimension of model matrix
datalist = list(N=nrow(X), x = X, y=as.integer(y), K=n_distinct(y), D=ncol(X))
library(rstan)
# Each fit below is followed by a side-by-side comparison of posterior means
# against the mlogit estimates. NOTE(review): '[,5]' selects the all-chains
# mean column of get_posterior_mean() when 4 chains are run -- confirm
# against the rstan documentation if the number of chains changes.
# categorical based on K-1
bayes_catlogit_km1 = stan(model_code=stan_catlogit_km1, data=datalist, cores=4)
bayespar_catlogit_km1 = get_posterior_mean(bayes_catlogit_km1, par='beta_raw')[,5]
cbind(mlogit_coefs, bayespar_catlogit_km1)
# categorical based on K-1 + vec
bayes_catlogit_km1_vec = stan(model_code=stan_catlogit_km1_vec, data=datalist, cores=4)
bayespar_catlogit_km1_vec = get_posterior_mean(bayes_catlogit_km1_vec, par='beta_raw')[,5]
cbind(mlogit_coefs, bayespar_catlogit_km1_vec)
# mlogit-style (column-wise) parameterization: coefficients already come out
# in mlogit's own order, so compare against the unreordered coef vector.
bayes_catlogit_km1_vec_mlogit = stan(model_code=stan_catlogit_km1_vec_mlogit, data=datalist, cores=4)
bayespar_catlogit_km1_vec_mlogit = get_posterior_mean(bayes_catlogit_km1_vec_mlogit, par='beta_raw')[,5]
cbind(coef(mlogit_mod), bayespar_catlogit_km1_vec_mlogit)
# categorical based on K-1 + vec constraint
bayes_catlogit_km1_vec_constraint = stan(model_code=stan_catlogit_km1_vec_constraint, data=datalist, cores=4)
bayespar_catlogit_km1_vec_constraint = get_posterior_mean(bayes_catlogit_km1_vec_constraint, par='beta')[,5]
# Convert sum-to-zero coefficients to reference-class differences so they are
# comparable with the mlogit estimates.
bayespar_catlogit_km1_vec_constraint = c(bayespar_catlogit_km1_vec_constraint[5:12]-bayespar_catlogit_km1_vec_constraint[1:4])
cbind(mlogit_coefs, bayespar_catlogit_km1_vec_constraint)
| /ModelFitting/Bayesian/multinomial/multinomial_stan_comparisons.R | no_license | Pakillo/Miscellaneous-R-Code | R | false | false | 5,818 | r | stan_catlogit_km1 = "
data {
int<lower=1> K; # number of classes
int<lower=1> N; # nrow of x
int<lower=1> D; # ncol of x
int<lower=1, upper=K> y[N]; # target as integer
vector[D] x[N]; # array of D
}
transformed data {
row_vector[D] zeros; # create reference level coefs of zero
zeros = rep_row_vector(0, D);
}
parameters {
matrix[K-1,D] beta_raw; # estimated coefs
}
transformed parameters{
matrix[K, D] beta;
beta = append_row(zeros, beta_raw);
}
model {
# prior
to_vector(beta_raw) ~ normal(0, 10);
# likelihood
for (n in 1:N)
y[n] ~ categorical_logit(beta * x[n]);
}"
# Applies some vectorization, some speed gain; but looks negligible
stan_catlogit_km1_vec = "
data {
int<lower=1> K; # number of classes
int<lower=1> N; # nrow of x
int<lower=1> D; # ncol of x
int<lower=1, upper=K> y[N]; # target as integer
matrix[N,D] x; # model matrix
}
transformed data{
matrix[D,N] xt;
row_vector[D] zeros;
xt = x';
zeros = rep_row_vector(0, D);
}
parameters {
matrix[K-1,D] beta_raw;
}
transformed parameters{
matrix[K, D] beta;
beta = append_row(zeros, beta_raw);
}
model {
matrix[K,N] L; # Linear predictor
L = beta * xt;
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(L[,n]);
}
"
# keeps things in columnar format, no data transpose
stan_catlogit_km1_vec_mlogit = "
data {
int K;
int N;
int D;
int y[N];
matrix[N,D] x;
}
transformed data{
vector[D] zeros;
zeros = rep_vector(0, D);
}
parameters {
matrix[D,K-1] beta_raw;
}
transformed parameters{
matrix[D, K] beta;
beta = append_col(zeros, beta_raw);
}
model {
matrix[N, K] L; # Linear predictor
L = x * beta;
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(to_vector(L[n]));
}
"
# somewhat slower
# Sum-to-zero constrained variant: instead of pinning the reference class to
# zero, the first row of beta is the negative column sum of the free
# coefficients, so all K rows are identified jointly.
# NOTE: the Stan source below is a runtime string literal and is left
# byte-identical.
stan_catlogit_km1_vec_constraint = "
data {
int K;
int N;
int D;
int y[N];
matrix[N,D] x;
}
transformed data{
matrix[D,N] xt;
xt = x';
}
parameters {
matrix[K-1,D] beta_raw;
}
transformed parameters{
matrix[K, D] beta;
for (d in 1:D) {
beta[1,d] = -sum(beta_raw[,d]);
beta[2:K,d] = beta_raw[,d];
}
}
model {
matrix[K,N] L;
L = beta * xt; # Linear predictor
to_vector(beta_raw) ~ normal(0, 10);
for (n in 1:N)
y[n] ~ categorical_logit(L[,n]);
}
"
# this follows the stan manual example, but is slow, adds to code verbosity, and
# doesn't provide correct (though close) estimates. The scale parameters in
# particular are problematic.
# stan_catlogit_km1_vec_simplex = "
# data {
# int K;
# int N;
# int D;
# int y[N];
# matrix[N,D] x;
# }
#
# transformed data{
# matrix[D,N] xt;
# row_vector[D] zeros;
#
# xt = x';
# }
#
# parameters {
# simplex[K] beta_raw1;
# simplex[K] beta_raw2;
# simplex[K] beta_raw3;
# simplex[K] beta_raw4;
#
# vector<lower=0>[4] beta_scale;
# }
#
# transformed parameters{
# matrix[K, D] beta;
#
#
# beta[,1] = beta_scale[1] * (beta_raw1 - 1.0 / K);
# beta[,2] = beta_scale[2] * (beta_raw2 - 1.0 / K);
# beta[,3] = beta_scale[3] * (beta_raw3 - 1.0 / K);
# beta[,4] = beta_scale[4] * (beta_raw4 - 1.0 / K);
#
# }
#
# model {
# matrix[K,N] L;
#
# L = beta * xt;
#
# beta_raw1 ~ normal(0, 10);
# beta_raw2 ~ normal(0, 10);
# beta_raw3 ~ normal(0, 10);
# beta_raw4 ~ normal(0, 10);
#
# # beta_scale ~ student_t(3, 0, 100);
#
# for (n in 1:N)
# y[n] ~ categorical_logit(L[,n]);
# }
# "
# Import data and setup --------------------------------------------------
# Fit the UCLA 'hsbdemo' program-choice example with mlogit to get reference
# (maximum-likelihood) coefficients, then build the data list the Stan
# models above expect.
library(haven); library(tidyverse)
# Read the high-school data; make 'academic' the reference outcome class.
program = read_dta("https://stats.idre.ucla.edu/stat/data/hsbdemo.dta") %>%
as_factor() %>%
mutate(prog = relevel(prog, ref = "academic"))
library(mlogit)
head(program[,1:5])
# Reshape to the long format mlogit expects (one row per alternative per id).
# NOTE(review): the empty `data=` argument looks accidental -- the piped
# value already fills the first argument; confirm it is intentional.
programLong = program %>%
select(id, prog, ses, write) %>%
mlogit.data(data=, shape='wide', choice='prog', id.var='id')
head(programLong)
# Intercept-only alternative part; ses and write as individual covariates.
mlogit_mod = mlogit(prog ~ 1|ses + write, data=programLong)
# Reorder coefficients (by class, then covariate) to line up with the Stan
# beta_raw layout for side-by-side comparison.
mlogit_coefs = coef(mlogit_mod)[c(1,3,5,7,2,4,6,8)]
X = model.matrix(prog ~ ses + write, data = program)
y = program$prog
# Sort rows by outcome class; cosmetic, keeps classes contiguous.
X = X[order(y),]
y = y[order(y)]
# N = sample size, x is the model matrix, y integer version of class outcome, k=
# number of classes, D is dimension of model matrix
datalist = list(N=nrow(X), x = X, y=as.integer(y), K=n_distinct(y), D=ncol(X))
library(rstan)
# Fit each Stan parameterization and compare posterior means of the free
# coefficients against the mlogit maximum-likelihood estimates.
# categorical based on K-1
bayes_catlogit_km1 = stan(model_code=stan_catlogit_km1, data=datalist, cores=4)
# Column 5 of get_posterior_mean() is the mean across all chains.
bayespar_catlogit_km1 = get_posterior_mean(bayes_catlogit_km1, par='beta_raw')[,5]
cbind(mlogit_coefs, bayespar_catlogit_km1)
# categorical based on K-1 + vec
bayes_catlogit_km1_vec = stan(model_code=stan_catlogit_km1_vec, data=datalist, cores=4)
bayespar_catlogit_km1_vec = get_posterior_mean(bayes_catlogit_km1_vec, par='beta_raw')[,5]
cbind(mlogit_coefs, bayespar_catlogit_km1_vec)
# Column-major (mlogit-style) parameterization: coefficients come out in
# mlogit's own ordering, so compare against coef() directly, unreordered.
bayes_catlogit_km1_vec_mlogit = stan(model_code=stan_catlogit_km1_vec_mlogit, data=datalist, cores=4)
bayespar_catlogit_km1_vec_mlogit = get_posterior_mean(bayes_catlogit_km1_vec_mlogit, par='beta_raw')[,5]
cbind(coef(mlogit_mod), bayespar_catlogit_km1_vec_mlogit)
# categorical based on K-1 + vec constraint
bayes_catlogit_km1_vec_constraint = stan(model_code=stan_catlogit_km1_vec_constraint, data=datalist, cores=4)
# Sum-to-zero fit: extract the full beta (rows 1:4 = reference class for
# K=3, D=4 layout) and re-express relative to the first class so the scale
# matches the reference-class-zeroed mlogit coefficients.
bayespar_catlogit_km1_vec_constraint = get_posterior_mean(bayes_catlogit_km1_vec_constraint, par='beta')[,5]
bayespar_catlogit_km1_vec_constraint = c(bayespar_catlogit_km1_vec_constraint[5:12]-bayespar_catlogit_km1_vec_constraint[1:4])
cbind(mlogit_coefs, bayespar_catlogit_km1_vec_constraint)
|
\name{subset.nri}
\alias{subset.nri}
\alias{subset,Nri-method}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Subsetting \code{Nri}-objects
}
\description{
Return subsets of \code{Nri}-objects which meet conditions.
}
\usage{
\S4method{subset}{Nri}(x, subset, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Object of class 'Nri'.
}
\item{subset}{
Logical expression indicating spectra to keep: missing values are taken as false. See details section.
}
\item{...}{
Further arguments passed to \code{\link{agrep}}.
}
}
\details{
Matchable objects are SI data. Use column names to identify the respective SI. See \code{\link{SI}} to access SI of a \code{Nri}. IDs of samples may be accessed using "id.nri" as variable name.
}
\value{
Object of class \code{Nri}.
}
\author{
Lukas Lehnert
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\linkS4class{Nri}}, \code{\link{SI}}
}
\examples{
% \dontrun{
data(spectral_data)
## Calculate all possible combinations for WorldView-2-8
spec_WV <- spectralResampling(spectral_data, "WorldView2-8",
response_function = FALSE)
nri_WV <- nri(spec_WV, recursive = TRUE)
## Return names of SI data
names(SI(nri_WV))
## Divide into both seasons
sp_summer <- subset(nri_WV, season == "summer")
sp_spring <- subset(nri_WV, season == "spring")
## Print both Nri-objects
sp_summer
sp_spring
% }
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{utilities}
| /man/subset_nri.Rd | no_license | keltoskytoi/hsdar | R | false | false | 1,586 | rd | \name{subset.nri}
\alias{subset.nri}
\alias{subset,Nri-method}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Subsetting \code{Nri}-objects
}
\description{
Return subsets of \code{Nri}-objects which meet conditions.
}
\usage{
\S4method{subset}{Nri}(x, subset, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Object of class 'Nri'.
}
\item{subset}{
Logical expression indicating spectra to keep: missing values are taken as false. See details section.
}
\item{...}{
Further arguments passed to \code{\link{agrep}}.
}
}
\details{
Matchable objects are SI data. Use column names to identify the respective SI. See \code{\link{SI}} to access SI of a \code{Nri}. IDs of samples may be accessed using "id.nri" as variable name.
}
\value{
Object of class \code{Nri}.
}
\author{
Lukas Lehnert
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\linkS4class{Nri}}, \code{\link{SI}}
}
\examples{
% \dontrun{
data(spectral_data)
## Calculate all possible combinations for WorldView-2-8
spec_WV <- spectralResampling(spectral_data, "WorldView2-8",
response_function = FALSE)
nri_WV <- nri(spec_WV, recursive = TRUE)
## Return names of SI data
names(SI(nri_WV))
## Divide into both seasons
sp_summer <- subset(nri_WV, season == "summer")
sp_spring <- subset(nri_WV, season == "spring")
## Print both Nri-objects
sp_summer
sp_spring
% }
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{utilities}
|
# Auto-extracted example code for BacArena::selPheno.
library(BacArena)
### Name: selPheno
### Title: Function for selecting phenotypes which occurred on the arena
### from specific iterations and species
### Aliases: selPheno selPheno,Eval-method
### ** Examples
data(Ec_core, envir = environment()) #get Escherichia coli core metabolic model
bac <- Bac(Ec_core,deathrate=0.05,
minweight=0.05,growtype="exponential") #initialize a bacterium
arena <- Arena(n=20,m=20) #initialize the environment
arena <- addOrg(arena,bac,amount=10) #add 10 organisms
arena <- addSubs(arena,40) #add all possible substances
eval <- simEnv(arena,5) #run the simulation for 5 iterations
selPheno(eval,time=5,type='ecoli_core_model',reduce=TRUE) #phenotypes at iteration 5
| /data/genthat_extracted_code/BacArena/examples/selPheno.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 654 | r | library(BacArena)
### Name: selPheno
### Title: Function for selecting phenotypes which occured on the arena
### from specific iterations and species
### Aliases: selPheno selPheno,Eval-method
### ** Examples
data(Ec_core, envir = environment()) #get Escherichia coli core metabolic model
bac <- Bac(Ec_core,deathrate=0.05,
minweight=0.05,growtype="exponential") #initialize a bacterium
arena <- Arena(n=20,m=20) #initialize the environment
arena <- addOrg(arena,bac,amount=10) #add 10 organisms
arena <- addSubs(arena,40) #add all possible substances
eval <- simEnv(arena,5)
selPheno(eval,time=5,type='ecoli_core_model',reduce=TRUE)
|
# Regression test for caret's "svmSpectrumString" model (string-kernel SVM
# from kernlab). Fits the Reuters text data under several resampling
# schemes and saves the fitted objects for later cross-version comparison.
library(caret)
# Timestamp stored with the results so runs can be told apart.
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
model <- "svmSpectrumString"
#########################################################################
library(kernlab)
data(reuters)
# Resampling setups: 3-fold CV, leave-one-out, no resampling (fixed tune),
# and random hyperparameter search.
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all")
cctrl2 <- trainControl(method = "LOOCV", savePredictions = TRUE)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random")
# Re-seeding before each fit keeps resampling indices comparable across fits.
set.seed(849)
test_class_cv_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl1)
test_class_pred <- predict(test_class_cv_model, matrix(reuters, ncol = 1))
set.seed(849)
test_class_rand <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrlR,
tuneLength = 4)
set.seed(849)
test_class_loo_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl2)
# Refit with no resampling at the CV-chosen tuning values.
set.seed(849)
test_class_none_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC")
test_class_none_pred <- predict(test_class_none_model, matrix(reuters, ncol = 1))
test_class_none_prob <- predict(test_class_none_model, matrix(reuters, ncol = 1), type = "prob")
# Sanity check: the fitted model must retain every outcome level.
test_levels <- levels(test_class_cv_model)
if(!all(levels(rlabels) %in% test_levels))
cat("wrong levels")
#########################################################################
# Persist every `test_` object plus session info for downstream comparison.
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
| /RegressionTests/Code/svmSpectrumString.R | no_license | Ragyi/caret | R | false | false | 2,107 | r | library(caret)
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
model <- "svmSpectrumString"
#########################################################################
library(kernlab)
data(reuters)
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all")
cctrl2 <- trainControl(method = "LOOCV", savePredictions = TRUE)
cctrl3 <- trainControl(method = "none",
classProbs = TRUE, summaryFunction = twoClassSummary)
cctrlR <- trainControl(method = "cv", number = 3, returnResamp = "all", search = "random")
set.seed(849)
test_class_cv_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl1)
test_class_pred <- predict(test_class_cv_model, matrix(reuters, ncol = 1))
set.seed(849)
test_class_rand <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrlR,
tuneLength = 4)
set.seed(849)
test_class_loo_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl2)
set.seed(849)
test_class_none_model <- train(matrix(reuters, ncol = 1), rlabels,
method = "svmSpectrumString",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
metric = "ROC")
test_class_none_pred <- predict(test_class_none_model, matrix(reuters, ncol = 1))
test_class_none_prob <- predict(test_class_none_model, matrix(reuters, ncol = 1), type = "prob")
test_levels <- levels(test_class_cv_model)
if(!all(levels(rlabels) %in% test_levels))
cat("wrong levels")
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
|
#' Interactive gadget for editing ggplot2 themes and layer aesthetics
#'
#' Launches a Shiny/miniUI gadget that lets the user edit the layer
#' aesthetics and theme elements of the ggplot objects previously stashed in
#' the package environment \code{.ggeditEnv}. On "Done" the gadget returns
#' the updated plots together with the updated layers, layer elements,
#' scales and themes (and, when verbose, the code that recreates them).
#'
#' @param viewer A shiny viewer function; defaults to the RStudio pane
#'   viewer.
#' @param ... Currently unused.
#' @return An object of class \code{"ggedit"}: a list containing the updated
#'   plots, layers, layer elements, scales and themes.
#' @import shiny
#' @import shinyBS
#' @importFrom graphics plot
ggeditGadget <- function(viewer=shiny::paneViewer(minHeight = 1000),...) {
# Working state lives in a private environment so the reactive callbacks
# below can mutate it without assigning into the global environment.
TEMPLIST<-new.env()
# Inputs are handed over through .ggeditEnv by the caller.
TEMPLIST$obj<-get(".p", envir = .ggeditEnv)
verbose<- get(".verbose", envir = .ggeditEnv)
showDefaults<- get(".showDefaults", envir = .ggeditEnv)
plotWidth<- get(".plotWidth", envir = .ggeditEnv)
plotHeight<- get(".plotHeight", envir = .ggeditEnv)
# ---- UI definition ----
# Action links open the modals defined in output$popElems / output$popTheme.
ui <-miniUI::miniPage(
miniUI::gadgetTitleBar("Edit ggplots themes and layer aesthetics"),
miniUI::miniContentPanel(
shiny::fluidPage(
shiny::div(class='row',
shiny::column(width=3,shiny::actionLink("updateElem","Update Plot Layer")),
shiny::column(width=2,shiny::actionLink("updateTheme","Update Plot Theme")),
shiny::column(width=2,shiny::actionLink("SetThemeGrid",'Update Grid Theme')),
shiny::column(width=3,shiny::actionLink("SetThemeGlobal",'Update Global Theme')),
shiny::column(width=2,shiny::actionLink('viewVerbose','View Layer Code'))
),
shiny::hr(),
shiny::conditionalPanel('input.viewVerbose',shiny::uiOutput("SimPrint")),
shiny::column(width=3,shiny::selectInput("activePlot","Choose Plot:",choices = split(1:length(TEMPLIST$obj),factor(names(TEMPLIST$obj),levels=names(TEMPLIST$obj),ordered=T)), selected = 1)),
shiny::column(width=6,shiny::uiOutput('layers')),
shiny::plotOutput(outputId = "Plot",height = "300px"),
shiny::uiOutput('popElems'),
shiny::uiOutput('popTheme')
)
)
)
# ---- Server ----
server = function(input, output, session) {
#Plots----
# Mutable copies of the input plots; nonLayers* accumulate scale_* objects
# (and their text representations) added during the session.
TEMPLIST$objList.new<- TEMPLIST$obj
TEMPLIST$nonLayers<-vector('list',length(TEMPLIST$objList.new))
TEMPLIST$nonLayersTxt<-vector('list',length(TEMPLIST$objList.new))
# Verbose code for the ORIGINAL layers, captured once for later comparison.
baseLayerVerbose=lapply(TEMPLIST$obj,function(x) lapply(x$layers,function(y) cloneLayer(y,verbose = T,showDefaults = showDefaults)))
# Index of the currently selected plot (defaults to 1 before input exists).
plotIdx=shiny::reactive({
if(is.null(input$activePlot)){
1
}else{
as.numeric(input$activePlot)
}
})
shiny::observe(TEMPLIST$obj.new<-TEMPLIST$objList.new[[plotIdx()]])
# Per-plot theme snapshot: global theme overlaid with each plot's own theme.
theme.now=ggplot2::theme_get()
TEMPLIST$obj.theme<-lapply(TEMPLIST$objList.new,function(p){
if(length(p$theme)>0) theme.now=theme.now+p$theme
themeFetch(theme.now)
})
#Layers----
# Radio buttons listing the geoms of the active plot.
output$layers=shiny::renderUI({
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
shiny::radioButtons("geoms","Choose layer(s):",choices = geom_list(TEMPLIST$obj.new),selected = geom_list(TEMPLIST$obj.new)[1],inline = T)
})
# Applies the modal's aesthetic inputs to the selected layer when the user
# presses "Update Layer". Single-valued aesthetics are written straight
# into the cloned layer; mapped aesthetics become scale_* layers built as
# text and eval(parse())'d (NOTE(review): eval(parse()) is fragile but is
# the mechanism used throughout this package -- left as is).
update.Layer=shiny::eventReactive(input$sendElem,{
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
layer.idx=which(geom_list(TEMPLIST$obj.new)==input$geoms)
numElem=unlist(lapply(TEMPLIST$obj.Elems[[layer.idx]],function(x) length(x$val[[1]])))
for(item in names(TEMPLIST$obj.Elems[[layer.idx]])){
if(numElem[item]==1) {
# Scalar aesthetic: set it directly on a clone of the layer.
newLayer=cloneLayer(TEMPLIST$obj.new$layers[[layer.idx]])
newLayer$aes_params[[item]]=eval(parse(text=paste0('input$pop',toupper(item))))
TEMPLIST$obj.new$layers[[layer.idx]]<-newLayer
}else{
if(TEMPLIST$obj.Elems[[layer.idx]][[item]][['class']][[1]]=='numeric'){
# Continuous aesthetic: either a brewer palette gradientn ...
if(input[[paste0('pop',toupper(item),'fixedPal')]]!='Manual'){
palItem=paste0("'",input[[paste0('pop',toupper(item),'fixedPal')]],"'")
palTxt=paste0("scale_",item,"_gradientn(colours=scales::brewer_pal(palette=",palItem,",direction=-1)(9)[1:5])")
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradientn")]]<-palTxt
suppressMessages({nL=eval(parse(text=palTxt))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradientn")]]<-nL
suppressMessages({eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+",palTxt)))})
}else{
# ... or a manual low/high two-colour gradient.
LowCol=paste0("'",input[[paste0('pop',input$pop,toupper(item),'Low')]],"'")
HighCol=paste0("'",input[[paste0('pop',input$pop,toupper(item),'High')]],"'")
ColTxt=paste0("scale_",item,"_gradient(low=",LowCol,",high=",HighCol,")")
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradient")]]<-ColTxt
suppressMessages({nL=eval(parse(text=ColTxt))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradient")]]<-nL
suppressMessages({eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+",ColTxt)))})
}
}else{
# Discrete aesthetic: collect the per-level inputs into a manual scale.
vals=unlist(lapply(names(input)[grepl(paste0('pop',toupper(item),'[1-9]'),names(input))],function(x) input[[x]]))
if(!item%in%c('size','shape','linetype')) vals=paste0("'",vals,"'")
if(item=='linetype') {
# Map linetype labels back to their integer codes.
vals=match(vals,c('0',scales::linetype_pal()(6)))-1
}
vals=paste0(vals,collapse=',')
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_manual")]]<-paste0("scale_",item,"_manual(values=c(",vals,"))")
suppressMessages({nL=eval(parse(text=paste0("scale_",item,"_manual(values=c(",vals,"))")))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_manual")]]<-nL
suppressMessages(eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+scale_",item,"_manual(values=c(",vals,"))"))))
}
}
}
TEMPLIST$objList.new[[as.numeric(input$activePlot)]]<-TEMPLIST$obj.new
return(TEMPLIST$objList.new)
})
# Modal listing the editable aesthetics of the chosen layer, grouped into
# colour pickers, sliders and select inputs.
output$popElems=shiny::renderUI({
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
TEMPLIST$obj.Elems<-fetch_aes_ggplotBuild(TEMPLIST$obj.new,geom_list(TEMPLIST$obj.new))
if(is.null(input$geoms)){
gIdx=1
}else{
gIdx=input$geoms
}
obj.elems=TEMPLIST$obj.Elems[[gIdx]]
# 'family' (font) is not editable here.
obj.elems=obj.elems[!names(obj.elems)%in%c('family')]
# Bucket each aesthetic by the widget type used to edit it.
obj.elemsL=list()
for(item in names(obj.elems)){
item_class=obj.elems[[item]]$class[[1]]
if(item%in%c('colour','color','fill')){
divName='divColor'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}else{
if(item_class=='data.frame'){
divName='divSlide'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}
if(item_class%in%c('character','factor')){
divName='divSelect'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}
}
obj.elemsL[[divName]][[item]]=obj.elems[[item]]
}
shinyBS::bsModal(id = "updateElemPopup", title = "Update Plot Layer", trigger = "updateElem", size = "large",
shiny::fluidRow(
lapply(obj.elemsL,function(objItem){
shiny::column(4,
lapply(names(objItem) ,FUN = function(item){
list(
lapply(arg.value(item,objItem),function(x) {
do.call(what = x[['type']],args = x[['args']])
})
)
})
)
})
),
shiny::div(align="right",shiny::actionButton("sendElem","Update Layer"))
)
})
#Theme----
# Builds a theme() call from the modal inputs (as text) and applies it to
# the active plot.
update.Theme=shiny::eventReactive(input$sendTheme,{
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
strThemeCallList=lapply(names(TEMPLIST$obj.theme[[plotIdx()]]),function(item){
themeNewVal(TEMPLIST$obj.theme[[plotIdx()]][item],TEMPLIST$obj.new,input)
})
strThemeCall=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+theme(",paste0(unlist(strThemeCallList),collapse = ","),")")
eval(parse(text=strThemeCall))
TEMPLIST$objList.new[[as.numeric(input$activePlot)]]<-TEMPLIST$obj.new
TEMPLIST$themeUpdate<-lapply(TEMPLIST$objList.new,function(p) p$theme)
return(TEMPLIST$objList.new)
})
# Push the active plot's theme into the session-wide ggplot2 default.
shiny::observeEvent(input$SetThemeGlobal,{
if(length(TEMPLIST$obj.new$theme)>0) theme.now=theme.now+TEMPLIST$obj.new$theme
ggplot2::theme_set(ggplot2::theme_get()%+replace%theme.now)
})
# Apply the active plot's theme to every plot in the gadget.
update.ThemeGrid=shiny::eventReactive(input$SetThemeGrid,{
TEMPLIST$p.now<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
if(length(TEMPLIST$p.now$theme)>0) theme.now=theme.now+TEMPLIST$p.now$theme
for(i in 1:length(TEMPLIST$objList.new)){
TEMPLIST$objList.new[[i]]<- TEMPLIST$objList.new[[i]]+theme.now
TEMPLIST$themeUpdate[[i]]<- TEMPLIST$objList.new[[i]]$theme
}
return(TEMPLIST$objList.new)
})
# Modal with one tab per theme element group.
output$popTheme=shiny::renderUI({
shinyBS::bsModal(id = "updateThemePopup", title = shiny::HTML('Update Plot Theme <a href="http://docs.ggplot2.org/0.9.3.1/theme.html" target="_blank">(help)</a>'), trigger = "updateTheme", size = "large",
do.call(shiny::tabsetPanel,
unlist(lapply(1:length(TEMPLIST$obj.theme[[plotIdx()]]),FUN = function(j){
if(themeListDepth(TEMPLIST$obj.theme[[plotIdx()]][j])>2){
list(themeMakePanel(TEMPLIST$obj.theme[[plotIdx()]][j]))
}else{
unlist(lapply(j, function(i) {themeMakePanel(TEMPLIST$obj.theme[[plotIdx()]][i])}),F)}
}),F)
),
shiny::hr(),
shiny::div(align="right",shiny::actionButton("sendTheme","Set Theme"))
)
})
#Render Plot----
# Initial render; the observers below re-render after each kind of update.
output$Plot=shiny::renderPlot({
plot(as.ggedit(TEMPLIST$objList.new))
},width=plotWidth,height=plotHeight)
shiny::observeEvent(input$updateElem,{
output$Plot=shiny::renderPlot({
if(input$sendElem==0){
plot(as.ggedit(TEMPLIST$objList.new))
}else{
pList.out=update.Layer()
plot(as.ggedit(pList.out))
}
},width=plotWidth,height=plotHeight)
})
shiny::observeEvent(input$updateTheme,{
output$Plot=shiny::renderPlot({
if(input$sendTheme==0){
plot(as.ggedit(TEMPLIST$objList.new))
}else{
pList.out=update.Theme()
plot(as.ggedit(pList.out))
}
},width=plotWidth,height=plotHeight)
})
shiny::observeEvent(input$SetThemeGrid,{
pList.out=update.ThemeGrid()
output$Plot=shiny::renderPlot({plot(as.ggedit(pList.out))},width=plotWidth,height=plotHeight)
})
# "Done": assemble the ggedit return value and close the gadget.
shiny::observeEvent(input$done, {
UpdatedPlots=as.ggedit(TEMPLIST$objList.new)
class(UpdatedPlots)=c("ggedit",class(UpdatedPlots))
ggeditOut=list(UpdatedPlots=UpdatedPlots,
UpdatedLayers=layersListObj(obj = TEMPLIST$objList.new,lbl=names(TEMPLIST$objList.new)),
UpdatedLayersElements=layersList(TEMPLIST$objList.new)
)
if(verbose) ggeditOut$UpdatedLayerCalls=lapply(TEMPLIST$objList.new,function(p) lapply(p$layer,function(item) cloneLayer(l = item,verbose = T,showDefaults = showDefaults)))
names(TEMPLIST$nonLayers)<-names(TEMPLIST$nonLayersTxt)<-names(TEMPLIST$objList.new)
ggeditOut$updatedScales=TEMPLIST$nonLayers
if(verbose) ggeditOut$UpdatedScalesCalls=TEMPLIST$nonLayersTxt
# Themes are only attached when the user actually edited a theme.
if(exists('themeUpdate',envir = TEMPLIST)) {
ggeditOut$UpdatedThemes=TEMPLIST$themeUpdate
if(verbose){
ggeditOut$UpdatedThemeCalls=lapply(names(TEMPLIST$objList.new),function(lp,input){
p=TEMPLIST$objList.new[[lp]]
if(length(p$theme)>0){
if(!showDefaults){
# Only report the diff against the plot's original theme.
themeBase=ggplot2::theme_get()
if(length(TEMPLIST$obj[[lp]]$theme)>0) themeBase=themeBase+TEMPLIST$obj[[lp]]$theme
compare(p$theme,themeBase,verbose=T)
}else{
x.theme=themeFetch(p$theme)
x=lapply(names(x.theme),function(item){themeNewVal(x.theme[item],p,input)})
paste0("theme(",paste0(unlist(x),collapse = ","),")")
}
}else{
c('list()')
}
},input)
names(ggeditOut$UpdatedThemeCalls)=names(TEMPLIST$objList.new)
}
}
class(ggeditOut)=c("ggedit",class(ggeditOut))
#rm(list = ls(envir = .GlobalEnv)[ls(envir = .GlobalEnv)%in%c('obj.new','obj.theme','objList.new','obj.Elems','themeUpdate','nonLayers','nonLayersTxt')],envir = .GlobalEnv)
shiny::stopApp(ggeditOut)
})
# "Cancel": close the gadget returning NULL.
shiny::observeEvent(input$cancel,{
#rm(list = ls(envir = .GlobalEnv)[ls(envir = .GlobalEnv)%in%c('obj.new','obj.theme','objList.new','obj.Elems','themeUpdate','nonLayers','nonLayersTxt')],envir = .GlobalEnv)
shiny::stopApp(NULL)
})
# Original vs edited verbose code for the selected layer (shown in the
# "View Layer Code" panel). Reading input$updateElem/updateElemPopup makes
# this reactive refresh when the layer modal is used.
simTxt=shiny::reactive({
LayerVerbose<-lapply(TEMPLIST$objList.new,function(p) lapply(p$layer,function(item) cloneLayer(l = item,verbose = T,showDefaults = showDefaults)))
if(is.null(input$activePlot)){
aP=1
}else{
aP=as.numeric(input$activePlot)
}
if(is.null(input$geoms)){
l=1
}else{
l=which(geom_list(TEMPLIST$obj.new)==input$geoms)
}
a=input$updateElem
a1=input$updateElemPopup
if(length(l)==0) l=1
strNew=strBase=''
if(length(LayerVerbose)>0) strNew=LayerVerbose[[aP]][[l]]
if(length(baseLayerVerbose)>0) strBase=baseLayerVerbose[[aP]][[l]]
return(list(Original=strBase,Edited=strNew))
})
# Render the code view: plain text on Windows, an Ace editor elsewhere.
output$SimPrint <- shiny::renderUI({
junk=''
if(length(simTxt())>0) junk=textConnection(utils::capture.output(simTxt()))
toace=paste0(readLines(junk),collapse='\n')
if(input$viewVerbose%%2==1){
if (Sys.info()[1] == "Windows"){
output$codeout<-shiny::renderText({toace})
shiny::verbatimTextOutput('codeout')
}else{
shinyAce::aceEditor(outputId = "codeout",value=toace,mode = "r", theme = "chrome", height = "100px", fontSize = 12)
}
}
})
}
shiny::runGadget(ui, server, stopOnCancel = FALSE, viewer = viewer)
}
| /R/ggeditGadget.R | no_license | ktaranov/ggedit | R | false | false | 15,627 | r | #' @import shiny
#' @import shinyBS
#' @importFrom graphics plot
ggeditGadget <- function(viewer=shiny::paneViewer(minHeight = 1000),...) {
TEMPLIST<-new.env()
TEMPLIST$obj<-get(".p", envir = .ggeditEnv)
verbose<- get(".verbose", envir = .ggeditEnv)
showDefaults<- get(".showDefaults", envir = .ggeditEnv)
plotWidth<- get(".plotWidth", envir = .ggeditEnv)
plotHeight<- get(".plotHeight", envir = .ggeditEnv)
ui <-miniUI::miniPage(
miniUI::gadgetTitleBar("Edit ggplots themes and layer aesthetics"),
miniUI::miniContentPanel(
shiny::fluidPage(
shiny::div(class='row',
shiny::column(width=3,shiny::actionLink("updateElem","Update Plot Layer")),
shiny::column(width=2,shiny::actionLink("updateTheme","Update Plot Theme")),
shiny::column(width=2,shiny::actionLink("SetThemeGrid",'Update Grid Theme')),
shiny::column(width=3,shiny::actionLink("SetThemeGlobal",'Update Global Theme')),
shiny::column(width=2,shiny::actionLink('viewVerbose','View Layer Code'))
),
shiny::hr(),
shiny::conditionalPanel('input.viewVerbose',shiny::uiOutput("SimPrint")),
shiny::column(width=3,shiny::selectInput("activePlot","Choose Plot:",choices = split(1:length(TEMPLIST$obj),factor(names(TEMPLIST$obj),levels=names(TEMPLIST$obj),ordered=T)), selected = 1)),
shiny::column(width=6,shiny::uiOutput('layers')),
shiny::plotOutput(outputId = "Plot",height = "300px"),
shiny::uiOutput('popElems'),
shiny::uiOutput('popTheme')
)
)
)
server = function(input, output, session) {
#Plots----
TEMPLIST$objList.new<- TEMPLIST$obj
TEMPLIST$nonLayers<-vector('list',length(TEMPLIST$objList.new))
TEMPLIST$nonLayersTxt<-vector('list',length(TEMPLIST$objList.new))
baseLayerVerbose=lapply(TEMPLIST$obj,function(x) lapply(x$layers,function(y) cloneLayer(y,verbose = T,showDefaults = showDefaults)))
plotIdx=shiny::reactive({
if(is.null(input$activePlot)){
1
}else{
as.numeric(input$activePlot)
}
})
shiny::observe(TEMPLIST$obj.new<-TEMPLIST$objList.new[[plotIdx()]])
theme.now=ggplot2::theme_get()
TEMPLIST$obj.theme<-lapply(TEMPLIST$objList.new,function(p){
if(length(p$theme)>0) theme.now=theme.now+p$theme
themeFetch(theme.now)
})
#Layers----
output$layers=shiny::renderUI({
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
shiny::radioButtons("geoms","Choose layer(s):",choices = geom_list(TEMPLIST$obj.new),selected = geom_list(TEMPLIST$obj.new)[1],inline = T)
})
update.Layer=shiny::eventReactive(input$sendElem,{
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
layer.idx=which(geom_list(TEMPLIST$obj.new)==input$geoms)
numElem=unlist(lapply(TEMPLIST$obj.Elems[[layer.idx]],function(x) length(x$val[[1]])))
for(item in names(TEMPLIST$obj.Elems[[layer.idx]])){
if(numElem[item]==1) {
newLayer=cloneLayer(TEMPLIST$obj.new$layers[[layer.idx]])
newLayer$aes_params[[item]]=eval(parse(text=paste0('input$pop',toupper(item))))
TEMPLIST$obj.new$layers[[layer.idx]]<-newLayer
}else{
if(TEMPLIST$obj.Elems[[layer.idx]][[item]][['class']][[1]]=='numeric'){
if(input[[paste0('pop',toupper(item),'fixedPal')]]!='Manual'){
palItem=paste0("'",input[[paste0('pop',toupper(item),'fixedPal')]],"'")
palTxt=paste0("scale_",item,"_gradientn(colours=scales::brewer_pal(palette=",palItem,",direction=-1)(9)[1:5])")
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradientn")]]<-palTxt
suppressMessages({nL=eval(parse(text=palTxt))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradientn")]]<-nL
suppressMessages({eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+",palTxt)))})
}else{
LowCol=paste0("'",input[[paste0('pop',input$pop,toupper(item),'Low')]],"'")
HighCol=paste0("'",input[[paste0('pop',input$pop,toupper(item),'High')]],"'")
ColTxt=paste0("scale_",item,"_gradient(low=",LowCol,",high=",HighCol,")")
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradient")]]<-ColTxt
suppressMessages({nL=eval(parse(text=ColTxt))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_gradient")]]<-nL
suppressMessages({eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+",ColTxt)))})
}
}else{
vals=unlist(lapply(names(input)[grepl(paste0('pop',toupper(item),'[1-9]'),names(input))],function(x) input[[x]]))
if(!item%in%c('size','shape','linetype')) vals=paste0("'",vals,"'")
if(item=='linetype') {
vals=match(vals,c('0',scales::linetype_pal()(6)))-1
}
vals=paste0(vals,collapse=',')
TEMPLIST$nonLayersTxt[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_manual")]]<-paste0("scale_",item,"_manual(values=c(",vals,"))")
suppressMessages({nL=eval(parse(text=paste0("scale_",item,"_manual(values=c(",vals,"))")))})
TEMPLIST$nonLayers[[as.numeric(input$activePlot)]][[paste0("scale_",item,"_manual")]]<-nL
suppressMessages(eval(parse(text=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+scale_",item,"_manual(values=c(",vals,"))"))))
}
}
}
TEMPLIST$objList.new[[as.numeric(input$activePlot)]]<-TEMPLIST$obj.new
return(TEMPLIST$objList.new)
})
output$popElems=shiny::renderUI({
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
TEMPLIST$obj.Elems<-fetch_aes_ggplotBuild(TEMPLIST$obj.new,geom_list(TEMPLIST$obj.new))
if(is.null(input$geoms)){
gIdx=1
}else{
gIdx=input$geoms
}
obj.elems=TEMPLIST$obj.Elems[[gIdx]]
obj.elems=obj.elems[!names(obj.elems)%in%c('family')]
obj.elemsL=list()
for(item in names(obj.elems)){
item_class=obj.elems[[item]]$class[[1]]
if(item%in%c('colour','color','fill')){
divName='divColor'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}else{
if(item_class=='data.frame'){
divName='divSlide'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}
if(item_class%in%c('character','factor')){
divName='divSelect'
if(is.null(obj.elemsL[[divName]])) obj.elemsL[[divName]]=list()
}
}
obj.elemsL[[divName]][[item]]=obj.elems[[item]]
}
shinyBS::bsModal(id = "updateElemPopup", title = "Update Plot Layer", trigger = "updateElem", size = "large",
shiny::fluidRow(
lapply(obj.elemsL,function(objItem){
shiny::column(4,
lapply(names(objItem) ,FUN = function(item){
list(
lapply(arg.value(item,objItem),function(x) {
do.call(what = x[['type']],args = x[['args']])
})
)
})
)
})
),
shiny::div(align="right",shiny::actionButton("sendElem","Update Layer"))
)
})
#Theme----
update.Theme=shiny::eventReactive(input$sendTheme,{
TEMPLIST$obj.new<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
strThemeCallList=lapply(names(TEMPLIST$obj.theme[[plotIdx()]]),function(item){
themeNewVal(TEMPLIST$obj.theme[[plotIdx()]][item],TEMPLIST$obj.new,input)
})
strThemeCall=paste0("TEMPLIST$obj.new<-TEMPLIST$obj.new+theme(",paste0(unlist(strThemeCallList),collapse = ","),")")
eval(parse(text=strThemeCall))
TEMPLIST$objList.new[[as.numeric(input$activePlot)]]<-TEMPLIST$obj.new
TEMPLIST$themeUpdate<-lapply(TEMPLIST$objList.new,function(p) p$theme)
return(TEMPLIST$objList.new)
})
shiny::observeEvent(input$SetThemeGlobal,{
if(length(TEMPLIST$obj.new$theme)>0) theme.now=theme.now+TEMPLIST$obj.new$theme
ggplot2::theme_set(ggplot2::theme_get()%+replace%theme.now)
})
update.ThemeGrid=shiny::eventReactive(input$SetThemeGrid,{
TEMPLIST$p.now<-TEMPLIST$objList.new[[as.numeric(input$activePlot)]]
if(length(TEMPLIST$p.now$theme)>0) theme.now=theme.now+TEMPLIST$p.now$theme
for(i in 1:length(TEMPLIST$objList.new)){
TEMPLIST$objList.new[[i]]<- TEMPLIST$objList.new[[i]]+theme.now
TEMPLIST$themeUpdate[[i]]<- TEMPLIST$objList.new[[i]]$theme
}
return(TEMPLIST$objList.new)
})
output$popTheme=shiny::renderUI({
shinyBS::bsModal(id = "updateThemePopup", title = shiny::HTML('Update Plot Theme <a href="http://docs.ggplot2.org/0.9.3.1/theme.html" target="_blank">(help)</a>'), trigger = "updateTheme", size = "large",
do.call(shiny::tabsetPanel,
unlist(lapply(1:length(TEMPLIST$obj.theme[[plotIdx()]]),FUN = function(j){
if(themeListDepth(TEMPLIST$obj.theme[[plotIdx()]][j])>2){
list(themeMakePanel(TEMPLIST$obj.theme[[plotIdx()]][j]))
}else{
unlist(lapply(j, function(i) {themeMakePanel(TEMPLIST$obj.theme[[plotIdx()]][i])}),F)}
}),F)
),
shiny::hr(),
shiny::div(align="right",shiny::actionButton("sendTheme","Set Theme"))
)
})
#Render Plot----
output$Plot=shiny::renderPlot({
plot(as.ggedit(TEMPLIST$objList.new))
},width=plotWidth,height=plotHeight)
shiny::observeEvent(input$updateElem,{
output$Plot=shiny::renderPlot({
if(input$sendElem==0){
plot(as.ggedit(TEMPLIST$objList.new))
}else{
pList.out=update.Layer()
plot(as.ggedit(pList.out))
}
},width=plotWidth,height=plotHeight)
})
shiny::observeEvent(input$updateTheme,{
output$Plot=shiny::renderPlot({
if(input$sendTheme==0){
plot(as.ggedit(TEMPLIST$objList.new))
}else{
pList.out=update.Theme()
plot(as.ggedit(pList.out))
}
},width=plotWidth,height=plotHeight)
})
shiny::observeEvent(input$SetThemeGrid,{
pList.out=update.ThemeGrid()
output$Plot=shiny::renderPlot({plot(as.ggedit(pList.out))},width=plotWidth,height=plotHeight)
})
shiny::observeEvent(input$done, {
UpdatedPlots=as.ggedit(TEMPLIST$objList.new)
class(UpdatedPlots)=c("ggedit",class(UpdatedPlots))
ggeditOut=list(UpdatedPlots=UpdatedPlots,
UpdatedLayers=layersListObj(obj = TEMPLIST$objList.new,lbl=names(TEMPLIST$objList.new)),
UpdatedLayersElements=layersList(TEMPLIST$objList.new)
)
if(verbose) ggeditOut$UpdatedLayerCalls=lapply(TEMPLIST$objList.new,function(p) lapply(p$layer,function(item) cloneLayer(l = item,verbose = T,showDefaults = showDefaults)))
names(TEMPLIST$nonLayers)<-names(TEMPLIST$nonLayersTxt)<-names(TEMPLIST$objList.new)
ggeditOut$updatedScales=TEMPLIST$nonLayers
if(verbose) ggeditOut$UpdatedScalesCalls=TEMPLIST$nonLayersTxt
if(exists('themeUpdate',envir = TEMPLIST)) {
ggeditOut$UpdatedThemes=TEMPLIST$themeUpdate
if(verbose){
ggeditOut$UpdatedThemeCalls=lapply(names(TEMPLIST$objList.new),function(lp,input){
p=TEMPLIST$objList.new[[lp]]
if(length(p$theme)>0){
if(!showDefaults){
themeBase=ggplot2::theme_get()
if(length(TEMPLIST$obj[[lp]]$theme)>0) themeBase=themeBase+TEMPLIST$obj[[lp]]$theme
compare(p$theme,themeBase,verbose=T)
}else{
x.theme=themeFetch(p$theme)
x=lapply(names(x.theme),function(item){themeNewVal(x.theme[item],p,input)})
paste0("theme(",paste0(unlist(x),collapse = ","),")")
}
}else{
c('list()')
}
},input)
names(ggeditOut$UpdatedThemeCalls)=names(TEMPLIST$objList.new)
}
}
class(ggeditOut)=c("ggedit",class(ggeditOut))
#rm(list = ls(envir = .GlobalEnv)[ls(envir = .GlobalEnv)%in%c('obj.new','obj.theme','objList.new','obj.Elems','themeUpdate','nonLayers','nonLayersTxt')],envir = .GlobalEnv)
shiny::stopApp(ggeditOut)
})
shiny::observeEvent(input$cancel,{
#rm(list = ls(envir = .GlobalEnv)[ls(envir = .GlobalEnv)%in%c('obj.new','obj.theme','objList.new','obj.Elems','themeUpdate','nonLayers','nonLayersTxt')],envir = .GlobalEnv)
shiny::stopApp(NULL)
})
simTxt=shiny::reactive({
LayerVerbose<-lapply(TEMPLIST$objList.new,function(p) lapply(p$layer,function(item) cloneLayer(l = item,verbose = T,showDefaults = showDefaults)))
if(is.null(input$activePlot)){
aP=1
}else{
aP=as.numeric(input$activePlot)
}
if(is.null(input$geoms)){
l=1
}else{
l=which(geom_list(TEMPLIST$obj.new)==input$geoms)
}
a=input$updateElem
a1=input$updateElemPopup
if(length(l)==0) l=1
strNew=strBase=''
if(length(LayerVerbose)>0) strNew=LayerVerbose[[aP]][[l]]
if(length(baseLayerVerbose)>0) strBase=baseLayerVerbose[[aP]][[l]]
return(list(Original=strBase,Edited=strNew))
})
output$SimPrint <- shiny::renderUI({
junk=''
if(length(simTxt())>0) junk=textConnection(utils::capture.output(simTxt()))
toace=paste0(readLines(junk),collapse='\n')
if(input$viewVerbose%%2==1){
if (Sys.info()[1] == "Windows"){
output$codeout<-shiny::renderText({toace})
shiny::verbatimTextOutput('codeout')
}else{
shinyAce::aceEditor(outputId = "codeout",value=toace,mode = "r", theme = "chrome", height = "100px", fontSize = 12)
}
}
})
}
shiny::runGadget(ui, server, stopOnCancel = FALSE, viewer = viewer)
}
|
# Session 2 Assignment 2

# 1. Read multiple JSON files from a directory and combine them into one dataset.
#    Files text1, text2, text3 live in the JSON directory.
library(jsonlite)
library(dplyr)
json_files <- list("D:\\Data Analytics with RET\\Assignment\\JSON\\text1.json",
                   "D:\\Data Analytics with RET\\Assignment\\JSON\\text2.json",
                   "D:\\Data Analytics with RET\\Assignment\\JSON\\text3.json")
# BUG FIX: the accumulator must be created once, before the loop; creating it
# inside the loop discarded every file except the last one.  Rows from each
# file are stacked with rbind() (cbind() would place the files side by side
# instead of building one dataset).
z <- data.frame()
for (json_file in json_files) {
  a <- read_json(json_file, simplifyVector = TRUE)
  z <- rbind(z, as.data.frame(a))
}
View(z)
# ---------------------------------------------------------------
# 2. Parse the following JSON into a data frame.
# BUG FIX: the original literal was not valid JSON (NULL instead of null, an
# unquoted title, and an unquoted date inside the array), so fromJSON() errored.
json <- '[
{"name" : null,
"release_date_local" : null,
"title" : "3(2011)",
"opening_weekend_take" : 1234,
"year" : 2011,
"release_date_wide" : ["2011-09-16"],
"gross" : 59954
}
]'
# BUG FIX: `simplifyDataFrame = simplifyVector` referenced an undefined
# object; the intended value is TRUE.
mydf <- fromJSON(json, simplifyVector = TRUE, simplifyDataFrame = TRUE)
mydf
# 3. Write a script for Variable Binning using R.
bin <- c(1:100)
bin
# Assign each value of x to one of four bins (<=25, <=50, <=75, >75).
# BUG FIX: the original looped over a fixed 1:100, discarded every ifelse()
# result, and exited after the first iteration via `break`, so it always
# returned NULL.  This vectorised version labels any numeric input.
binning <- function(x)
{
  groups <- cut(x, breaks = c(-Inf, 25, 50, 75, Inf),
                labels = paste0("group", 1:4))
  paste(x, groups)
}
binning(bin) | /Assignment 2.2.R | no_license | munmun55/Read-multiple-json-files-into-a-working-directory-for-further-converting-into-a-dataset | R | false | false | 1,291 | r | # Session 2 Assignment 2
# 1. Read multiple JSON files into a directory to convert into a dataset
# I have files text1, text2, text3 in the directory JSON.
library(jsonlite)
library(dplyr)
# NOTE(review): `ls` shadows base::ls() for the rest of the session.
ls <- list("D:\\Data Analytics with RET\\Assignment\\JSON\\text1.json",
           "D:\\Data Analytics with RET\\Assignment\\JSON\\text2.json",
           "D:\\Data Analytics with RET\\Assignment\\JSON\\text3.json")
# NOTE(review): `z` is re-created on every pass, so only the final file's
# data survives the loop; it should be initialised once, before the loop,
# to actually combine all three files.
for (i in ls){
  z <- data.frame()
  a <- read_json(i, simplifyVector = TRUE)
  z <- cbind(z,a)
}
# Displays only the last file read, because of the accumulator reset above.
View(a)
# ---------------------------------------------------------------
#2. Parse the following JSON into a data frame.
# NOTE(review): this literal is not valid JSON (NULL should be null, the
# title value and the date need quoting), so fromJSON() will error on it.
json <- '[
{"name" : NULL,
"release_date_local" : NULL,
"title" : 3(2011),
"opening_weekend_take" : 1234,
"year" : 2011,
"release_date_wide" : [2011-09-16],
"gross" : 59954
}
]'
# NOTE(review): `simplifyDataFrame = simplifyVector` references an object
# that does not exist; the intended value is presumably TRUE.
mydf <- fromJSON(json, simplifyVector = TRUE, simplifyDataFrame = simplifyVector)
mydf
# 3. Write a script for Variable Binning using R.
bin <- c(1:100)
bin
# NOTE(review): the nested ifelse() result is never stored or returned, and
# `break` stops the loop after the first iteration, so binning() always
# returns NULL; the input `x` is also ignored in favour of a fixed 1:100.
binning <- function(x)
{
  for(i in c(1:100))
  {
    ifelse(i <= 25, paste(i,"group1"),
           ifelse(i <= 50, paste(i,"group2"),
                  ifelse(i <= 75, paste(i,"group3"),
                         paste(i,"group4"))))
    break
  }
}
binning(bin) |
#' process.mult.nominal.modif.variable
#'
#' Parse one modified multi-valued nominal variable from a parsed ASSO XML
#' file and return its per-row category frequency distributions.
#'
#' @param labels Logical; if `TRUE` read the category labels, otherwise the
#'   category names.
#' @param number.of.rows Number of individuals (rows) in the individual matrix.
#' @param parsed.xml An XML document already parsed (e.g. with `XML::xmlParse`).
#' @param variable.index 1-based position of the variable in the ASSO file.
#' @param variable.name Name used for the variable's column header.
#' @return A data frame with a leading "$H" marker column, a column (named
#'   after the variable) holding the category count, and one rounded
#'   frequency column per category.
#' @keywords internal
#' @importFrom XML xmlSApply
process.mult.nominal.modif.variable <- function(labels, number.of.rows, parsed.xml,
                                                variable.index, variable.name) {
  aux <- list()
  # "$H" marks a histogram/modal-valued symbolic variable.
  aux[[1]] <- rep("$H", number.of.rows)
  # Category identifiers are stored either as <label> or <name> nodes; the
  # XPath only differs in its final element, so build it once.
  node.tag <- if (labels) "label" else "name"
  categories <- xpathSApply(parsed.xml, paste0(
    "/assofile/variables/stvar[",
    variable.index, "]/mult_nominal_Modif/nominal-desc/list-nom/", node.tag
  ), xmlValue)
  aux[[2]] <- rep(length(categories), number.of.rows)
  nodes <- getNodeSet(parsed.xml, paste0(
    "/assofile/indiv_mat/ligmat/valmat[", variable.index,
    "]"
  ))
  # For one individual, extract the frequency of every category, padding
  # categories absent from the XML with zero frequency.
  get.distributions <- function(node) {
    if (length(node["val_list_modal"]) == 0) {
      return(rep(NA, length(categories)))
    }
    moda.nodes <- as.numeric(sapply(
      xmlSApply(node, function(x) x["no_moda"]),
      xmlValue
    ))
    frequencies <- as.numeric(sapply(
      xmlSApply(node, function(x) x["frequency"]),
      xmlValue
    ))
    # seq_along() is safe when there are zero categories, unlike the
    # original 1:length(categories), which would yield c(1, 0).
    missing.categories.indexes <- setdiff(seq_along(categories), moda.nodes)
    for (missing.cat.index in missing.categories.indexes) {
      frequencies <- append(frequencies, 0, after = missing.cat.index - 1)
    }
    frequencies
  }
  all.frequencies <- t(round(sapply(nodes, get.distributions), 3))
  aux <- data.frame(c(aux, as.data.frame(all.frequencies)))
  colnames(aux) <- c("$H", variable.name, categories)
  aux
}
| /R/process.mult.nominal.modif.variable.R | no_license | Frenchyy1/RSDA | R | false | false | 1,699 | r | #' process.mult.nominal.modif.variable
#' @keywords internal
#' @importFrom XML xmlSApply
# Parse one modified multi-valued nominal variable from a parsed ASSO XML
# file and return its per-row category frequency distributions as a data
# frame: a "$H" marker column, a category-count column named after the
# variable, and one rounded frequency column per category.
process.mult.nominal.modif.variable <- function(labels, number.of.rows, parsed.xml,
                                                variable.index, variable.name) {
  aux <- list()
  # "$H" marks a histogram/modal-valued symbolic variable.
  aux[[1]] <- rep("$H", number.of.rows)
  # Category identifiers are stored either as <label> or <name> nodes.
  if (labels) {
    categories <- xpathSApply(parsed.xml, paste0(
      "/assofile/variables/stvar[",
      variable.index, "]/mult_nominal_Modif/nominal-desc/list-nom/label"
    ), xmlValue)
  } else {
    categories <- xpathSApply(parsed.xml, paste0(
      "/assofile/variables/stvar[",
      variable.index, "]/mult_nominal_Modif/nominal-desc/list-nom/name"
    ), xmlValue)
  }
  aux[[2]] <- rep(length(categories), number.of.rows)
  nodes <- getNodeSet(parsed.xml, paste0(
    "/assofile/indiv_mat/ligmat/valmat[", variable.index,
    "]"
  ))
  # For one individual, extract the frequency of every category, padding
  # categories that are absent from the XML with zero frequency.
  get.distributions <- function(node) {
    if (length(node["val_list_modal"]) == 0) {
      return(rep(NA, length(categories)))
    } else {
      moda.nodes <- as.numeric(sapply(
        xmlSApply(node, function(x) x["no_moda"]),
        xmlValue
      ))
      frequencies <- as.numeric(sapply(
        xmlSApply(node, function(x) x["frequency"]),
        xmlValue
      ))
      # NOTE(review): 1:length(categories) yields c(1, 0) when there are
      # zero categories; seq_along(categories) would be safer here.
      missing.categories.indexes <- setdiff(1:length(categories), moda.nodes)
      for (missing.cat.index in missing.categories.indexes) {
        frequencies <- append(frequencies, 0, after = missing.cat.index - 1)
      }
      return(frequencies)
    }
  }
  all.frequencies <- t(round(sapply(nodes, get.distributions), 3))
  aux <- data.frame(c(aux, as.data.frame(all.frequencies)))
  colnames(aux) <- c("$H", variable.name, categories)
  return(aux)
}
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.2, gaussian family)
# on the thyroid training set and append the fitted path summary to the
# classifier log file.
library(glmnet)

thyroid_data <- read.table("./TrainingSet/RF/thyroid.csv", header = TRUE, sep = ",")

# Column 1 holds the response; predictor columns start at column 4.
predictors <- as.matrix(thyroid_data[, 4:ncol(thyroid_data)])
response <- as.matrix(thyroid_data[, 1])

# Fixed seed keeps the cross-validation fold assignment reproducible.
set.seed(123)
cv_fit <- cv.glmnet(predictors, response, nfolds = 10, type.measure = "mse",
                    alpha = 0.2, family = "gaussian", standardize = TRUE)

# Append the glmnet coefficient-path summary to the model log.
sink('./Model/EN/Classifier/thyroid/thyroid_033.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Classifier/thyroid/thyroid_033.R | no_license | leon1003/QSMART | R | false | false | 352 | r | library(glmnet)
# Load the thyroid training set; column 1 is the response, predictors
# start at column 4.
mydata = read.table("./TrainingSet/RF/thyroid.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed keeps the cross-validation fold assignment reproducible.
set.seed(123)
# 10-fold CV elastic net (alpha = 0.2, gaussian family, MSE loss).
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.2,family="gaussian",standardize=TRUE)
# Append the fitted coefficient-path summary to the model log file.
sink('./Model/EN/Classifier/thyroid/thyroid_033.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Compute a pairwise image-similarity metric (FUN, default ssim) between the
# 20 wallpaper exemplars of one symmetry group.
#   duplicates=TRUE  -> all 20x20 ordered pairs (incl. self-comparisons);
#   duplicates=FALSE -> the 190 unordered pairs i < j only.
#   frame=TRUE       -> long data frame; frame=FALSE (duplicates only) -> 20x20 matrix.
# Side effects: sources helper scripts from analysis.dir and reads the
# wallpaper metadata CSV from a hard-coded path.
compute.image.metric <- function(this_group, sf = 1,
                                 duplicates=FALSE, frame=TRUE,
                                 FUN=ssim, analysis.dir = 'analysis/'){
  require(tidyverse)
  source(paste(analysis.dir, 'ssim.R', sep=""))
  source(paste(analysis.dir, 'image.dot.prod.R', sep=""))
  source(paste(analysis.dir, 'load.wp.R', sep=""))
  source(paste(analysis.dir, 'sub.sample.wp.R', sep=""))
  wp_df <- readr::read_csv('analysis/data/wallpapers-on-databrary.csv')
  wp_this_group <- dplyr::filter(wp_df, group == tolower(this_group))
  # NOTE(review): `start` appears to be unused.
  start <- 2
  row.index <- 1
  if (duplicates) {
    if (frame){
      # Long format: every ordered pair (i, j), 400 rows.
      out <- array(dim=c(20*20,3))
      for (i in 1:20){
        i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
        i1_name <- wp_this_group$name[i]
        for (j in 1:20){
          i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
          i2_name <- wp_this_group$name[j]
          out[row.index, 1] <- i1_name
          out[row.index, 2] <- i2_name
          #out[row.index, 3] <- FUN(i1, i2)
          out[row.index, 3] <- do.call(FUN, list(i1, i2))
          row.index <- row.index + 1
        }
      }
      out <- data.frame(out)
      names(out) <- c('Exemplar.Row', 'Exemplar.Col', 'Measure.Val')
      out$Group <- rep(this_group, 400)
    } else {
      # Matrix format: full 20x20 similarity matrix, no names attached.
      out <- array(dim=c(20,20))
      for (i in 1:20){
        i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
        for (j in 1:20){
          i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
          #out[i, j] <- FUN(i1, i2)
          out[i, j] <- do.call(FUN, list(i1, i2))
        }
      }
    }
  } else {
    # Unordered pairs only: 20 choose 2 = 190 rows.
    out <- array(dim=c(190,3))
    for (i in 1:19){
      i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
      i1_name <- wp_this_group$name[i]
      # NOTE(review): `i+1:(20-i)` parses as i + (1:(20-i)), i.e. (i+1):20,
      # which is correct but precedence-dependent; `(i+1):20` would be clearer.
      for (j in i+1:(20-i)){
        i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
        i2_name <- wp_this_group$name[j]
        out[row.index, 1] <- i1_name
        out[row.index, 2] <- i2_name
        #out[row.index, 3] <- FUN(i1, i2)
        out[row.index, 3] <- do.call(FUN, list(i1, i2))
        row.index <- row.index + 1
      }
    }
    out <- data.frame(out)
    # NOTE(review): this branch names the column 'Measure_Val' while the
    # duplicates branch uses 'Measure.Val' — downstream code must cope with
    # both; consider unifying (would change the output schema).
    names(out) <- c('Exemplar.Row', 'Exemplar.Col', 'Measure_Val')
    out$Group <- rep(this_group, 190)
    #out$measure <- as.name(FUN) # Not sure how to do this yet
  }
  out
} | /_archival/analysis/compute.image.metric.R | no_license | gilmore-lab/symmetry-sorting | R | false | false | 2,390 | r | compute.image.metric <- function(this_group, sf = 1,
duplicates=FALSE, frame=TRUE,
FUN=ssim, analysis.dir = 'analysis/'){
require(tidyverse)
source(paste(analysis.dir, 'ssim.R', sep=""))
source(paste(analysis.dir, 'image.dot.prod.R', sep=""))
source(paste(analysis.dir, 'load.wp.R', sep=""))
source(paste(analysis.dir, 'sub.sample.wp.R', sep=""))
wp_df <- readr::read_csv('analysis/data/wallpapers-on-databrary.csv')
wp_this_group <- dplyr::filter(wp_df, group == tolower(this_group))
start <- 2
row.index <- 1
if (duplicates) {
if (frame){
out <- array(dim=c(20*20,3))
for (i in 1:20){
i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
i1_name <- wp_this_group$name[i]
for (j in 1:20){
i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
i2_name <- wp_this_group$name[j]
out[row.index, 1] <- i1_name
out[row.index, 2] <- i2_name
#out[row.index, 3] <- FUN(i1, i2)
out[row.index, 3] <- do.call(FUN, list(i1, i2))
row.index <- row.index + 1
}
}
out <- data.frame(out)
names(out) <- c('Exemplar.Row', 'Exemplar.Col', 'Measure.Val')
out$Group <- rep(this_group, 400)
} else {
out <- array(dim=c(20,20))
for (i in 1:20){
i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
for (j in 1:20){
i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
#out[i, j] <- FUN(i1, i2)
out[i, j] <- do.call(FUN, list(i1, i2))
}
}
}
} else {
out <- array(dim=c(190,3))
for (i in 1:19){
i1 <- sub.sample.wp(load.wp(e = i, g = this_group), sf=sf)
i1_name <- wp_this_group$name[i]
for (j in i+1:(20-i)){
i2 <- sub.sample.wp(load.wp(e = j, g = this_group), sf=sf)
i2_name <- wp_this_group$name[j]
out[row.index, 1] <- i1_name
out[row.index, 2] <- i2_name
#out[row.index, 3] <- FUN(i1, i2)
out[row.index, 3] <- do.call(FUN, list(i1, i2))
row.index <- row.index + 1
}
}
out <- data.frame(out)
names(out) <- c('Exemplar.Row', 'Exemplar.Col', 'Measure_Val')
out$Group <- rep(this_group, 190)
#out$measure <- as.name(FUN) # Not sure how to do this yet
}
out
} |
#' Nomis dataset overview
#'
#' Returns an overview of available metadata for a given dataset.
#'
#' @param id The ID of the particular dataset. Returns no data if not specified.
#'
#' @param select A string or character vector of one or more overview parts to
#' select, excluding all others. `select` is not case sensitive. The
#' options for `select` are described below, and are taken from the
#' \href{https://www.nomisweb.co.uk/api/v01/help}{Nomis API help page}.
#'
#' @return A tibble with two columns, one a character vector with the name of
#' the metadata category, and the other a list column of values for each
#' category.
#'
#' @section Overview part options:
#'
#' \describe{
#' \item{DatasetInfo}{General dataset information such as name, description,
#' sub-description, mnemonic, restricted access and status}
#' \item{Coverage}{Shows the geographic coverage of the main geography
#' dimension in this dataset (e.g. United Kingdom, England and Wales etc.)}
#' \item{Keywords}{The keywords allocated to the dataset}
#' \item{Units}{The units of measure supported by the dataset}
#' \item{ContentTypes}{The classifications allocated to this dataset}
#' \item{DateMetadata}{Information about the first release, last update and
#' next update}
#' \item{Contact}{Details for the point of contact for this dataset}
#' \item{Analyses}{Show the available analysis breakdowns of this dataset}
#' \item{Dimensions}{Individual dimension information (e.g. sex, geography,
#' date, etc.)}
#' \item{Dimension-concept}{Allows a specific dimension to be selected (e.g.
#' dimension-geography would allow information about geography dimension). This
#' is not used if "Dimensions" is specified too.}
#' \item{Codes}{Full list of selectable codes, excluding Geography, which as a
#' list of Types instead. (Requires "Dimensions" to be selected too)}
#' \item{Codes-concept}{Full list of selectable codes for a specific dimension,
#' excluding Geography, which as a list of Types instead. This is not used if
#' "Codes" is specified too (Requires "Dimensions" or equivalent to be
#' selected too)}
#' \item{DimensionMetadata}{Any available metadata attached at the dimensional
#' level (Requires "Dimensions" or equivalent to be selected too)}
#' \item{Make}{Information about whether user defined codes can be created with
#' the MAKE parameter when querying data (Requires "Dimensions" or equivalent
#' to be selected too)}
#' \item{DatasetMetadata}{Metadata attached at the dataset level}
#' }
#'
#' @export
#'
#' @seealso [nomis_data_info()]
#' @seealso [nomis_get_metadata()]
#'
#' @examples
#' \donttest{
#' library(dplyr)
#'
#' q <- nomis_overview("NM_1650_1")
#'
#' q %>%
#' tidyr::unnest(name) %>%
#' glimpse()
#'
#' s <- nomis_overview("NM_1650_1", select = c("Units", "Keywords"))
#'
#' s %>%
#' tidyr::unnest(name) %>%
#' glimpse()
#' }
#'
nomis_overview <- function(id, select = NULL) {
  # A dataset ID is mandatory; fail fast with a clean, call-free error.
  if (missing(id)) {
    stop("The dataset ID must be specified.", call. = FALSE)
  }
  # Build the optional ?select= query string listing the requested overview
  # parts.  A scalar `if`/`else` replaces the original `ifelse()`, which is
  # meant for vectors and needlessly evaluates both branches.
  select_query <- if (is.null(select)) {
    ""
  } else {
    paste0("?select=", paste0(select, collapse = ","))
  }
  query <- paste0(base_url, id, ".overview.json", select_query)
  # Flatten nested JSON so the overview becomes a simple named list.
  s <- jsonlite::fromJSON(query, flatten = TRUE)
  # One row per metadata category: `name` plus a list-column of values.
  tibble::enframe(s$overview)
}
| /R/overview.R | permissive | ropensci/nomisr | R | false | false | 3,280 | r |
#' Nomis dataset overview
#'
#' Returns an overview of available metadata for a given dataset.
#'
#' @param id The ID of the particular dataset. Returns no data if not specified.
#'
#' @param select A string or character vector of one or more overview parts to
#' select, excluding all others. `select` is not case sensitive. The
#' options for `select` are described below, and are taken from the
#' \href{https://www.nomisweb.co.uk/api/v01/help}{Nomis API help page}.
#'
#' @return A tibble with two columns, one a character vector with the name of
#' the metadata category, and the other a list column of values for each
#' category.
#'
#' @section Overview part options:
#'
#' \describe{
#' \item{DatasetInfo}{General dataset information such as name, description,
#' sub-description, mnemonic, restricted access and status}
#' \item{Coverage}{Shows the geographic coverage of the main geography
#' dimension in this dataset (e.g. United Kingdom, England and Wales etc.)}
#' \item{Keywords}{The keywords allocated to the dataset}
#' \item{Units}{The units of measure supported by the dataset}
#' \item{ContentTypes}{The classifications allocated to this dataset}
#' \item{DateMetadata}{Information about the first release, last update and
#' next update}
#' \item{Contact}{Details for the point of contact for this dataset}
#' \item{Analyses}{Show the available analysis breakdowns of this dataset}
#' \item{Dimensions}{Individual dimension information (e.g. sex, geography,
#' date, etc.)}
#' \item{Dimension-concept}{Allows a specific dimension to be selected (e.g.
#' dimension-geography would allow information about geography dimension). This
#' is not used if "Dimensions" is specified too.}
#' \item{Codes}{Full list of selectable codes, excluding Geography, which as a
#' list of Types instead. (Requires "Dimensions" to be selected too)}
#' \item{Codes-concept}{Full list of selectable codes for a specific dimension,
#' excluding Geography, which as a list of Types instead. This is not used if
#' "Codes" is specified too (Requires "Dimensions" or equivalent to be
#' selected too)}
#' \item{DimensionMetadata}{Any available metadata attached at the dimensional
#' level (Requires "Dimensions" or equivalent to be selected too)}
#' \item{Make}{Information about whether user defined codes can be created with
#' the MAKE parameter when querying data (Requires "Dimensions" or equivalent
#' to be selected too)}
#' \item{DatasetMetadata}{Metadata attached at the dataset level}
#' }
#'
#' @export
#'
#' @seealso [nomis_data_info()]
#' @seealso [nomis_get_metadata()]
#'
#' @examples
#' \donttest{
#' library(dplyr)
#'
#' q <- nomis_overview("NM_1650_1")
#'
#' q %>%
#' tidyr::unnest(name) %>%
#' glimpse()
#'
#' s <- nomis_overview("NM_1650_1", select = c("Units", "Keywords"))
#'
#' s %>%
#' tidyr::unnest(name) %>%
#' glimpse()
#' }
#'
nomis_overview <- function(id, select = NULL) {
  # The dataset ID is required; error out immediately if absent.
  if (missing(id)) {
    stop("The dataset ID must be specified.", call. = FALSE)
  }
  # Optional ?select= query string naming the overview parts to keep.
  select_query <- ""
  if (!is.null(select)) {
    select_query <- paste0("?select=", paste0(select, collapse = ","))
  }
  overview_url <- paste0(base_url, id, ".overview.json", select_query)
  parsed <- jsonlite::fromJSON(overview_url, flatten = TRUE)
  # Two-column tibble: metadata category name + list-column of values.
  tibble::enframe(parsed$overview)
}
|
#This project creates custom naive bayes functions and includes an example of how to use them
#Built on R version 4.0.3 (2020-10-10)
#Platform: x86_64-w64-mingw32/x64 (64-bit)
#The packages the functions requires
library(tm) #Used for text cleaning
library(tidytext) #Used for text cleaning
library(caret) #Used for data balancing
library(e1071) #Used for naive bayes model
library(matrixStats) #Used to extract key values from prediction matrix
#Package version:
# matrixStats_0.57.0 e1071_1.7-4 caret_6.0-86 ggplot2_3.3.3 lattice_0.20-41 tidytext_0.3.0 tm_0.7-8 NLP_0.2-1
# Clean a character vector (punctuation, case, digits, stop words) and attach
# it to a copy of the given data frame as column `tidy_text`.
# Side effect: writes the result to the global environment as `tidytext_df`
# (interface preserved from the original implementation).
text_clean <- function(text_input, # column of raw text from a data frame
                       dataframe   # data frame the cleaned text is attached to
){
  # Apply each tm/tidytext cleaning step in the same order as before:
  # strip punctuation, lower-case, drop digits, remove stop words.
  cleaning_steps <- list(
    removePunctuation,
    tolower,
    removeNumbers,
    function(txt) removeWords(txt, stop_words$word)
  )
  cleaned <- Reduce(function(txt, step) step(txt), cleaning_steps, text_input)

  # Copy the input data frame, append the cleaned text, and export the
  # combined result to the global environment.
  result_df <- as.data.frame(dataframe)
  result_df$tidy_text <- cleaned
  assign("tidytext_df", result_df, envir = globalenv())
}
# Shuffle the data, split it into train/test, balance the training labels
# with caret's upSample()/downSample(), and re-attach the held-out rows.
# Side effects (interface preserved): writes the combined data to the global
# environment as `balanced_df` and prints the last training row number.
balanced_data <- function(x, # data frame of observations
                          y, # labels to balance on
                          train_percent = .75, # fraction of rows used for training
                          upsample = TRUE) # TRUE = upsample minority classes, FALSE = downsample
{
  # Fixed seed keeps the shuffle (and hence the split) reproducible.
  set.seed(42)
  df <- as.data.frame(x)
  df$label <- as.factor(y)
  df <- df[sample(nrow(df)), ]
  rownames(df) <- NULL

  observations <- nrow(df)
  train_upper_limit <- ceiling(train_percent * observations)
  if (train_upper_limit >= observations) {
    stop("train_percent leaves no rows for the test set.", call. = FALSE)
  }
  train_data <- df[seq_len(train_upper_limit), ]
  # BUG FIX: the original test set started at `train_upper_limit`, so one
  # row appeared in both the training and the test data (data leakage).
  test_data <- df[(train_upper_limit + 1):observations, ]

  # caret's balancers append a `Class` column; it is dropped (last column)
  # before the held-out rows are re-attached below.
  balancer <- if (isTRUE(upsample)) upSample else downSample
  new.df <- as.data.frame(balancer(train_data, train_data$label))
  number <- nrow(new.df)
  new.df <- new.df[, -ncol(new.df)]
  colnames(test_data) <- colnames(new.df)
  new.df <- rbind(new.df, test_data)

  assign("balanced_df", new.df, envir = globalenv())
  print("The row to stop training at is:")
  print(number)
}
# Train an e1071 naive Bayes text classifier on the first `train_rows`
# documents and evaluate it on the remainder.  Side effects: exports
# `naive_bayes_classifier` (and optionally `confusion_matrix_stats`) to the
# global environment, prints accuracy, and optionally plots a confusion
# matrix heatmap.
naive_bayes <- function(x, #independent variable; should be a vector of text
                        y, #dependent variable, should be a vector of labels
                        train_rows, #What row num should the model train up to?
                        plot = TRUE, #Prints a confusion matrix by default
                        print_stats = TRUE) #Do you want the model to print confusion matrix results?
{
  #Convert text data to a corpus and then a document term matrix so that the model can be trained off it
  # NOTE(review): "removePunctuatio" is misspelled; tm silently ignores
  # unknown control options, so punctuation is probably NOT being removed.
  function_corpus <- VCorpus(VectorSource(x))
  function_dtm <- DocumentTermMatrix(function_corpus,
                                     control = list(tolower = TRUE,removeNumbers = TRUE,
                                                    stopwords = TRUE,
                                                    removePunctuatio = TRUE,
                                                    stemming = TRUE))
  x_dataframe <- as.data.frame(x)#Converts column to df
  if (is.factor(y) != TRUE) {
    y <- as.factor(y) #Converts labels to a factor so that classification can happen
  }
  y_dataframe <- as.data.frame(y) #Converts column of labels to df
  #Divides the data into train and test
  # NOTE(review): row `train_rows` is included in BOTH the training and the
  # test slices below (1:train_rows and train_rows:observations overlap by
  # one row) — a minor train/test leak; confirm and fix upstream if desired.
  observations <- as.numeric(nrow(x_dataframe))
  function_dtm_train <- function_dtm[1:train_rows,]
  function_dtm_test <- function_dtm[train_rows:observations,]
  function_train_labels <- y_dataframe[1:train_rows,]
  function_test_labels <- y_dataframe[train_rows:observations,]
  #Counting freq of words and training the naive bayes model
  # Only terms appearing in at least 5 training documents are kept.
  function_freq_words <- findFreqTerms(function_dtm_train,5)
  function_dtm_freq_train <- function_dtm_train[,function_freq_words]
  function_dtm_freq_test <- function_dtm_test[,function_freq_words]
  # Recode term counts to categorical presence indicators ("Yes"/"No"),
  # which is what naiveBayes() expects for categorical features.
  convert_counts <- function(x){
    x <- ifelse(x>0,"Yes","No")
  }
  function_train <- apply(function_dtm_freq_train,MARGIN = 2,convert_counts)
  function_test <- apply(function_dtm_freq_test,MARGIN = 2,convert_counts)
  #Trains the naive Bayes model
  function_classifier <- naiveBayes(function_train, as.factor(function_train_labels))
  #Testing the classifier on the test data
  function_test_pred <- predict(function_classifier, newdata=function_test)
  #Exporting the classifier
  assign("naive_bayes_classifier", function_classifier, envir=globalenv())
  #Creating statistical measures to evaluate model
  cfm <- as.data.frame(table(function_test_pred, function_test_labels))
  print("Model Accuracy is:")
  print(1-mean(function_test_pred != function_test_labels))
  # NOTE(review): positive = "pos" assumes a label literally named "pos";
  # the example data uses "positive"/"negative"/"neutral" — verify.
  cfm_stats <- confusionMatrix(function_test_pred, function_test_labels,positive = "pos")
  #If the user selected it this exports the confusion matrix stats
  if(print_stats == TRUE){
    assign("confusion_matrix_stats", cfm_stats, envir=globalenv())
  }
  #if the user wants a plot this code runs
  if(plot == TRUE){
    #Plots Predicted vs Actual labels from our test data as a heatmap of
    #frequencies on a log colour scale.
    print(ggplot(data = cfm,
                 mapping = aes(x = function_test_pred,
                               y = function_test_labels)) +
            geom_tile(aes(fill = Freq)) +
            geom_text(aes(label = sprintf("%1.0f", Freq)), vjust = 1) +
            scale_fill_gradient(low = "blue",
                                high = "red",
                                trans = "log") +ylab("Actual Labels\n") +
            scale_x_discrete(guide = guide_axis(n.dodge=3))+
            xlab("\nPredicted Labels"))
  }
}
# Convert a character vector into the Yes/No bag-of-words matrix format used
# by the trained naive Bayes classifier.
# Side effect: writes the matrix to the global environment as `predict.input`
# (interface preserved from the original implementation).
predict_input <- function(x) # x: a character vector of documents
{
  # Build a document-term matrix with the same preprocessing options as the
  # training pipeline ("removePunctuatio" kept as-is for consistency with it).
  corpus <- VCorpus(VectorSource(x))
  dtm <- DocumentTermMatrix(corpus,
                            control = list(tolower = TRUE, removeNumbers = TRUE,
                                           stopwords = TRUE,
                                           removePunctuatio = TRUE,
                                           stemming = TRUE))
  # Keep every term that occurs at least once, then recode the counts,
  # column by column, into categorical "Yes"/"No" presence indicators.
  kept_terms <- findFreqTerms(dtm, 1)
  presence <- apply(dtm[, kept_terms], MARGIN = 2,
                    function(counts) ifelse(counts > 0, "Yes", "No"))

  assign("predict.input", presence, envir = globalenv())
}
# Predict labels for raw text with a trained naive Bayes model and build a
# data frame holding, per document: the top class probability ("confidence"),
# the predicted class, and the name of the second most probable class.
# Side effect: exports the result to the global environment as
# `confidence_interval.df` (and overwrites `predict.input`).
confidence_interval <- function(x, #Should be a model
                                y) #Should be the model's input
{
  #Function that converts the input into a bag of words matrix
  # NOTE(review): this local copy shadows the global predict_input() but
  # uses a minimum term frequency of 3 instead of 1 — confirm which
  # threshold is intended and keep the two in sync.
  predict_input <- function(x)
  {
    function_corpus <- VCorpus(VectorSource(x))
    function_dtm <- DocumentTermMatrix(function_corpus,
                                       control = list(tolower = TRUE,removeNumbers = TRUE,
                                                      stopwords = TRUE,
                                                      removePunctuatio = TRUE,
                                                      stemming = TRUE))
    function_freq_words <- findFreqTerms(function_dtm,3)
    function_dtm_freq <- function_dtm[,function_freq_words]
    convert_counts <- function(x){
      x <- ifelse(x>0,"Yes","No")
    }
    function_test <- apply(function_dtm_freq,MARGIN = 2,convert_counts)
    assign("predict.input", function_test, envir=globalenv())
  }
  y <- predict_input(y)
  #Runs prediction on the text and generates the probability of every label
  df <- as.matrix(predict(x, y, type="raw"))
  #Picks the highest probability in each row as the confidence value
  # NOTE(review): `value` is not an argument of matrixStats::rowMaxs (it is
  # swallowed by `...`); it looks borrowed from Rfast::rowMaxs — verify the
  # intended package.
  max_row <- as.data.frame(rowMaxs(df, value = FALSE))
  #Change column name
  colnames(max_row) <- c("confidence")
  #Attach a column to our df with the best prediction from the model
  max_row$Prediction <- predict(x,y)
  #Find and attach the column name for the cell with the second highest probability
  # NOTE(review): df[,-ncol(df)] drops the LAST class column before the
  # search, so that class can never be reported as second-most-probable —
  # presumably unintended; confirm.
  max_row$second_most_probable <- apply(df[,-ncol(df)], 1,
                                        FUN = function(x) which(x == sort(x, decreasing = TRUE)[2]))
  max_row$second_most_probable <- colnames(df)[max_row$second_most_probable]
  #Export to the global environment so that it can be viewed
  assign("confidence_interval.df", max_row, envir=globalenv())
}
#The following example illustrates how to use these functions
#This dataset examining airline tweet sentiment can be found at:
#https://www.kaggle.com/crowdflower/twitter-airline-sentiment
Airlines_Tweet_Dataset <- read.csv("Tweets.csv")
# Hold out rows 2001-2500 as "new" data the model never sees in training.
Airlines_New_Data <- Airlines_Tweet_Dataset[2001:2500,]
Airlines_Tweet_Dataset <- Airlines_Tweet_Dataset[1:2000,]
#Clean our text vector (creates `tidytext_df` in the global environment)
text_clean(Airlines_Tweet_Dataset$text, Airlines_Tweet_Dataset)
#Feed our cleaned text into our function that will balance the data off of sentiment
#(creates `balanced_df` and prints the last training row)
balanced_data(tidytext_df, tidytext_df$airline_sentiment)
#Train the model up to the number the balanced data function told us to
# NOTE(review): 2862 is hard-coded from a previous balanced_data() run for
# this dataset and seed; re-run and update it if either changes.
naive_bayes(balanced_df$tidy_text, balanced_df$label, 2862)
#you can view the stats of your model by running this line
head(confusion_matrix_stats)
#Now we can take data our model has never seen and predict on it
text_clean(Airlines_New_Data$text, Airlines_New_Data)
predict_input(tidytext_df$tidy_text)
#Feeding our model this new data and collecting predictions
prediction <- predict(naive_bayes_classifier, predict.input)
head(prediction)
#Or we can use the confidence interval function to create a dataframe with the predictions and conf. levels
confidence_interval(naive_bayes_classifier, tidytext_df$tidy_text)
head(confidence_interval.df)
#Now we can attach other info to our df like the actual label and run analyses off it
confidence_interval.df$Actual_Label <- tidytext_df$airline_sentiment
confidence_interval.df$is.match <- ifelse(confidence_interval.df$Prediction == confidence_interval.df$Actual_Label, 1 , 0)
head(confidence_interval.df)
#And we can make some graphs to check if our confidence level predicts whether or not a prediction is right
library(ggthemes)
# Logistic-regression smoother: P(correct prediction) as a function of the
# model's reported confidence.
g <- ggplot(confidence_interval.df, aes(confidence, is.match)) +
  geom_point(size=1) +
  stat_smooth(method="glm", se=FALSE, method.args = list(family=binomial),
              color="#A100FF", size=2) + theme_economist_white()
plot(g) | /All Functions and Example.R | no_license | JackOgozaly/Naive-Bayes-Functions | R | false | false | 11,137 | r | #This project creates custom naive bayes functions and includes an exmaple of how to use them
#Built on R version 4.0.3 (2020-10-10)
#Platform: x86_64-w64-mingw32/x64 (64-bit)
#The packages the functions requires
library(tm) #Used for text cleaning
library(tidytext) #Used for text cleaning
library(caret) #Used for data balancing
library(e1071) #Used for naive bayes model
library(matrixStats) #Used to extract key values from prediction matrix
#Package version:
# matrixStats_0.57.0 e1071_1.7-4 caret_6.0-86 ggplot2_3.3.3 lattice_0.20-41 tidytext_0.3.0 tm_0.7-8 NLP_0.2-1
text_clean <- function(text_input, # should be a columm from a dataframe
dataframe #should be the dataframe you want the clean text to be attached to
){
#These lines clean the text input
tidy_text <- text_input
tidy_text <- removePunctuation(tidy_text)
tidy_text <- tolower(tidy_text)
tidy_text <- removeNumbers(tidy_text) #This line is optional
tidy_text <- removeWords(tidy_text, stop_words$word)
#These lines create a new dataframe, attaches the clean text, and exports
tidy_text_df <- as.data.frame(dataframe)
tidy_text_df$tidy_text <- tidy_text
assign("tidytext_df", tidy_text_df, envir = globalenv())
}
balanced_data <- function(x, #Dataframe
y, #Labels to be balanced
train_percent = .75, #What percent of the data do you wish to train on? 75% by default
upsample= T) #If true upsamples the data, if false downsamples. True by default.
{
set.seed(42)
df <- as.data.frame(x)
df$label <- as.factor(y)
rows <- sample(nrow(df))
df <- df[rows, ]
rownames(df) <- NULL
observations <- as.numeric(nrow(df))
observations <- ceiling(observations)
train_upper_limit <- train_percent * observations
train_upper_limit <- ceiling(train_upper_limit)
train_data <- df[1:train_upper_limit, ]
test_data <- df[train_upper_limit:observations, ]
if(upsample==T){
new.df <- as.data.frame(upSample(train_data, train_data$label))
number <- nrow(new.df)
new.df <- new.df[,-ncol(new.df)]
colnames(test_data) <- colnames(new.df)
new.df <- rbind(new.df, test_data)
assign("balanced_df", new.df, envir = globalenv())
print("The row to stop training at is:")
print(number)
}
else{
new.df <- as.data.frame(downSample(train_data, train_data$label))
number <- nrow(new.df)
new.df <- new.df[,-ncol(new.df)]
colnames(test_data) <- colnames(new.df)
new.df <- rbind(new.df, test_data)
assign("balanced_df", new.df, envir = globalenv())
print("The row to stop training at is:")
print(number)
}
}
naive_bayes <- function(x,           # Independent variable; should be a vector of text documents
                        y,           # Dependent variable; should be a vector of labels
                        train_rows,  # Row number the model should train up to
                        plot = TRUE,         # Print a confusion-matrix heat map by default
                        print_stats = TRUE)  # Export confusion-matrix statistics?
{
  # Convert the text to a corpus and then a document-term matrix so the model
  # can be trained on word occurrences.
  function_corpus <- VCorpus(VectorSource(x))
  function_dtm <- DocumentTermMatrix(function_corpus,
                                     control = list(tolower = TRUE, removeNumbers = TRUE,
                                                    stopwords = TRUE,
                                                    # Fixed typo: was "removePunctuatio", which tm
                                                    # ignores, so punctuation was never stripped.
                                                    removePunctuation = TRUE,
                                                    stemming = TRUE))
  x_dataframe <- as.data.frame(x)  # text column as a data frame
  if (is.factor(y) != TRUE) {
    y <- as.factor(y)  # labels must be a factor for classification
  }
  y_dataframe <- as.data.frame(y)
  # Divide the data into train and test sets.  The test set starts one row
  # past the training cut-off; the original code reused row `train_rows`
  # in both sets.
  observations <- as.numeric(nrow(x_dataframe))
  function_dtm_train <- function_dtm[1:train_rows,]
  function_dtm_test <- function_dtm[(train_rows + 1):observations,]
  function_train_labels <- y_dataframe[1:train_rows,]
  function_test_labels <- y_dataframe[(train_rows + 1):observations,]
  # Keep only words occurring at least 5 times in the training data.
  function_freq_words <- findFreqTerms(function_dtm_train,5)
  function_dtm_freq_train <- function_dtm_train[,function_freq_words]
  function_dtm_freq_test <- function_dtm_test[,function_freq_words]
  # Naive Bayes works on categorical features: recode counts as "Yes"/"No".
  convert_counts <- function(x){
    x <- ifelse(x>0,"Yes","No")
  }
  function_train <- apply(function_dtm_freq_train,MARGIN = 2,convert_counts)
  function_test <- apply(function_dtm_freq_test,MARGIN = 2,convert_counts)
  # Train the naive Bayes model (e1071::naiveBayes).
  function_classifier <- naiveBayes(function_train, as.factor(function_train_labels))
  # Test the classifier on the held-out data.
  function_test_pred <- predict(function_classifier, newdata=function_test)
  # Export the classifier for later use (side-effect based API of this script).
  assign("naive_bayes_classifier", function_classifier, envir=globalenv())
  # Statistical measures to evaluate the model.
  cfm <- as.data.frame(table(function_test_pred, function_test_labels))
  print("Model Accuracy is:")
  print(1-mean(function_test_pred != function_test_labels))
  cfm_stats <- confusionMatrix(function_test_pred, function_test_labels,positive = "pos")
  # If the user selected it, export the confusion-matrix stats.
  if(print_stats == TRUE){
    assign("confusion_matrix_stats", cfm_stats, envir=globalenv())
  }
  # If the user wants a plot, draw predicted vs actual labels as a heat map.
  if(plot == TRUE){
    print(ggplot(data = cfm,
                 mapping = aes(x = function_test_pred,
                               y = function_test_labels)) +
            geom_tile(aes(fill = Freq)) +
            geom_text(aes(label = sprintf("%1.0f", Freq)), vjust = 1) +
            scale_fill_gradient(low = "blue",
                                high = "red",
                                trans = "log") +ylab("Actual Labels\n") +
            scale_x_discrete(guide = guide_axis(n.dodge=3))+
            xlab("\nPredicted Labels"))
  }
}
predict_input <- function(x)  # x: a vector of text documents
{
  # Convert the text to a document-term matrix using the same preprocessing
  # as the training pipeline.
  function_corpus <- VCorpus(VectorSource(x))
  function_dtm <- DocumentTermMatrix(function_corpus,
                                     control = list(tolower = TRUE, removeNumbers = TRUE,
                                                    stopwords = TRUE,
                                                    # Fixed typo: was "removePunctuatio", which tm
                                                    # ignores, so punctuation was never stripped.
                                                    removePunctuation = TRUE,
                                                    stemming = TRUE))
  # Create the bag-of-words matrix.  The 1 below is the minimum number of
  # times a word must occur to be counted (the original comment said "three",
  # which did not match the code).
  function_freq_words <- findFreqTerms(function_dtm,1)
  function_dtm_freq <- function_dtm[,function_freq_words]
  # Recode counts as "Yes"/"No" to match the classifier's training features.
  convert_counts <- function(x){
    x <- ifelse(x>0,"Yes","No")
  }
  function_test <- apply(function_dtm_freq,MARGIN = 2,convert_counts)
  # Export the feature matrix for use with predict() (side-effect based API).
  assign("predict.input", function_test, envir=globalenv())
}
confidence_interval <- function(x,  # A fitted model (e.g. the exported naive Bayes classifier)
                                y)  # The model's input: a vector of text documents
{
  # Local helper that converts raw text into the same bag-of-words "Yes"/"No"
  # matrix format the classifier was trained on.
  predict_input <- function(x)
  {
    function_corpus <- VCorpus(VectorSource(x))
    function_dtm <- DocumentTermMatrix(function_corpus,
                                       control = list(tolower = TRUE, removeNumbers = TRUE,
                                                      stopwords = TRUE,
                                                      # Fixed typo: was "removePunctuatio", which tm
                                                      # ignores, so punctuation was never stripped.
                                                      removePunctuation = TRUE,
                                                      stemming = TRUE))
    function_freq_words <- findFreqTerms(function_dtm,3)
    function_dtm_freq <- function_dtm[,function_freq_words]
    convert_counts <- function(x){
      x <- ifelse(x>0,"Yes","No")
    }
    function_test <- apply(function_dtm_freq,MARGIN = 2,convert_counts)
    assign("predict.input", function_test, envir=globalenv())
  }
  y <- predict_input(y)
  # Run prediction on the text and get the probability of every label.
  df <- as.matrix(predict(x, y, type="raw"))
  # Pick the highest value per row of the predicted probability matrix.
  # NOTE(review): with Rfast::rowMaxs, `value = FALSE` returns the *index* of
  # the maximum, not the probability itself — confirm which rowMaxs is in
  # scope and whether `value = TRUE` was intended for a true confidence score.
  max_row <- as.data.frame(rowMaxs(df, value = FALSE))
  colnames(max_row) <- c("confidence")
  # Attach the model's best prediction for each document.
  max_row$Prediction <- predict(x,y)
  # Find and attach the column name of the second-highest probability.
  # NOTE(review): df[,-ncol(df)] excludes the last class column from this
  # search — confirm that is intended.
  max_row$second_most_probable <- apply(df[,-ncol(df)], 1,
                                        FUN = function(x) which(x == sort(x, decreasing = TRUE)[2]))
  max_row$second_most_probable <- colnames(df)[max_row$second_most_probable]
  # Export to the global environment so it can be viewed (side-effect API).
  assign("confidence_interval.df", max_row, envir=globalenv())
}
# ---------------------------------------------------------------------------
# Worked example: airline tweet sentiment classification.
# Dataset: https://www.kaggle.com/crowdflower/twitter-airline-sentiment
# ---------------------------------------------------------------------------
Airlines_Tweet_Dataset <- read.csv("Tweets.csv")
# Hold out rows 2001-2500 as "new" data the model will never train on.
Airlines_New_Data <- Airlines_Tweet_Dataset[2001:2500,]
Airlines_Tweet_Dataset <- Airlines_Tweet_Dataset[1:2000,]
# Clean the tweet text (side effect: creates `tidytext_df` in the global env).
text_clean(Airlines_Tweet_Dataset$text, Airlines_Tweet_Dataset)
# Balance the cleaned data on sentiment (side effect: creates `balanced_df`
# and prints the row number to train up to).
balanced_data(tidytext_df, tidytext_df$airline_sentiment)
# Train the model up to the row number reported by balanced_data() above.
naive_bayes(balanced_df$tidy_text, balanced_df$label, 2862)
# Inspect the model's confusion-matrix statistics.
head(confusion_matrix_stats)
# Take data the model has never seen and clean it the same way.
text_clean(Airlines_New_Data$text, Airlines_New_Data)
predict_input(tidytext_df$tidy_text)
# Feed the model this new data and collect predictions.
prediction <- predict(naive_bayes_classifier, predict.input)
head(prediction)
# Alternatively, build a data frame with predictions and confidence levels.
confidence_interval(naive_bayes_classifier, tidytext_df$tidy_text)
head(confidence_interval.df)
# Attach the actual label and score each prediction as a hit (1) or miss (0).
confidence_interval.df$Actual_Label <- tidytext_df$airline_sentiment
confidence_interval.df$is.match <- ifelse(confidence_interval.df$Prediction == confidence_interval.df$Actual_Label, 1 , 0)
head(confidence_interval.df)
# Check graphically whether confidence level predicts correctness.
library(ggthemes)
g <- ggplot(confidence_interval.df, aes(confidence, is.match)) +
geom_point(size=1) +
stat_smooth(method="glm", se=FALSE, method.args = list(family=binomial),
color="#A100FF", size=2) + theme_economist_white()
plot(g) |
\name{predict.mixor}
\alias{predict.mixor}
\title{
Predict Outcome From Mixor Fitted Model
}
\description{
\code{predict} method for class \code{mixor}.
}
\usage{
\method{predict}{mixor}(object, newdata = NULL, na.action = na.fail, ...)
}
\arguments{
\item{object}{
an object of class \code{mixor}.
}
\item{newdata}{
an optional data frame to be used for obtaining predictions when the random effects are zero. When \code{newdata} is not supplied, the random effects estimates are used in obtaining model predictions. All variables used in the \code{mixor} model, the fixed and the random effects models, as well as the grouping factors, must be present in the data frame.
}
\item{na.action}{
a function that indicates what should happen when \code{newdata} contains \code{NA}s. The default action (\code{na.fail}) causes the function to print an error message and terminate if there are any incomplete observations.
}
\item{\dots}{
other arguments.
}
}
\details{
This function returns fitted probabilities and the predicted class from a fitted \code{mixor} object. When the \code{newdata} parameter is specified, the random effects are taken to be zero and predictions are returned. Otherwise conditional predictions, which includes both fixed and random effects terms, for the observed data are returned.
}
\value{
\item{predicted}{class specific probabilities from the fitted model.}
\item{class}{predicted class, that having the largest fitted probability, from the fitted model.}
}
\author{
Kellie J. Archer, Donald Hedeker, Rachel Nordgren, Robert D. Gibbons
}
\seealso{
See Also \code{\link{mixor}}, \code{\link{print.mixor}}
}
\examples{
library("mixor")
data("schizophrenia")
### Random intercept
SCHIZO1.fit<-mixor(imps79o ~ TxDrug + SqrtWeek + TxSWeek, data=schizophrenia,
id=id, link="probit")
pihat<-predict(SCHIZO1.fit)
table(pihat$class, schizophrenia$imps79o)
head(pihat$predicted)
}
\keyword{ methods }
| /man/predict.mixor.Rd | no_license | cran/mixor | R | false | false | 1,936 | rd | \name{predict.mixor}
\alias{predict.mixor}
\title{
Predict Outcome From Mixor Fitted Model
}
\description{
\code{predict} method for class \code{mixor}.
}
\usage{
\method{predict}{mixor}(object, newdata = NULL, na.action = na.fail, ...)
}
\arguments{
\item{object}{
an object of class \code{mixor}.
}
\item{newdata}{
an optional data frame to be used for obtaining predictions when the random effects are zero. When \code{newdata} is not supplied, the random effects estimates are used in obtaining model predictions. All variables used in the \code{mixor} model, the fixed and the random effects models, as well as the grouping factors, must be present in the data frame.
}
\item{na.action}{
a function that indicates what should happen when \code{newdata} contains \code{NA}s. The default action (\code{na.fail}) causes the function to print an error message and terminate if there are any incomplete observations.
}
\item{\dots}{
other arguments.
}
}
\details{
This function returns fitted probabilities and the predicted class from a fitted \code{mixor} object. When the \code{newdata} parameter is specified, the random effects are taken to be zero and predictions are returned. Otherwise conditional predictions, which includes both fixed and random effects terms, for the observed data are returned.
}
\value{
\item{predicted}{class specific probabilities from the fitted model.}
\item{class}{predicted class, that having the largest fitted probability, from the fitted model.}
}
\author{
Kellie J. Archer, Donald Hedeker, Rachel Nordgren, Robert D. Gibbons
}
\seealso{
See Also \code{\link{mixor}}, \code{\link{print.mixor}}
}
\examples{
library("mixor")
data("schizophrenia")
### Random intercept
SCHIZO1.fit<-mixor(imps79o ~ TxDrug + SqrtWeek + TxSWeek, data=schizophrenia,
id=id, link="probit")
pihat<-predict(SCHIZO1.fit)
table(pihat$class, schizophrenia$imps79o)
head(pihat$predicted)
}
\keyword{ methods }
|
# Using MER Opportunity data, figure out which dust factor adjustments need to be
# applied for different target error margin bounds.
#
# Results:
#
# With outliers:
#
# -15%/+21% for a dust factor adjustment of 9.5% and shadowing loss of 5%.
# -15%/+21% for a dust factor adjustment of 7.35% and shadowing loss of 6%.
#
# -10%/+25% for a dust factor adjustment of 12.5% and shadowing loss of 5%.
# -10%/+25% for a dust factor adjustment of 10.25% and shadowing loss of 6%.
# -10%/+25% for a dust factor adjustment of 8.25% and shadowing loss of 7%.
#
# -23%/+15% for a dust factor adjustment of 4.9% and shadowing loss of 5%.
# -22%/+16% for a dust factor adjustment of 5.5% and shadowing loss of 5%.
#
# -20%/+18% for a dust factor adjustment of 5% and shadowing loss of 0%.
#
#
# Without outliers that are less than -20% divergence:
#
# -12%/+13% for a dust factor adjustment of 3% and shadowing loss of 5%.
# -11%/+14% for a dust factor adjustment of 4% and shadowing loss of 5%.
# -10%/+15% for a dust factor adjustment of 4.5% and shadowing loss of 5%.
library(whisker)
# Function to get a data frame with all the divergence data between predictions and opportunity measurements.
get_energy_divergences = dget(here("utils", "get_energy_divergences.R"))
result_message_template = 'For a {{DustFactor_adjustment}}% Dust Factor adjustment there are {{out_of_bounds}} out of bound measurements:
> {{upper_bound}}%: {{out_of_upper_bound}}
< {{lower_bound}}%: {{out_of_lower_bound}}
'
# Select measurements with less than -20% divergence as the outliers.
# 1. Sols 2185 (-16%), 2199 (-31%), 2204 (peak at -32%), 2211 (-27%), 2218 (-26%), and 2226 (-16%).
# 2. Sol 2519 (Ls 242, 23-FEB-2011) of Mars Year 29? Divergence of -25%.
# 3. Sol 3901 (Ls 271, 13-JAN-2015) of Mars Year 32? Divergence of -21%.
Sol_outliers = c(2199, 2204, 2211, 2218, 2519, 3901)
target_error_margin_lowest = -0.10
target_error_margin_highest = 0.12
# Shadowing loss applied in every run of the sweep below.  The original code
# reported `ls` in the results list, which is base::ls() (a function), not a
# number — fixed by naming the constant once and using it in both places.
loss_shadowing = 0.036
# Sweep the dust factor adjustment from 4% to 15% in 0.5% steps.
for(dfa in seq(0.04, 0.15, 0.005)){
  energy_divergences = get_energy_divergences(
    Loss_shadowing=loss_shadowing,
    DustFactor_adjustment=dfa)
  # Let's drop MY33 since it diverges so much compared to other years.
  energy_divergences = energy_divergences[energy_divergences$MarsYear != 33,]
  # Remove the outliers.
  energy_divergences = energy_divergences[!(energy_divergences$Sol %in% Sol_outliers), ]
  # Count how many measurements are outside of the target margin of error range.
  divs_out_of_upper_bound = energy_divergences[energy_divergences$WhDiffPercentage > target_error_margin_highest*100, ]
  divs_out_of_lower_bound = energy_divergences[energy_divergences$WhDiffPercentage < target_error_margin_lowest*100, ]
  out_of_bounds = length(divs_out_of_upper_bound$Ls) + length(divs_out_of_lower_bound$Ls)
  # Organize the results for template rendering.
  results = list(
    DustFactor_adjustment=dfa * 100,
    loss_shadowing = loss_shadowing,
    upper_bound = target_error_margin_highest*100,
    lower_bound = target_error_margin_lowest*100,
    out_of_bounds = out_of_bounds,
    out_of_upper_bound = length(divs_out_of_upper_bound$Ls),
    out_of_lower_bound = length(divs_out_of_lower_bound$Ls)
  )
  # Print result.
  cat(whisker.render(result_message_template, results))
}
| /fixme/dust_factor_adjustments.R | no_license | georgeslabreche/mars-solar-energy-sherpatt-rover | R | false | false | 3,271 | r | # Using MER Opportunity data, figure out which dust factor adjustments need to be
# applied for different target error margin bounds.
#
# Results:
#
# With outliers:
#
# -15%/+21% for a dust factor adjustment of 9.5% and shadowing loss of 5%.
# -15%/+21% for a dust factor adjustment of 7.35% and shadowing loss of 6%.
#
# -10%/+25% for a dust factor adjustment of 12.5% and shadowing loss of 5%.
# -10%/+25% for a dust factor adjustment of 10.25% and shadowing loss of 6%.
# -10%/+25% for a dust factor adjustment of 8.25% and shadowing loss of 7%.
#
# -23%/+15% for a dust factor adjustment of 4.9% and shadowing loss of 5%.
# -22%/+16% for a dust factor adjustment of 5.5% and shadowing loss of 5%.
#
# -20%/+18% for a dust factor adjustment of 5% and shadowing loss of 0%.
#
#
# Without outliers that are less than -20% divergence:
#
# -12%/+13% for a dust factor adjustment of 3% and shadowing loss of 5%.
# -11%/+14% for a dust factor adjustment of 4% and shadowing loss of 5%.
# -10%/+15% for a dust factor adjustment of 4.5% and shadowing loss of 5%.
library(whisker)
# Function to get a data frame with all the divergence data between predictions and opportunity measurements.
get_energy_divergences = dget(here("utils", "get_energy_divergences.R"))
result_message_template = 'For a {{DustFactor_adjustment}}% Dust Factor adjustment there are {{out_of_bounds}} out of bound measurements:
> {{upper_bound}}%: {{out_of_upper_bound}}
< {{lower_bound}}%: {{out_of_lower_bound}}
'
# Select measurements with less than -20% divergence as the outliers.
# 1. Sols 2185 (-16%), 2199 (-31%), 2204 (peak at -32%), 2211 (-27%), 2218 (-26%), and 2226 (-16%).
# 2. Sol 2519 (Ls 242, 23-FEB-2011) of Mars Year 29? Divergence of -25%.
# 3. Sol 3901 (Ls 271, 13-JAN-2015) of Mars Year 32? Divergence of -21%.
Sol_outliers = c(2199, 2204, 2211, 2218, 2519, 3901)
target_error_margin_lowest = -0.10
target_error_margin_highest = 0.12
# Try with dust factor adjustment from 0% to 15%
for(dfa in seq(0.04, 0.15, 0.005)){
energy_divergences = get_energy_divergences(
Loss_shadowing=0.036,
DustFactor_adjustment=dfa)
# Let's drop MY33 since it diverges so much compared to other years.
energy_divergences = energy_divergences[energy_divergences$MarsYear != 33,]
# Remove the outliers.
energy_divergences = energy_divergences[!(energy_divergences$Sol %in% Sol_outliers), ]
# Count how many measurements are outside of the target margin of error range.
divs_out_of_upper_bound = energy_divergences[energy_divergences$WhDiffPercentage > target_error_margin_highest*100, ]
divs_out_of_lower_bound = energy_divergences[energy_divergences$WhDiffPercentage < target_error_margin_lowest*100, ]
out_of_bounds = length(divs_out_of_upper_bound$Ls) + length(divs_out_of_lower_bound$Ls)
# Organiye results into an object
results = list(
DustFactor_adjustment=dfa * 100,
loss_shadowing = ls,
upper_bound = target_error_margin_highest*100,
lower_bound = target_error_margin_lowest*100,
out_of_bounds = out_of_bounds,
out_of_upper_bound = length(divs_out_of_upper_bound$Ls),
out_of_lower_bound = length(divs_out_of_lower_bound$Ls)
)
# Print result.
cat(whisker.render(result_message_template, results))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{tp_cpp}
\alias{tp_cpp}
\title{Transpose a matrix}
\usage{
tp_cpp(X)
}
\arguments{
\item{X}{an R matrix (expression matrix)}
}
\value{
a transposed matrix
}
\description{
Transpose a matrix
}
\examples{
mat_test <-matrix(rnbinom(1000000,mu=0.01, size=10),nrow=100)
tp_mat <- tp_cpp(mat_test)
}
| /man/tp_cpp.Rd | no_license | IMB-Computational-Genomics-Lab/scGPS | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{tp_cpp}
\alias{tp_cpp}
\title{Transpose a matrix}
\usage{
tp_cpp(X)
}
\arguments{
\item{X}{an R matrix (expression matrix)}
}
\value{
a transposed matrix
}
\description{
Transpose a matrix
}
\examples{
mat_test <-matrix(rnbinom(1000000,mu=0.01, size=10),nrow=100)
tp_mat <- tp_cpp(mat_test)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GENIE3.R
\name{GENIE3}
\alias{GENIE3}
\title{GENIE3}
\usage{
GENIE3(exprMatrix, regulators = NULL, targets = NULL,
treeMethod = "RF", K = "sqrt", nTrees = 1000, nCores = 1,
verbose = FALSE)
}
\arguments{
\item{exprMatrix}{Expression matrix (genes x samples). Every row is a gene, every column is a sample.
The expression matrix can also be provided as one of the Bioconductor classes:
\itemize{
\item \code{ExpressionSet}: The matrix will be obtained through exprs(exprMatrix)
\item \code{RangedSummarizedExperiment}: The matrix will be obtained through assay(exprMatrix), which will extract the first assay (usually the counts)
}}
\item{regulators}{Subset of genes used as candidate regulators. Must be either a vector of indices, e.g. \code{c(1,5,6,7)}, or a vector of gene names, e.g. \code{c("at_12377", "at_10912")}. The default value NULL means that all the genes are used as candidate regulators.}
\item{targets}{Subset of genes to which potential regulators will be calculated. Must be either a vector of indices, e.g. \code{c(1,5,6,7)}, or a vector of gene names, e.g. \code{c("at_12377", "at_10912")}. If NULL (default), regulators will be calculated for all genes in the input matrix.}
\item{treeMethod}{Tree-based method used. Must be either "RF" for Random Forests (default) or "ET" for Extra-Trees.}
\item{K}{Number of candidate regulators randomly selected at each tree node (for the determination of the best split). Must be either "sqrt" for the square root of the total number of candidate regulators (default), "all" for the total number of candidate regulators, or a strictly positive integer.}
\item{nTrees}{Number of trees in an ensemble for each target gene. Default: 1000.}
\item{nCores}{Number of cores to use for parallel computing. Default: 1.}
\item{verbose}{If set to TRUE, a feedback on the progress of the calculations is given. Default: FALSE.}
}
\value{
Weighted adjacency matrix of inferred network. Element w_ij (row i, column j) gives the importance of the link from regulatory gene i to target gene j.
}
\description{
\code{GENIE3} Infers a gene regulatory network (in the form of a weighted adjacency matrix) from expression data, using ensembles of regression trees.
}
\examples{
## Generate fake expression matrix
exprMatrix <- matrix(sample(1:10, 100, replace=TRUE), nrow=20)
rownames(exprMatrix) <- paste("Gene", 1:20, sep="")
colnames(exprMatrix) <- paste("Sample", 1:5, sep="")
## Run GENIE3
set.seed(123) # For reproducibility of results
weightMatrix <- GENIE3(exprMatrix, regulators=paste("Gene", 1:5, sep=""))
## Get ranking of edges
linkList <- getLinkList(weightMatrix)
head(linkList)
}
| /man/GENIE3.Rd | no_license | mschubert/GENIE3 | R | false | true | 2,725 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GENIE3.R
\name{GENIE3}
\alias{GENIE3}
\title{GENIE3}
\usage{
GENIE3(exprMatrix, regulators = NULL, targets = NULL,
treeMethod = "RF", K = "sqrt", nTrees = 1000, nCores = 1,
verbose = FALSE)
}
\arguments{
\item{exprMatrix}{Expression matrix (genes x samples). Every row is a gene, every column is a sample.
The expression matrix can also be provided as one of the Bioconductor classes:
\itemize{
\item \code{ExpressionSet}: The matrix will be obtained through exprs(exprMatrix)
\item \code{RangedSummarizedExperiment}: The matrix will be obtained through assay(exprMatrix), which will extract the first assay (usually the counts)
}}
\item{regulators}{Subset of genes used as candidate regulators. Must be either a vector of indices, e.g. \code{c(1,5,6,7)}, or a vector of gene names, e.g. \code{c("at_12377", "at_10912")}. The default value NULL means that all the genes are used as candidate regulators.}
\item{targets}{Subset of genes to which potential regulators will be calculated. Must be either a vector of indices, e.g. \code{c(1,5,6,7)}, or a vector of gene names, e.g. \code{c("at_12377", "at_10912")}. If NULL (default), regulators will be calculated for all genes in the input matrix.}
\item{treeMethod}{Tree-based method used. Must be either "RF" for Random Forests (default) or "ET" for Extra-Trees.}
\item{K}{Number of candidate regulators randomly selected at each tree node (for the determination of the best split). Must be either "sqrt" for the square root of the total number of candidate regulators (default), "all" for the total number of candidate regulators, or a strictly positive integer.}
\item{nTrees}{Number of trees in an ensemble for each target gene. Default: 1000.}
\item{nCores}{Number of cores to use for parallel computing. Default: 1.}
\item{verbose}{If set to TRUE, a feedback on the progress of the calculations is given. Default: FALSE.}
}
\value{
Weighted adjacency matrix of inferred network. Element w_ij (row i, column j) gives the importance of the link from regulatory gene i to target gene j.
}
\description{
\code{GENIE3} Infers a gene regulatory network (in the form of a weighted adjacency matrix) from expression data, using ensembles of regression trees.
}
\examples{
## Generate fake expression matrix
exprMatrix <- matrix(sample(1:10, 100, replace=TRUE), nrow=20)
rownames(exprMatrix) <- paste("Gene", 1:20, sep="")
colnames(exprMatrix) <- paste("Sample", 1:5, sep="")
## Run GENIE3
set.seed(123) # For reproducibility of results
weightMatrix <- GENIE3(exprMatrix, regulators=paste("Gene", 1:5, sep=""))
## Get ranking of edges
linkList <- getLinkList(weightMatrix)
head(linkList)
}
|
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace.  That is
# acceptable for a standalone batch script, but avoid sourcing this file from
# an interactive session.
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
  # 95% Wald confidence intervals for the second coefficient (the exposure X)
  # of each of the two non-reference outcome categories of a multinom fit.
  # Returns a 2x2 matrix with rows "ci.1" and "ci.2" (lower, upper).
  fit_summary <- summary(fit)
  est <- fit_summary$coefficients
  se <- fit_summary$standard.errors
  half_width_1 <- 1.96 * se[1, 2]
  half_width_2 <- 1.96 * se[2, 2]
  ci.1 <- c(est[1, 2] - half_width_1, est[1, 2] + half_width_1)
  ci.2 <- c(est[2, 2] - half_width_2, est[2, 2] + half_width_2)
  rbind(ci.1, ci.2)
}
# key: scenario letters encode the simulation parameters
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "BC"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(1.5))
betaU <- c(log(4), log(1.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
# Containers for per-simulation results.
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score (the unmeasured confounder U)
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calculate probabilities for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate potential-outcome subtypes under control (Yctrl) and treatment (Ytrt)
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
# Observed outcome by consistency: treated see Ytrt, controls see Yctrl
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# Estimate causal parameters (risk differences on the relevant principal strata)
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
# Case-exclusive subsets: subtype 1 vs controls, subtype 2 vs controls
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
# 2x2 cell counts (a, b, c, d) for the odds-ratio estimators
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
# Principal-stratum subsets for the survivor average causal effect (SACE)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
# NOTE(review): Y1ctrl is logical, so `Y1ctrl!=1` is equivalent to
# `Yctrl!=1`; presumably `Yctrl!=1` was intended here — confirm.
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
# Logistic approximations of the odds ratios, with and without adjusting for U
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen18a",patt,".RData"))
| /Simulations/Scripts/R/Rare/Scenario 18a/CMPEn50KrareScen18aBC.R | no_license | yadevi/CausalMPE | R | false | false | 4,221 | r | rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
CalcCImultinom <- function(fit)
{
s <- summary(fit)
coef <- s$coefficients
ses <- s$standard.errors
ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
return(rbind(ci.1,ci.2))
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "BC"
beta0 <- c(-6, -5)
betaE <- c(log(2.5), log(1.5))
betaU <- c(log(4), log(1.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen18a",patt,".RData"))
|
#'Set data endpoint
#'
#'access Indicate which dataRetrieval access code
#' you want to use options: \code{c('public','internal')}
#'
#' @param access code for data access. Options are: "public","internal","cooperator", or "USGS".
#' \itemize{
#' \item{"internal" represents Access=3 ...for a single water science center}
#' \item{"USGS" represents Access=2 ...for all water science centers}
#' \item{"cooperator" represents Access=1}
#' \item{"public" represents Access=0, public access}
#' }
#'
#'@author Luke Winslow, Jordan S Read
#'
#'@examples
#'
#'\dontrun{
#'setAccess('internal')
#'
#'setAccess('public')
#'
#'}
#'
#' @export
setAccess = function(access="public"){
  # Map the requested access level onto the numeric NWIS "Access=" code kept
  # in pkg.env; NULL (public) means no access restriction is appended.
  access = match.arg(access, c('public','internal','cooperator','USGS'))
  access_code <- switch(access,
                        internal   = '3',
                        cooperator = '1',
                        USGS       = '2',
                        NULL)
  access_note <- switch(access,
                        internal   = 'setting access to internal',
                        cooperator = 'setting access to cooperator',
                        USGS       = 'setting access to all USGS Water Science Centers',
                        'setting access to public')
  pkg.env$access = access_code
  message(access_note)
  # Reset the service endpoints used by the retrieval functions.
  pkg.env$waterservices = "http://waterservices.usgs.gov/nwis/site/"
  pkg.env$iv = "http://nwis.waterservices.usgs.gov/nwis/iv/"
  pkg.env$dv = "http://waterservices.usgs.gov/nwis/dv/"
  pkg.env$gwlevels = "http://waterservices.usgs.gov/nwis/gwlevels/"
  options(Access.dataRetrieval = access)
}
drURL <- function(base.name, ..., arg.list=NULL){
  # Build a full request URL: look up the endpoint stored in pkg.env under
  # `base.name` and append the query string built from the remaining args.
  #to do: add something to check for redundant params
  query <- drQueryArgs(..., arg.list=arg.list)
  paste0(pkg.env[[base.name]], '?', query)
}
drQueryArgs <- function(..., arg.list) {
  # Assemble a "name=value&name=value" query string from the named
  # arguments in `...` plus any extra pairs supplied in `arg.list`.
  dot.args <- list(...)
  # drop arguments that were explicitly passed as NULL
  keep <- !vapply(X = dot.args, FUN = is.null, FUN.VALUE = TRUE)
  dot.args <- dot.args[keep]
  all.args <- append(expand.grid(dot.args, stringsAsFactors = FALSE), arg.list)
  # only the first value of each argument is used in the pair
  arg.values <- unname(lapply(all.args, function(v) paste0("=", v[[1]])))
  pairs <- paste0(names(all.args), arg.values)
  paste(pairs, collapse = "&")
}
# Append additional query parameters (via drQueryArgs) to an existing URL.
appendDrURL <- function(url, ..., arg.list=NULL){
queryString <- drQueryArgs(..., arg.list=arg.list)
return(paste0(url, "&", queryString))
} | /R/setAccess.R | permissive | slarge/dataRetrieval | R | false | false | 2,198 | r | #'Set data endpoint
#'
#' Indicate which dataRetrieval access code
#' you want to use; options: \code{c('public','internal')}
#'
#' @param access code for data access. Options are: "public","internal","cooperator", or "USGS".
#' \itemize{
#' \item{"internal" represents Access=3 ...for a single water science center}
#' \item{"USGS" represents Access=2 ...for all water science centers}
#' \item{"cooperator" represents Access=1}
#' \item{"public" represents Access=0, public access}
#' }
#'
#'@author Luke Winslow, Jordan S Read
#'
#'@examples
#'
#'\dontrun{
#'setAccess('internal')
#'
#'setAccess('public')
#'
#'}
#'
#' @export
setAccess = function(access="public"){
# validate the requested code; match.arg() errors on anything else
access = match.arg(access, c('public','internal','cooperator','USGS'))
if(access=="internal"){
pkg.env$access = '3'
message('setting access to internal')
} else if(access=="cooperator"){
pkg.env$access = '1'
message('setting access to cooperator')
} else if(access=="USGS"){
pkg.env$access = '2'
message('setting access to all USGS Water Science Centers')
} else {
# "public": no Access code is attached to requests
pkg.env$access = NULL
message('setting access to public')
}
# base URLs for the NWIS services are reset on every call
pkg.env$waterservices = "http://waterservices.usgs.gov/nwis/site/"
pkg.env$iv = "http://nwis.waterservices.usgs.gov/nwis/iv/"
pkg.env$dv = "http://waterservices.usgs.gov/nwis/dv/"
pkg.env$gwlevels = "http://waterservices.usgs.gov/nwis/gwlevels/"
options(Access.dataRetrieval = access)
}
# Build a full request URL: base endpoint from pkg.env plus query string.
drURL <- function(base.name, ..., arg.list=NULL){
queryString <- drQueryArgs(..., arg.list=arg.list)
#to do: add something to check for redundant params
return(paste0(pkg.env[[base.name]], '?', queryString))
}
# Assemble a "name=value&name=value" query string from ... and arg.list.
drQueryArgs <- function(..., arg.list){
dots <- list(...)
# drop arguments that were explicitly passed as NULL
dots <- dots[!vapply(X=dots,FUN=is.null,FUN.VALUE = TRUE)]
args <- append(expand.grid(dots, stringsAsFactors = FALSE), arg.list)
# get the args into name=value strings (only the first value of each is used)
keyValues <- paste0(names(args),unname(lapply(args, function(x) paste0('=',x[[1]]))))
return(paste(keyValues, collapse='&'))
}
# Append additional query parameters (via drQueryArgs) to an existing URL.
appendDrURL <- function(url, ..., arg.list=NULL){
queryString <- drQueryArgs(..., arg.list=arg.list)
return(paste0(url, "&", queryString))
}
require(mvtnorm)
## ----- used in sapply to find all the densities
dmvnorm_log <- function(index, mu, sigma, y) {
  # Log-density of each row of `y` under the multivariate normal whose
  # mean is row `index` of `mu`, with shared covariance `sigma`.
  #   index : row index into mu
  #   mu    : K x p matrix, one cluster mean per row
  #   sigma : p x p covariance matrix (assumed common to all clusters)
  #   y     : n x p data matrix
  cluster.mean <- mu[index, ]
  dmvnorm(y, cluster.mean, sigma, log = TRUE)
}
## ----- compute the number of distinct non-zero cluster means in one dimension
## ----- used when computing BIC or GIC
## mu.j     : numeric vector of cluster means for a single dimension
## eps.diff : tolerance below which two means (or a mean and zero) are
##            treated as identical
count.mu <- function(mu.j, eps.diff) {
  pair.dist <- as.matrix(dist(mu.j, method = "manhattan"))
  ## initial count: means that are non-zero within tolerance
  ct <- length(mu.j[abs(mu.j) > eps.diff])
  ## mask the diagonal and upper triangle so each pair is tested once
  pair.dist[upper.tri(pair.dist, diag = TRUE)] <- NA
  if (any(pair.dist < eps.diff, na.rm = TRUE)) {
    close.pairs <- which(pair.dist < eps.diff, arr.ind = TRUE)
    merged <- mu.j
    ## collapse each near-identical pair onto its smaller member so that
    ## unique() sees them as a single mean
    for (i in seq_len(nrow(close.pairs))) {
      merged[close.pairs[i, ]] <- min(merged[close.pairs[i, ]])
    }
    ct <- length(unique(merged[abs(merged) > eps.diff]))
  }
  return(ct)
}
| /R/additional_fn.R | no_license | lu-lu10/PARSE | R | false | false | 1,168 | r | require(mvtnorm)
## ----- used in sapply to find all the densities
dmvnorm_log <- function(index, mu, sigma, y) {
## index = row index of mu
## mu = K by p matrix, each row represents one cluster mean
## y = n by p data matrix
## sigma = p by p covariance matrix (assume same covariance for each cluster)
## returns mvtnorm::dmvnorm(..., log=TRUE), i.e. log of the density
## (there is no log.scale argument; the output is always on the log scale)
return(dmvnorm(y, mu[index,], sigma, log=TRUE))
}
## ----- compute the number of unique cluster means for each dimension
## ----- used in compute BIC or GIC
## mu.j = vector of cluster means for one dimension; eps.diff = tolerance
## below which two means (or a mean and zero) are treated as equal
count.mu <- function(mu.j, eps.diff) {
temp.dist <- as.matrix(dist(mu.j, method = 'manhattan'))
ct <- length(mu.j[abs(mu.j)>eps.diff])
## initial counts (nonzero elements)
## --- if exists same means: mask diagonal and upper triangle so each
## pair of means is compared exactly once
temp.dist[upper.tri(temp.dist, diag = T)] <- NA
if(any(temp.dist < eps.diff, na.rm = T)) {
temp.index <- which(temp.dist < eps.diff, arr.ind = TRUE)
temp1 <- mu.j
## --- truncated distance so means are not exactly same, make them equal
for(i in 1:dim(temp.index)[1]){
temp1[temp.index[i,]] <- min(temp1[temp.index[i,]])
}
ct <- length(unique(temp1[abs(temp1)>eps.diff]))
}
return(ct)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varSelc.R
\name{varSelc}
\alias{varSelc}
\title{Performing two-stage variable selection}
\usage{
varSelc(b, method = "Zcut", wEst = NULL, bf.thres)
}
\arguments{
\item{b}{\eqn{\tau} x \eqn{p} matrix of estimates of beta}
\item{method}{thresholding method to be used 'Zcut' or 'Wcut', defaults to 'Zcut'}
\item{wEst}{estimates of complexity parameters required if method == 'Wcut'}
\item{bf.thres}{threshold to be used to compare Bayes factor}
}
\description{
Performing two-stage variable selection using the MCMC estimates of local Bayesian modeling
}
\examples{
varSelc()
}
\keyword{varSelc()}
| /man/varSelc.Rd | no_license | shariq-mohammed/SpikeSlabEEG | R | false | true | 700 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/varSelc.R
\name{varSelc}
\alias{varSelc}
\title{Performing two-stage variable selection}
\usage{
varSelc(b, method = "Zcut", wEst = NULL, bf.thres)
}
\arguments{
\item{b}{\eqn{\tau} x \eqn{p} matrix of estimates of beta}
\item{method}{thresholding method to be used 'Zcut' or 'Wcut', defaults to 'Zcut'}
\item{wEst}{estimates of complexity parameters required if method == 'Wcut'}
\item{bf.thres}{threshold to be used to compare Bayes factor}
}
\description{
Performing two-stage variable selection using the MCMC estimates of local Bayesian modeling
}
\examples{
varSelc()
}
\keyword{varSelc()}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/validationUtil.R
\name{getLTsld}
\alias{getLTsld}
\title{Get annual and seasonal below-threshold spell length distributions from a station or field object}
\usage{
getLTsld(data, threshold, INDEX = 1:dim(data)[1])
}
\description{
Get annual and seasonal below-threshold spell length distributions from a station or field object
}
\keyword{internal}
| /man/getLTsld.Rd | no_license | jamiepg3/R_VALUE | R | false | false | 466 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/validationUtil.R
\name{getLTsld}
\alias{getLTsld}
\title{Get annual and seasonal below-threshold spell length distributions from a station or field object}
\usage{
getLTsld(data, threshold, INDEX = 1:dim(data)[1])
}
\description{
Get annual and seasonal below-threshold spell length distributions from a station or field object
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqladmin_functions.R
\name{sql.instances.clone}
\alias{sql.instances.clone}
\title{Creates a Cloud SQL instance as a clone of the source instance. The API is not ready for Second Generation instances yet.}
\usage{
sql.instances.clone(InstancesCloneRequest, project, instance)
}
\arguments{
\item{InstancesCloneRequest}{The \link{InstancesCloneRequest} object to pass to this method}
\item{project}{Project ID of the source as well as the clone Cloud SQL instance}
\item{instance}{The ID of the Cloud SQL instance to be cloned (source)}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/sqlservice.admin
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/sqlservice.admin"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/sql/docs/reference/latest}{Google Documentation}
Other InstancesCloneRequest functions: \code{\link{InstancesCloneRequest}}
}
| /googlesqladminv1beta4.auto/man/sql.instances.clone.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,315 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqladmin_functions.R
\name{sql.instances.clone}
\alias{sql.instances.clone}
\title{Creates a Cloud SQL instance as a clone of the source instance. The API is not ready for Second Generation instances yet.}
\usage{
sql.instances.clone(InstancesCloneRequest, project, instance)
}
\arguments{
\item{InstancesCloneRequest}{The \link{InstancesCloneRequest} object to pass to this method}
\item{project}{Project ID of the source as well as the clone Cloud SQL instance}
\item{instance}{The ID of the Cloud SQL instance to be cloned (source)}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/sqlservice.admin
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/sqlservice.admin)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/sql/docs/reference/latest}{Google Documentation}
Other InstancesCloneRequest functions: \code{\link{InstancesCloneRequest}}
}
|
library(MESS)
### Name: bin
### Title: Fast binning of numeric vector into equidistant bins
### Aliases: bin
### ** Examples
# Reproducible draw of 20 integers in 1..10 (with replacement)
set.seed(1)
x <- sample(10, 20, replace = TRUE)
# Bin the sampled values into 15 equidistant bins (MESS::bin)
bin(x, 15)
| /data/genthat_extracted_code/MESS/examples/bin.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 193 | r | library(MESS)
### Name: bin
### Title: Fast binning of numeric vector into equidistant bins
### Aliases: bin
### ** Examples
# Reproducible example: bin 20 sampled integers into 15 equidistant bins
set.seed(1)
x <- sample(10, 20, replace = TRUE)
bin(x, 15)
|
amvce <-
function(y,X,A,tolerance=1.0e-3,maxIter=100,verbose=FALSE){
#Animal model variance component estimation via iterative REML
#Author: Minghui Wang <m.h.wang@live.com>
# y         : numeric response vector of length n
# X         : n x NP fixed-effect design matrix
# A         : n x n relationship (covariance structure) matrix
# tolerance : stop when the change in log-likelihood falls below this
# maxIter   : maximum number of REML iterations
# verbose   : if TRUE, print per-iteration logL and variance components
# Returns list(coefficients, VC = c(V(u), V(e)), loglik).
y=as.vector(y);X=as.matrix(X);A=as.matrix(A)
n=dim(X)[1]
NP=dim(X)[2]
if(n!=length(y) || n!=nrow(A) || n!=ncol(A)) stop('Invalid input\n')
R=diag(n) # n x n identity: residual covariance structure
# Mark rows of X for removal: all but one of each run of identical rows
# (found by lexicographic ordering), plus extra rows if too few distinct
# rows would remain relative to NP.
ireduce<-function(X){
n=nrow(X)
NP=ncol(X)
ij=do.call("order", split(X, col(X)))
iRemv=rep(FALSE,n)
nUr=1
lstEt=NA
for(i in 2:n){
if(all(X[ij[i],]==X[ij[i-1],])){
lstEt=ij[i]
if(i==n) iRemv[lstEt]=T
}else{
if(!is.na(lstEt)) iRemv[lstEt]=T
lstEt=NA
nUr=nUr+1
}
}
if(n-nUr<NP){
iRemv[(1:n)[!iRemv][1:(NP+nUr-n)]]=T
}
return(iRemv)
}
# Moore-Penrose generalised inverse via SVD
ginv <- function(X, tol = sqrt(.Machine$double.eps)){
#borrowed from library MASS
if(length(dim(X)) > 2L || !(is.numeric(X) || is.complex(X)))
stop("'X' must be a numeric or complex matrix")
if(!is.matrix(X)) X <- as.matrix(X)
Xsvd <- svd(X)
if(is.complex(X)) Xsvd$u <- Conj(Xsvd$u)
Positive <- Xsvd$d > max(tol * Xsvd$d[1L], 0)
if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u))
else if(!any(Positive)) array(0, dim(X)[2L:1L])
else Xsvd$v[, Positive, drop=FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop=FALSE]))
}
# trace of the matrix product A %*% B, computed row-by-row without
# forming the full product
ptr<-function(A,B){
s=0.0
for(i in 1:nrow(A)){
s=s+sum(A[i,]*B[,i])
}
return(s)
}
# log-determinant (modulus) of a matrix
logdet<-function(a) determinant(a,logarithm=TRUE)[[1]][1]
# M = I - X (X'X)^- X' projects onto the orthogonal complement of the
# column space of X
M=diag(n) - X %*% tcrossprod(ginv(crossprod(X,X)),X)
iRemoval=ireduce(X)
K1=M[!iRemoval,]
K=t(K1)
Xr=X[!iRemoval,]
nIter=0
if(verbose==T) cat('Running REML algorithm...\nIter.\tlogL\tV(u)\tV(e)\n')
loglik0=NA
loglik=NA
V=NA
K3=K %*% solve(K1 %*% K,K1)
# start values: split the sample variance of y as 1/3 vs 2/3
VC=var(y)*(1:2/3)
while(nIter<=maxIter){
# V restricted to the reduced space; P is the corresponding projection
V=VC[1] * A + VC[2] * R
V=K1 %*% V %*% K
Vi=solve(V)
P=crossprod(K1,Vi %*% K1)
P1=P %*% y
# EM-style updates of the two variance components
tum=VC[1]*n-VC[1]^2 * ptr(P,A)
tum=tum+VC[1]^2.0 * (crossprod(P1,A %*% P1))
tem=VC[2]*sum(diag(K3))-VC[2]^2*ptr(K3,P)
tem=tem+VC[2]^2*(crossprod(P1,K3 %*% P1))
VC[1]=as.vector(tum/n)
VC[2]=as.vector(tem/(n-NP))
nIter=nIter+1
X3=crossprod(Xr,Vi) %*% Xr
loglik=-0.5*(logdet(V)+logdet(X3)+crossprod(y,P1)[,1])
if(verbose==T) cat(nIter,'\t',sprintf("%.4f",loglik),'\t',paste(sprintf("%.3f",VC),collapse='\t'),'\n',sep='')
# converged once the log-likelihood change drops below `tolerance`
if(nIter>1 && abs(loglik-loglik0)<tolerance) break
loglik0=loglik
}
if(nIter>maxIter){
warning(paste('Failed to converge after',nIter,'iterations'))
}
# GLS fixed-effect estimates at the converged variance components
Vi=solve(VC[1] * A + VC[2] * R)
b=tcrossprod(solve(crossprod(X,Vi) %*% X),X) %*% (Vi %*% y)[,1]
res=list(coefficients=b,VC=VC,loglik=loglik)
return(res)
}
| /R/amvce.R | no_license | JingChen1114/TetraQTLAnalysis | R | false | false | 2,603 | r | amvce <-
function(y,X,A,tolerance=1.0e-3,maxIter=100,verbose=FALSE){
#Animal model variance component estimation
#Author: Minghui Wang <m.h.wang@live.com>
y=as.vector(y);X=as.matrix(X);A=as.matrix(A)
n=dim(X)[1]
NP=dim(X)[2]
if(n!=length(y) || n!=nrow(A) || n!=ncol(A)) stop('Invalid input\n')
R=diag(n) #200 x 200 with 1's on the diagonal
ireduce<-function(X){
n=nrow(X)
NP=ncol(X)
ij=do.call("order", split(X, col(X)))
iRemv=rep(FALSE,n)
nUr=1
lstEt=NA
for(i in 2:n){
if(all(X[ij[i],]==X[ij[i-1],])){
lstEt=ij[i]
if(i==n) iRemv[lstEt]=T
}else{
if(!is.na(lstEt)) iRemv[lstEt]=T
lstEt=NA
nUr=nUr+1
}
}
if(n-nUr<NP){
iRemv[(1:n)[!iRemv][1:(NP+nUr-n)]]=T
}
return(iRemv)
}
ginv <- function(X, tol = sqrt(.Machine$double.eps)){
#borrowed from library MASS
if(length(dim(X)) > 2L || !(is.numeric(X) || is.complex(X)))
stop("'X' must be a numeric or complex matrix")
if(!is.matrix(X)) X <- as.matrix(X)
Xsvd <- svd(X)
if(is.complex(X)) Xsvd$u <- Conj(Xsvd$u)
Positive <- Xsvd$d > max(tol * Xsvd$d[1L], 0)
if (all(Positive)) Xsvd$v %*% (1/Xsvd$d * t(Xsvd$u))
else if(!any(Positive)) array(0, dim(X)[2L:1L])
else Xsvd$v[, Positive, drop=FALSE] %*% ((1/Xsvd$d[Positive]) * t(Xsvd$u[, Positive, drop=FALSE]))
}
ptr<-function(A,B){
s=0.0
for(i in 1:nrow(A)){
s=s+sum(A[i,]*B[,i])
}
return(s)
}
logdet<-function(a) determinant(a,logarithm=TRUE)[[1]][1]
M=diag(n) - X %*% tcrossprod(ginv(crossprod(X,X)),X)
iRemoval=ireduce(X)
K1=M[!iRemoval,]
K=t(K1)
Xr=X[!iRemoval,]
nIter=0
if(verbose==T) cat('Running REML algorithm...\nIter.\tlogL\tV(u)\tV(e)\n')
loglik0=NA
loglik=NA
V=NA
K3=K %*% solve(K1 %*% K,K1)
VC=var(y)*(1:2/3)
while(nIter<=maxIter){
V=VC[1] * A + VC[2] * R
V=K1 %*% V %*% K
Vi=solve(V)
P=crossprod(K1,Vi %*% K1)
P1=P %*% y
tum=VC[1]*n-VC[1]^2 * ptr(P,A)
tum=tum+VC[1]^2.0 * (crossprod(P1,A %*% P1))
tem=VC[2]*sum(diag(K3))-VC[2]^2*ptr(K3,P)
tem=tem+VC[2]^2*(crossprod(P1,K3 %*% P1))
VC[1]=as.vector(tum/n)
VC[2]=as.vector(tem/(n-NP))
nIter=nIter+1
X3=crossprod(Xr,Vi) %*% Xr
loglik=-0.5*(logdet(V)+logdet(X3)+crossprod(y,P1)[,1])
if(verbose==T) cat(nIter,'\t',sprintf("%.4f",loglik),'\t',paste(sprintf("%.3f",VC),collapse='\t'),'\n',sep='')
if(nIter>1 && abs(loglik-loglik0)<tolerance) break
loglik0=loglik
}
if(nIter>maxIter){
warning(paste('Failed to converge after',nIter,'iterations'))
}
Vi=solve(VC[1] * A + VC[2] * R)
b=tcrossprod(solve(crossprod(X,Vi) %*% X),X) %*% (Vi %*% y)[,1]
res=list(coefficients=b,VC=VC,loglik=loglik)
return(res)
}
|
libs <- c("shiny", "shinyBS", "Hmisc", "xtable", 'colourpicker')
chk <- !libs %in% installed.packages()
inst <- lapply(libs[chk], install.packages)
library(shiny)
library(shinyBS)
library(LBSPR)
library(Hmisc)
library(xtable)
shinyServer(function(input, output, clientData, session) {
# Reactive state shared across the app:
#   useDF/default  - whether the default parameter set is in use (and its values)
#   useExamp       - whether the bundled example file is the data source
#   ShowResults/AssessReady/DoneAssess - assessment workflow flags
values <- reactiveValues(useDF=FALSE, default=NULL,useExamp=FALSE,
ShowResults=FALSE, AssessReady=FALSE, DoneAssess=FALSE)
# Changing the bin width invalidates any previous assessment
observeEvent(input$binswidth, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
# If any biological parameters are changed, the assessment is reset
observeEvent(input$MK, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
observeEvent(input$relLinf, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
observeEvent(input$SetLinf, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
observeEvent(input$L50, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
observeEvent(input$L95, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
# Observe Events
# "Default parameters" button: store the default M/K, Linf, L50, L95 and
# L50/Linf values for later use
observeEvent(input$defPars, {
values$useDF <- TRUE
values$default <- c(1.5, 100, 66, 70, 0.66)
})
# Switching the example-data choice invalidates any previous assessment
observeEvent(input$exampData, {
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
# "Use example data" button: example file becomes the data source
observeEvent(input$exmpData, {
values$useExamp <- TRUE
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
# Uploading a file switches the source away from the example data
observeEvent(input$file1, {
values$useExamp <- FALSE
values$ShowResults <- FALSE
values$AssessReady <- FALSE
values$DoneAssess <- FALSE
})
# Mark the data as ready once a button press arrives and data exists
observeEvent(input$assessReady, {
if(input$assessReady == 0) return(NULL)
if (!is.null(data())) values$AssessReady <- TRUE
})
# "Go" button: reveal the assessment results panels
observeEvent(input$goAssess, {
if(input$goAssess == 0) return(NULL)
values$ShowResults <- TRUE
})
# Tool Tips
# Hover help for the upload controls (ids refer to inputs defined in the
# UI file; "smooth" is not visible here -- presumably a UI input, confirm)
addTooltip(session, id = "file1", title = "CSV or text file only",
placement = "right", trigger = "hover")
addTooltip(session, id="header", title = "Does the first row contain labels?",
placement = "right", trigger = "hover")
addTooltip(session, id="smooth", title = "Only used if more than one year",
placement = "right", trigger = "hover")
## Alerts ##
# Validation observer: re-checks the biological inputs and SPR reference
# points whenever they change, raising/closing shinyBS alerts accordingly.
observe({
# Pars <- Lens <- NULL
# ParsOK <- FALSE
# if(chkFileUp() & is.null(UpLoadMSG())) Pars <- getLB_pars()
# if(chkFileUp() & is.null(UpLoadMSG())) Lens <- getLB_lens()
# if (class(Pars)!="NULL") {
# if(length(Pars@Linf)>0 & is.finite(Pars@Linf)) ParsOK <- TRUE
# }
# if (ParsOK & class(Lens) !="NULL") {
# if (Pars@Linf < max(Lens@LMids)) {
# createAlert(session, "LMidsErr", "lmidserr", title = "Error",
# content=HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")),
# " must be larger than maximum length bin (", max(Lens@LMids), ")")), append=FALSE)
# } else {
# closeAlert(session, "lmidserr")
# }
# }
# if (!values$ShowResults) return(NULL)
# Coerce the text inputs to numeric; empty/NULL inputs are normalised to
# NA so the checks below can test them uniformly.
MK <- as.numeric(input$MK)
Linf <- getLinf() # as.numeric(input$L50) / as.numeric(input$relLinf)
# Linf <- getLinf() # as.numeric(input$Linf)
L50 <- as.numeric(input$L50)
L95 <- as.numeric(input$L95)
relLinf <- as.numeric(input$relLinf)
if (length(MK) <1) MK <- NA
if (length(Linf)<1) Linf <- NA
if (length(L50)<1) L50 <- NA
if (length(L95)<1) L95 <- NA
if (length(relLinf)<1) relLinf <- NA
if (is.null(MK)) MK <- NA
if (is.null(Linf)) Linf <- NA
if (is.null(L50)) L50 <- NA
if (is.null(L95)) L95 <- NA
if (is.null(relLinf)) relLinf <- NA
# Errors #
# Reference points: the SPR limit must lie below the SPR target
if (input$sprtarg <= input$sprlim) {
createAlert(session, "refalert", "refs", title = "Error",
content = "SPR Limit must be less than SPR Target", append = FALSE)
} else {
closeAlert(session, "refs")
}
# chk is TRUE if any parameter is negative, NA when none are negative but
# some are missing; the alert only fires on a definite TRUE
chk <- any(c(MK, Linf, L50, L95, relLinf)<0)
if (all(is.na(chk))) doChk <- FALSE
if (!all(is.na(chk))) {
doChk <- chk
}
if (doChk) {
createAlert(session, "NegVals", "negvals", title = "Error",
content="Negative values don't make much sense!", append=FALSE)
} else {
closeAlert(session, "negvals")
}
# Maturity ogive: L50 must be below L95
if (!is.na(L95) & !is.na(L50)) {
if (L95 <= L50) {
createAlert(session, "lmalert", "lmpar", title = "Error",
content = HTML(paste0(tags$i("L", tags$sub("50")), " must be less than ",
tags$i("L", tags$sub("95")))),
append = FALSE)
} else {
closeAlert(session, "lmpar")
}
}
# Warnings
# Maturity parameters cannot exceed the asymptotic length
if (!is.na(L95) & !is.na(L50) & !is.na(Linf)) {
if (L50 >= Linf | L95 >= Linf) {
createAlert(session, "lmalert2", "lmpar2", title = "Error",
content = HTML(paste0("Maturity parameters are higher than ", tags$i("L"), tags$sub(HTML("&#8734;")))),
append = FALSE)
} else {
closeAlert(session, "lmpar2")
}
}
if (!is.na(Linf) & Linf < 10) {
createAlert(session, "linfalert", "linf1", title = "Warning",
content = HTML(paste0("Are you sure ", tags$i("L"), tags$sub(HTML("&#8734;")), " is so low?")),
append = FALSE)
} else {
closeAlert(session, "linf1")
}
# For frequency data the largest length bin must cover Linf
if (!is.na(Linf) & is.null(UpLoadMSG()) & input$dataType == "freq") {
Lens <- getLB_lens()
if (Linf > max(Lens@LMids)) {
createAlert(session, "linfalert3", "linf2", title = "Error",
content = HTML(paste0("Maximum length bin (", round(max(Lens@LMids),2),
") must be greater than ", tags$i("L"), tags$sub(HTML("&#8734;")))),
append = FALSE)
} else {
closeAlert(session, "linf2")
}
}
# Warn at extreme M/K ratios where the model may behave poorly
if (!is.na(MK) & (MK < 0.2 | MK > 6)) {
createAlert(session, "mkalert", "mk1", title = "Warning",
content = HTML(paste0("Are you sure of the ", tags$i("M/K"), " ratio? Model may not perform well
at extreme values")),
append = FALSE)
} else {
closeAlert(session, "mk1")
}
if (!is.na(relLinf) & (relLinf >=1 | relLinf <=0)) {
createAlert(session, "RelLinferr", "relLinferr", title = "Error",
content = HTML(paste0("Relative size at maturity must be between 0 and 1")),
append=FALSE)
} else {
closeAlert(session, "relLinferr")
}
# Once parameters validate, re-check Linf against the constructed length bins
templen <- NULL
if (chkPars()) templen <- getLB_lens()
if (class(templen) != "NULL") {
if (!is.na(Linf) & Linf > max(templen@LMids)) {
createAlert(session, "linfalert2", "linf2", title = "Error",
content = HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")), "(", Linf, ") must be lower than the largest length bin (", max(templen@LMids), ")")),
append = FALSE)
} else {
closeAlert(session, "linf2")
}
}
# Add checks for all parameters
})
#############################
### Read in CSV file  ###
### Check that data is ok ###
#############################
# Map the selected example-data option to its CSV file path
# (raw measurements vs frequencies, single vs multiple years, with or
# without a header row).
ExampleDataFile <- reactive({
switch(input$exampData,
rawSingHead = "../../LRaw_SingYrHead.csv",
rawSing = "../../LRaw_SingYr.csv",
rawMultiHead = "../../LRaw_MultiYrHead.csv",
rawMulti = "../../LRaw_MultiYr.csv",
freqSingHead = "../../LFreq_SingYrHead.csv",
freqSing = "../../LFreq_SingYr.csv",
freqMultiHead = "../../LFreq_MultiYrHead.csv",
freqMulti = "../../LFreq_MultiYr.csv")
})
# Show a download button only when the example data set is the active source.
output$downloadExample <- renderUI({
if (!is.null(data()) & values$useExamp) {
fluidRow(
h5(strong("Download Example File")),
downloadButton("dnlData", label = "Download", class = NULL)
, style="padding: 5px 15px;")
}
})
# Serve the currently selected example file for download.
output$dnlData <- downloadHandler(
  filename = function() {
    # basename() strips directory parts ("../../") which previously
    # leaked into the suggested download filename; sub() with an
    # anchored, escaped pattern drops only a trailing ".csv" (the old
    # gsub('.csv', ...) treated the dot as a regex wildcard).
    nm <- basename(ExampleDataFile())
    nm <- sub("\\.csv$", "", nm)
    paste0(nm, ".csv")
  },
  content = function(file) {
    # Write the parsed example data back out without row/column names
    write.table(data(), file, sep = ",", row.names = FALSE, col.names = FALSE)
  }
)
# Parse the active data source: either the bundled example CSV or the
# uploaded file. All-NA rows/columns are stripped from data frames, and
# NAs are dropped from plain vectors.
data <- reactive({
if (values$useExamp) {
read.csv(ExampleDataFile(), header = input$header,
sep = input$sep,
stringsAsFactors=FALSE, check.names=FALSE)
} else {
file1 <- input$file1
if (is.null(file1)) return(NULL)
dat <- read.csv(file1$datapath, header = input$header,
sep = input$sep, stringsAsFactors=FALSE, check.names=FALSE)
# NOTE(review): read.csv returns a data.frame; the "matrix" branch
# looks defensive rather than reachable -- confirm
if (class(dat) == "data.frame" | class(dat) == "matrix") {
if (ncol(dat) > 1) {
chkNAs <- apply(dat, 2, is.na) # check NAs
dat <- dat[!apply(chkNAs, 1, prod),, drop=FALSE]
dat <- dat[,!apply(chkNAs, 2, prod), drop=FALSE]
}
}
if (class(dat) == "numeric" | class(dat) == "integer") {
dat <- dat[!is.na(dat)]
}
dat
}
})
# Check - has file been uploaded?
chkFileUp <- reactive({
  # TRUE exactly when data() has produced something
  !is.null(data())
})
# Verify the chosen field separator actually split the file: if parsed
# cells still contain ";" or "," the separator was wrong.
chkSep <- reactive({
  dat <- data()
  if (is.null(dat)) return(TRUE)
  cells <- as.matrix(dat)
  has.delim <- any(grepl(";", cells)) || any(grepl(",", cells))
  !has.delim
})
# TRUE when the parsed data still contains character cells -- typically a
# header row that wasn't flagged, or a wrong separator.
chkText <- reactive({
if(is.null(data())) return(FALSE)
if(chkFileUp() == FALSE) return(FALSE)
if(!chkSep()) return(TRUE)
if(class(data()) == "character") return(TRUE)
# NOTE(review): data()[1,1] assumes a 2-d object here; a plain vector
# would error on this subscript -- presumably guarded by the branch
# above, confirm
if(class(data()[1,1]) == "character") return(TRUE)
FALSE
})
# Collect all upload diagnostics into a character vector; NULL (length 0)
# when the uploaded file looks consistent with the chosen options.
UpLoadMSG <- reactive({
msg1 <- msg2 <- msg3 <- msg4 <- msg5 <- msg6 <- NULL
if(chkFileUp() == FALSE) msg1 <- "Please upload a CSV data file"
if(!chkSep()) msg6 <- "Check File Separator"
# the content checks only make sense once a file parsed cleanly
if(chkFileUp() == TRUE & chkSep()) {
if(chkFreq() & input$dataType == "raw") {
msg2 <- "It looks like you've uploaded length frequencies? Please change Data Type"
}
if(!chkFreq() & input$dataType == "freq") {
msg3 <- "It looks like you've uploaded length measurements? Please change Data Type"
}
if(chkHeader() & !input$header) {
msg4 <- "It looks like the file has a header row? Please check Header box"
}
if(chkText()) {
msg5 <- "Text in the data file. Do you have a header?"
}
}
out <- c(msg1, msg2, msg3, msg4, msg5, msg6)
out
})
output$UpLoadText <- renderUI({
  # Join all upload diagnostics into one HTML snippet, one per line
  HTML(paste(UpLoadMSG(), collapse = "<br/>"))
})
chkFreq <- reactive({ # Check if data appears to be length frequencies
if(!chkFileUp()) return(NULL)
if(!chkSep()) return(NULL)
lendat <- as.matrix(data())
# raw measurements come as a single column; frequency data needs a
# length-bin column plus at least one count column
if (ncol(lendat) == 1) return (FALSE)
fst <- lendat[,1]
fst <- fst[is.finite(fst)]
# need at least three finite values to judge equal spacing; previously
# <3 values made diff()/median() degenerate (median of an empty diff is
# NA) and any 2-row file was always classified as frequencies
if (length(fst) < 3) return(FALSE)
# equally spaced first column => length-bin midpoints => frequency data
# NOTE(review): exact float equality; values read from CSV should round
# trip exactly, but a tolerance may be safer -- confirm
if (all(diff(fst) == median(diff(fst)))) return(TRUE)
FALSE
})
chkHeader <- reactive({ # Check if there appears to be a header
if(!chkFileUp()) return(NULL)
if(!chkSep()) return(NULL)
if(input$header) return(TRUE)
lendat <- as.matrix(data())
topRow <- lendat[1,, drop=FALSE]
# character cells in the first row imply column labels
if (class(topRow) == "character") return(TRUE)
# frequency data with an NA first bin suggests a non-numeric header
if (chkFreq() & is.na(topRow[1])) return(TRUE)
lendat <- as.matrix(lendat)
topRow <- as.numeric(lendat[1,, drop=FALSE])
# heuristics for raw data: consecutive integers (column counters) or
# year-like values in the first row indicate a header
if (!chkFreq()) {
if (ncol(lendat) > 1) {
if (all(diff(topRow) == 1)) return(TRUE)
if (all(topRow > 1900 & topRow < 2100)) return(TRUE)
}
if (ncol(lendat) == 1) {
if (topRow[1] > 1900 & topRow[1] < 2100) return(TRUE)
}
}
FALSE
})
chkMulitYear <- reactive({ # Check if there are multiple years
  # Frequency data spends one column on length bins, so multiple years
  # means more than 2 columns; raw data needs only more than 1.
  if (!chkFileUp()) return(NULL)
  if (!chkSep()) return(NULL)
  n.col <- ncol(as.matrix(data()))
  min.cols <- if (chkFreq()) 2L else 1L
  n.col > min.cols
})
# Summary table of the active file: name, detected type, header flag and
# whether multiple years were detected; cells are centred via rowCallback.
output$FileTable <- renderDataTable({
if(!chkFileUp()) return(NULL)
if(values$useExamp) {
DF <- data.frame(Filename=ExampleDataFile(),
DataType=ifelse(chkFreq(), "Frequency", "Raw"),
Header=chkHeader(),
MultiYear=chkMulitYear())
} else {
DF <- data.frame(Filename=input$file1$name,
DataType=ifelse(chkFreq(), "Frequency", "Raw"),
Header=chkHeader(),
MultiYear=chkMulitYear())
}
return(DF)
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
ordering=FALSE, info=FALSE, rowCallback = I(
'function(row, data) {
$("td", row).css("text-align", "center");
}'
)))
# Heading shown above the metadata table for user-uploaded files only.
output$metadata <- renderUI({
if(values$useExamp) return()
if(!chkFileUp()) return()
HTML(paste(h3("Check the Uploaded File"),
p("Does everything look right?"),
h4("File Metadata")))
})
# Preview of the parsed data; strips the "X" prefix read.csv adds to
# numeric column names of raw data before showing the first rows.
output$topdata <- renderDataTable({
# Print out first 6 observations
if(!chkFileUp()) return(NULL)
dat <- data()
if (input$header == TRUE) {
innames <- colnames(dat)
if (input$dataType == "freq") {
if (ncol(dat) >1) {
# innames[1] <- "Length.Bins"
# innames[2:ncol(dat)] <- gsub("X", "", innames[2:ncol(dat)])
# colnames(dat) <- innames
}
}
if (input$dataType == "raw") {
innames <- gsub("X", "", innames)
colnames(dat) <- innames
}
}
if (input$header == FALSE & input$dataType == "freq") {
# if (ncol(dat) >1) {
# colnames(dat)[1] <- "Length.Bins"
# colnames(dat)[2:length(colnames(dat))] <- 1:(length(colnames(dat))-1)
# }
}
head(dat)
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
ordering=FALSE, info=FALSE)
)
# Explanatory text shown above the data preview table.
output$fileContents <- renderUI({
if(!chkFileUp()) return()
HTML(paste(h4("File contents"),
p("This shows the first six rows of your data file. All numbers below the bold black heading should be your length data. If you have multiple years of data, they should appear in seperate columns in the table below. ")
))
})
###################
## Fit Model Tab ##
###################
# observeEvent(input$relLinf, {
# observe({
# myval <- as.numeric(input$L50) / as.numeric(input$relLinf)
# # myval <- round(myval, 5)
# if (length(myval)>0) updateTextInput(session, "Linf", value = myval)
# })
# observeEvent(input$Linf, {
# if (length(as.numeric(input$relLinf)<1) & length(as.numeric(input$Linf))>0) {
# myval <- as.numeric(input$L50) / as.numeric(input$Linf)
# # myval <- round(myval, 2)
# if (!is.na(myval)) {
# updateTextInput(session, "relLinf", value = myval)
# }
# }
# }
# # )
# Asymptotic length: derived as L50 / (L50/Linf) when the relative
# maturity input is in use, otherwise taken directly from the SetLinf
# input. Returns NULL while the inputs are empty or non-numeric.
getLinf <- reactive({
if (input$dorelLinf == TRUE) {
tryLinf <- as.numeric(input$L50) / as.numeric(input$relLinf)
if (length(tryLinf)>0) {
if(is.na(tryLinf)) return(NULL)
return(tryLinf)
# return(round(tryLinf,2))
}
} else {
return(as.numeric(input$SetLinf))
}
})
# output$CurrLinf <- renderText(paste0("Linf = ", getLinf()))
# Display the current Linf value (rounded to 2 dp) as HTML, or an empty
# string while it is not yet numeric.
output$CurrLinf <- renderUI({
myLinf <- getLinf()
if(!is.numeric(myLinf)) return("")
myLinf <- round(getLinf(),2)
HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;"))), "=", myLinf)
})
# Render the parameter input widgets. The container div's id cycles
# through `letters` with each press of the defaults button -- presumably
# to force shiny to rebuild (and so reset) the inputs; confirm.
output$InputPars <- renderUI({
times <- input$defPars
# MKVal <- 1.5 #""
# relLinfVal <- 0.66 #""
# LinfVal <- ""
# L50Val <- 66 #""
# L95Val <- 70 #""
# disLinf <- getLinf() # round(as.numeric(input$L50) / as.numeric(input$relLinf),2)
# if (is.numeric(disLinf)) disLinf <- round(disLinf,2)
# if (values$useDF) {
# MKVal <- values$default[1]
# LinfVal <- values$default[2]
# L50Val <- values$default[3]
# L95Val <- values$default[4]
# relLinfVal <- values$default[5]
# }
div(id=letters[(times %% length(letters)) + 1],
h4("Life history ratios"),
fluidRow(
column(6,
textInput("MK", label=tags$i("M/K ratio"), value=1.5)
),
conditionalPanel(condition="input.dorelLinf == 'TRUE'",
column(6,
textInput("relLinf", label = HTML(paste0(tags$i("L", tags$sub("50")), "/",tags$i("L"), tags$sub(HTML("&#8734;")))), value=0.66))
),
# sliderInput("relLinf", label = HTML(paste0(tags$i("L", tags$sub("50")), "/",tags$i("L"), tags$sub(HTML("&#8734;")))),
# min=0.0, max=1, step=0.01, value=input$relLinf))),
column(6,
conditionalPanel(condition="input.dorelLinf == 'FALSE'",
textInput("SetLinf", label = HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")))), value=100))
)),
h4("Length-at-Maturity"),
fluidRow(
column(6,
textInput("L50", label = tags$i(HTML(paste0("L", tags$sub("50")))), value=66)
),
column(6,
textInput("L95", label = tags$i(HTML(paste0("L", tags$sub("95")))), value=70)
))
)
})
# Bin-width slider, rendered for raw length data only (frequency data
# arrives pre-binned) and only once the upload has validated.
output$HistControl <- renderUI({
if (!chkFileUp()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
if (input$dataType != "raw") return(NULL)
dat <- data()
maxLen <- max(dat, na.rm=TRUE)
# default bin width: ~1/20 of the maximum length (rounded up to 5)
Start <- floor((1/20 * ceiling(maxLen/5)*5)/5) * 5
# slider upper bound: ~1/10 of the maximum length, rounded up to 5
Max <- ceiling((maxLen/10)/5)*5
sliderInput("binswidth","Width of length bins:", min = 1, max = Max, value = Start)
})
# Build an LBSPR LB_pars object from the current inputs; NULL until a
# valid Linf is available.
getLB_pars <- reactive({
# print(values$ShowResults)
# if (!values$ShowResults) return(NULL)
LB_pars <- new("LB_pars", verbose=FALSE)
linf <- getLinf()
if (class(linf) == "NULL") return(NULL)
LB_pars@Linf <- linf # as.numeric(input$L50) / as.numeric(input$relLinf) # as.numeric(input$Linf)
LB_pars@L50 <- as.numeric(input$L50)
LB_pars@L95 <- as.numeric(input$L95)
LB_pars@MK <- as.numeric(input$MK)
LB_pars@Species <- input$Species
LB_pars@L_units <- input$Lunits
binwidth <- input$binswidth
# default bin width of 5 while the slider has not been rendered yet
LB_pars@BinWidth <- ifelse(is.null(binwidth), 5, binwidth)
LB_pars
})
# Build an LBSPR LB_lengths object from the uploaded data and the current
# parameter set; NULL until both are valid.
getLB_lens <- reactive({
if (!chkFileUp()) return(NULL)
if (!chkSep()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
dat <- as.matrix(data())
LB_pars <- getLB_pars()
# getLB_pars() yields NULL until Linf is valid; anything else must be a
# proper LB_pars object
if (is.null(LB_pars)) return(NULL)
if (!inherits(LB_pars, "LB_pars")) return(NULL)
LB_lengths <- new("LB_lengths", file=dat, LB_pars=LB_pars, dataType=input$dataType, verbose=FALSE)
LB_lengths
})
chkPars <- reactive({ # Are all input parameters entered?
Linf <- getLinf() # as.numeric(input$L50) / as.numeric(input$relLinf)
# pars = c(MK, Linf, L50, L95, relLinf) -- the index checks below rely
# on this ordering
pars <- as.numeric(c(input$MK, Linf, input$L50, input$L95, input$relLinf))
if(any(!is.finite(pars))) return(FALSE)
if (any(pars <=0)) return(FALSE)
if(length(pars)<1) return(FALSE)
# L95 must exceed L50
if (pars[4] <= pars[3]) return(FALSE)
# relative maturity must be strictly between 0 and 1
if (pars[5] <=0 | pars[5] >=1) return(FALSE)
#Pars <- getLB_pars()
Lens <- getLB_lens()
if (class(Lens) != "LB_lengths") return(FALSE)
# maturity lengths must be below Linf, and Linf within the length bins
if (pars[3] >= Linf | pars[4] >= Linf) return(FALSE)
if (Linf > max(Lens@LMids)) return(FALSE)
TRUE
})
# Show a warning string while the input parameters fail validation.
output$ValidPars <- renderText({
  parsOK <- chkText() || chkPars()
  if (parsOK) "" else "Invalid input parameters"
})
# Flag the absence of a usable data file.
output$ValidData <- renderText({
  if (is.null(UpLoadMSG())) "" else "No valid data file"
})
# Build the length-data plot: raw data before the model is fitted, the
# fitted object afterwards. Returns NULL while inputs are invalid.
# Fix: scalar flags are combined with `&&` (short-circuit) rather than the
# vectorized `&`.
MakeHist <- reactive({
  if (chkText()) return(NULL)
  if (!chkFileUp()) return(NULL)
  if (!chkPars()) return(NULL)
  if (!is.null(UpLoadMSG())) return(NULL)
  if (values$AssessReady && !values$ShowResults) return(plotSize(getLB_lens()))
  if (values$AssessReady && values$ShowResults) return(plotSize(doAssess()))
})
# Render the length-data histogram built by MakeHist().
output$DatHistPlot <- renderPlot({
if(chkText()) return(NULL)
if(!chkFileUp()) return(NULL)
if(!chkPars()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
histdat <- MakeHist()
# need to add check here if model fails to converge
print(histdat)  # explicit print() draws the plot object inside renderPlot
})
# Build the maturity-at-length plot. Before the model is fitted an LB_obj
# is assembled by copying every slot from the length data and the input
# parameters; after fitting the fitted object is plotted directly.
MatSel <- reactive({
if(chkText()) return(NULL)
if(!chkFileUp()) return(NULL)
if(!chkPars()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
if (values$AssessReady & !values$ShowResults) {
LBobj <- new("LB_obj")
LB_lengths <- getLB_lens()
LB_pars <- getLB_pars()
# Copy all slots from both objects into the combined LB_obj
Slots <- slotNames(LB_lengths)
for (X in 1:length(Slots)) slot(LBobj, Slots[X]) <- slot(LB_lengths, Slots[X])
Slots <- slotNames(LB_pars)
for (X in 1:length(Slots)) slot(LBobj, Slots[X]) <- slot(LB_pars, Slots[X])
LBobj@LMids[1] <- 0 # hack to make maturity curve start at zero
return(plotMat(LBobj))
}
if (values$AssessReady & values$ShowResults) return(plotMat(doAssess()))
})
# Render the maturity-at-length curve; guards mirror MatSel().
output$DatMat <- renderPlot({
  blocked <- chkText() || !chkFileUp() || !chkPars() || !is.null(UpLoadMSG())
  if (blocked) return(NULL)
  try(print(MatSel()))
})
## Fit the Model ##
# Run the LBSPR assessment routine. Returns the fitted LB_obj on success,
# FALSE on a fitting failure, NULL while preconditions are unmet.
# Fixes: removed the unused `lens` local and the redundant inner
# `if (values$ShowResults)` (already guaranteed by the guard above);
# test the try() result with inherits() (a failure returns a "try-error").
doAssess <- reactive({
  if (chkText()) return(NULL)
  if (!chkFileUp()) return(NULL)
  if (!values$ShowResults) return(NULL)
  values$DoneAssess <- TRUE
  fitmod <- try(LBSPRfit(getLB_pars(), getLB_lens(), useCPP = TRUE))
  if (inherits(fitmod, "LB_obj")) return(fitmod)
  values$DoneAssess <- FALSE
  FALSE
})
# Two-step workflow button: first "Plot Data", then "Fit Model".
output$clickAssess <- renderUI({
# if (!values$AssessReady) return("")
if(chkText()) return("")
if (!chkPars()) return("")
if(!is.null(UpLoadMSG())) return("")
if (values$AssessReady) {
fluidRow(
h4("Ready to Fit Model"),
actionButton("goAssess", "Fit Model", icon("line-chart"), style="color: #fff; background-color: #00B700; border-color: #006D00")
, style="padding: 15px 15px 15px 15px;")
} else {
fluidRow(
h4("Plot the Data"),
actionButton("assessReady", "Plot Data", icon("area-chart"), style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
,style="padding: 15px 15px 15px 15px;")
}
})
# Histogram panel with its download button; shown once data are plotted.
output$Histogram <- renderUI({
if(!is.null(UpLoadMSG())) return("")
if(is.null(UpLoadMSG()) & values$AssessReady & chkPars()) {
fluidRow(
h4("Histogram of Length Data"),
plotOutput("DatHistPlot"),
downloadButton("dnloadSize", label = "Download", class = NULL),
style="padding-top: 25px;")
}
})
# Download handler for the size-distribution plot (PNG via ggsave).
output$dnloadSize <- downloadHandler(
  filename = function() {
    species <- gsub(" ", "", input$Species)
    if (nchar(species) < 1) species <- "MySpecies"
    paste0(species, '_SizeDist.png')
  },
  content = function(file) {
    ggsave(file, plot = MakeHist(), device = "png")
  }
)
# Maturity-at-length panel with its download button.
output$MatSelPlot <- renderUI({
if(is.null(UpLoadMSG()) & values$AssessReady & chkPars()) {
fluidRow(
h4("Maturity-at-Length"),
plotOutput("DatMat"),
downloadButton("dnloadMat", label = "Download", class = NULL),
style="padding-top: 25px;")
}
})
# Download handler for the maturity/selectivity plot (PNG via ggsave).
output$dnloadMat <- downloadHandler(
  filename = function() {
    species <- gsub(" ", "", input$Species)
    if (nchar(species) < 1) species <- "MySpecies"
    paste0(species, '_MatSel.png')
  },
  content = function(file) {
    ggsave(file, plot = MatSel(), device = "png")
  }
)
### Results Tab ###
# Header for the results tab; warns in red until the model has been fitted.
# The else branch intentionally returns NULL (its content is commented out).
output$ResultsText <- renderUI({
if (values$DoneAssess == FALSE) {
h4(HTML("Model hasn't been fitted"), style = "color:red")
} else {
# fluidRow(
# h3("Heading"),
# p("Use the controls on the left to select ")
# , style="padding: 0px 0px 0px 15px;")
}
})
### Table of Estimates ###
# Build the results table: SPR, selectivity (SL50/SL95) and F/M per year,
# formatted as "estimate (lower - upper)" with approximate 95% CIs.
GetEstimates <- reactive({
if (!values$DoneAssess) return("")
if (!"table" %in% input$pTypes) return("")
ModelFit <- doAssess()
# Results <- round(ModelFit@Ests,2)
# Column order: 1 = SL50, 2 = SL95, 3 = F/M, 4 = SPR
Results <- matrix(c(ModelFit@SL50, ModelFit@SL95, ModelFit@FM, ModelFit@SPR),
ncol=4, byrow=FALSE)
# 95% confidence intervals #
# NOTE(review): assumes ModelFit@Vars columns follow the same order -- confirm
CIlower <- Results[,1:4] - 1.96 * sqrt(ModelFit@Vars)
CIupper <- Results[,1:4] + 1.96 * sqrt(ModelFit@Vars)
CIlower[!is.finite(CIlower)] <- 0
CIupper[!is.finite(CIupper)] <- 0
CIlower[CIlower <0 ] <- 0
CIupper[CIupper <0 ] <- 0
# correct bounded parameters - dodgy I know!
# (the two lower-bound clamps are redundant after the global clamp above)
CIlower[CIlower[,3]<0,3] <- 0
CIupper[CIupper[,4]>1,4] <- 1
CIlower[CIlower[,4]<0,4] <- 0
CIlower <- round(CIlower,2)
CIupper <- round(CIupper,2)
#chk <- is.finite(CIlower)
#if (any(!chk)) CIlower[!chk] <- 0
DF <- data.frame(Years=ModelFit@Years,
SPR=paste0(round(ModelFit@SPR, 2), " (", CIlower[,4], " - ", CIupper[,4], ")"),
SL50=paste0(round(ModelFit@SL50, 2), " (", CIlower[,1], " - ", CIupper[,1], ")"),
SL95=paste0(round(ModelFit@SL95, 2), " (", CIlower[,2], " - ", CIupper[,2], ")"),
FM=paste0(round(ModelFit@FM, 2), " (", CIlower[,3], " - ", CIupper[,3], ")"))
rownames(DF) <- 1:nrow(DF)# ModelFit@Years
names(DF) <- c('Years',
# 'M/K',
# 'Linf',
# 'L50',
# 'L95',
'SPR',
'SL50',
'SL95',
'F/M')
# 'Above Target?',
# 'Above Limit?')
# With smoothing on and multiple years, show the smoothed estimates instead
if (input$smooth == "TRUE" & length(ModelFit@Years) > 1) {
Results <- as.data.frame(round(ModelFit@Ests,2))
DF$SPR <- Results$SPR
DF$SL50 <- Results$SL50
DF$SL95 <- Results$SL95
DF[,5] <- Results$FM
}
# Append any model-fit warnings recorded in fitLog
fitLog <- ModelFit@fitLog
if (any(fitLog > 0)) {
DF$Note <- rep("", nrow(DF))
ind <- which(names(DF) == "Note")
DF[which(fitLog == 1),ind] <- "Model did not converge"
DF[which(fitLog == 2),ind] <- "Estimated selectivity may be unrealistically high"
DF[which(fitLog == 3),ind] <- "Estimated F/M may be unrealistically high"
DF[which(fitLog == 4),ind] <- "Estimated selectivity and F/M may be unrealistically high"
}
DF
})
# Render the estimates table without search/paging controls.
output$Estimates <- renderDataTable({
if (!values$DoneAssess) return("")
if (!"table" %in% input$pTypes) return("")
GetEstimates()
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
ordering=FALSE, info=FALSE)
# }, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
# ordering=FALSE, info=FALSE, rowCallback = I(
# 'function(row, data) {
# $("td", row).css("text-align", "center");
# }'))
)
# Show the table's download button once estimates exist.
output$downloadEsts <- renderUI({
if (!values$DoneAssess) return("")
if (!"table" %in% input$pTypes) return("")
downloadButton("dwnbuttonEsts", label = "Download", class = NULL)
})
# Augment the estimates table with the input parameters for CSV export.
# Fix: fetch the LB_pars object once instead of calling getLB_pars() three
# times for individual slots.
AllPars <- reactive({
  if (!values$DoneAssess) return("")
  if (!"table" %in% input$pTypes) return("")
  Linf <- getLinf()
  LB_pars <- getLB_pars()
  DFAll <- GetEstimates()
  DFAll$MK <- input$MK
  DFAll$Linf <- Linf
  DFAll$L50 <- input$L50
  DFAll$L95 <- input$L95
  DFAll$CVLinf <- LB_pars@CVLinf
  DFAll$FecB <- LB_pars@FecB
  DFAll$Mpow <- LB_pars@Mpow
  DFAll$Smooth <- input$smooth
  DFAll
})
# CSV download of the estimates plus the input parameters.
output$dwnbuttonEsts <- downloadHandler(
filename = function() {
nm <- input$Species
nm <- gsub(" ", "", nm)
if (nchar(nm) <1) nm <- "MySpecies"
paste0(nm, '_Ests.csv')
},
content = function(file) {
write.table(AllPars(), file, sep=",", row.names=FALSE, col.names=TRUE)
}
)
# Table caption, shown only when the table itself is visible.
output$TableHeader <- renderUI({
if (!values$DoneAssess) return("")
if (!"table" %in% input$pTypes) return("")
# HTML("Parameters: Input and Estimates")
HTML("Model Estimates (95% confidence intervals)")
})
### Plot SPR Circle ####
# Render the SPR circle plot. The plotting code itself lives in plotOut1(),
# which this render and the PNG download handler share; the body was
# previously duplicated verbatim in both places.
output$SPRCircle <- renderPlot({
  if (!values$DoneAssess) return("")
  if (!"spr" %in% input$pTypes) return("")
  plotOut1()
})
# Panel wrapping the SPR circle plot and its download button.
# Fix: removed the dead `smooth` computation -- the value was never used
# in this renderUI (the plot itself is drawn by output$SPRCircle).
output$PSPRCirc <- renderUI({
  if (!values$DoneAssess) return("")
  if (!"spr" %in% input$pTypes) return("")
  fluidRow(
    h4("Estimated Spawning Potential and Reference Points"),
    h5("Note: if multiple years, only the estimate from the last year is shown"),
    plotOutput("SPRCircle"),
    downloadButton("downloadSPRcirc2", label = "Download", class = NULL),
    style="padding-top: 25px;")
})
# Draw the SPR circle plot; shared by output$SPRCircle and the PNG
# download handler. A label colour of pure white (#FFFFFF) is treated
# as "use the plotting default" (NULL).
plotOut1 <- function(){
if (input$smooth == "TRUE") smooth=TRUE
if (input$smooth != "TRUE") smooth=FALSE
labcol <- input$labcol
if (labcol=="#FFFFFF") labcol <- NULL
plotSPRCirc(doAssess(), SPRTarg=input$sprtarg, SPRLim=input$sprlim,
useSmooth=smooth, bgcol=input$bgcol, limcol=input$limcol,
targcol=input$targcol, abtgcol=input$abtgcol,
labcol=labcol, labcex=input$labcex, texcex=input$texcex)
}
# PNG download of the SPR circle plot (base graphics, hence png()/dev.off()).
output$downloadSPRcirc2 <- downloadHandler(
filename = function() {
nm <- input$Species
nm <- gsub(" ", "", nm)
if (nchar(nm) <1) nm <- "MySpecies"
paste0(nm, '_SPREst.png')
},
content = function(file) {
png(file)
plotOut1()
dev.off()
}
)
### Estimates over Time ###
# Render the estimates-by-year plot; drawing code is shared via plotOut2().
output$YrEsts <- renderPlot({
plotOut2()
})
# Panel for the estimates-by-year plot; suppressed for single-year data.
output$EstsByYear <- renderUI({
if (!values$DoneAssess) return("")
if (!"ests" %in% input$pTypes) return("")
if (getLB_lens()@NYears < 2) {
return(
fluidRow(
h4("Estimates by Year: only one year - plot not shown"),
style="padding-top: 25px;")
)
}
fluidRow(
h4("Estimates by Year (with 95% confidence intervals)"),
plotOutput("YrEsts", height="220px"),
downloadButton("dnloadEsts", label = "Download", class = NULL),
style="padding-top: 25px;")
})
# Draw the estimates-by-year plot; shared by output$YrEsts and the PNG
# download handler.
plotOut2 <- function(){
  if (!values$DoneAssess) return("")
  if (!"ests" %in% input$pTypes) return("")
  smooth <- if (input$smooth == "TRUE") TRUE else FALSE
  incL50 <- if (input$incL50 == "TRUE") TRUE else FALSE
  plotEsts(doAssess(), doSmooth=smooth, CIcol=input$CIcol, axCex=input$axCex,
           labCex=input$labCex, ptCex=input$ptCex, incL50=incL50, L50col=input$L50col)
}
# PNG download of the estimates-by-year plot.
output$dnloadEsts <- downloadHandler(
  filename = function() {
    species <- gsub(" ", "", input$Species)
    if (nchar(species) < 1) species <- "MySpecies"
    paste0(species, '_YrEsts.png')
  },
  content = function(file) {
    png(file, width=900, height=550)
    plotOut2()
    dev.off()
  }
)
})
| /inst/shiny_apps/LBSPR/server.r | no_license | Udhy/LBSPR | R | false | false | 29,455 | r | libs <- c("shiny", "shinyBS", "Hmisc", "xtable", 'colourpicker')
# Install any missing packages from `libs`, then attach the app's dependencies.
# Fixes: match against the "Package" column only (the full installed.packages()
# matrix also contains versions/paths, risking false matches); install via one
# vectorized call; drop the unused `inst` variable.
chk <- !libs %in% installed.packages()[, "Package"]
if (any(chk)) install.packages(libs[chk])
library(shiny)
library(shinyBS)
library(LBSPR)
library(Hmisc)
library(xtable)
shinyServer(function(input, output, clientData, session) {
# Central reactive state for the app's plot/fit workflow.
values <- reactiveValues(useDF=FALSE, default=NULL, useExamp=FALSE,
                         ShowResults=FALSE, AssessReady=FALSE, DoneAssess=FALSE)
# Reset the plot/fit workflow; called whenever any biological parameter,
# the bin width, or the data source changes. Replaces ten verbatim copies
# of the same three assignments.
resetAssess <- function() {
  values$ShowResults <- FALSE
  values$AssessReady <- FALSE
  values$DoneAssess <- FALSE
}
observeEvent(input$binswidth, resetAssess())
# If any biological parameters are changed, the assessment is reset
observeEvent(input$MK, resetAssess())
observeEvent(input$relLinf, resetAssess())
observeEvent(input$SetLinf, resetAssess())
observeEvent(input$L50, resetAssess())
observeEvent(input$L95, resetAssess())
# Load the default parameter set (MK, Linf, L50, L95, relLinf)
observeEvent(input$defPars, {
  values$useDF <- TRUE
  values$default <- c(1.5, 100, 66, 70, 0.66)
})
# NOTE(review): input$exampData and input$exmpData look like near-duplicate
# IDs -- only the latter switches to the bundled example data. Confirm both
# controls exist in the UI.
observeEvent(input$exampData, resetAssess())
observeEvent(input$exmpData, {
  values$useExamp <- TRUE
  resetAssess()
})
# A fresh upload replaces the example data
observeEvent(input$file1, {
  values$useExamp <- FALSE
  resetAssess()
})
observeEvent(input$assessReady, {
  if (input$assessReady == 0) return(NULL)
  if (!is.null(data())) values$AssessReady <- TRUE
})
observeEvent(input$goAssess, {
  if (input$goAssess == 0) return(NULL)
  values$ShowResults <- TRUE
})
# Tool Tips
# Hover help for the upload controls (shinyBS).
addTooltip(session, id = "file1", title = "CSV or text file only",
placement = "right", trigger = "hover")
addTooltip(session, id="header", title = "Does the first row contain labels?",
placement = "right", trigger = "hover")
addTooltip(session, id="smooth", title = "Only used if more than one year",
placement = "right", trigger = "hover")
## Alerts ##
# Central validation observer: creates/closes shinyBS alerts as the inputs
# change. Each createAlert() pairs with a closeAlert() on the same alertId.
observe({
# Pars <- Lens <- NULL
# ParsOK <- FALSE
# if(chkFileUp() & is.null(UpLoadMSG())) Pars <- getLB_pars()
# if(chkFileUp() & is.null(UpLoadMSG())) Lens <- getLB_lens()
# if (class(Pars)!="NULL") {
# if(length(Pars@Linf)>0 & is.finite(Pars@Linf)) ParsOK <- TRUE
# }
# if (ParsOK & class(Lens) !="NULL") {
# if (Pars@Linf < max(Lens@LMids)) {
# createAlert(session, "LMidsErr", "lmidserr", title = "Error",
# content=HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")),
# " must be larger than maximum length bin (", max(Lens@LMids), ")")), append=FALSE)
# } else {
# closeAlert(session, "lmidserr")
# }
# }
# if (!values$ShowResults) return(NULL)
# Normalize possibly-missing inputs to scalar NA so the checks below are safe
MK <- as.numeric(input$MK)
Linf <- getLinf() # as.numeric(input$L50) / as.numeric(input$relLinf)
# Linf <- getLinf() # as.numeric(input$Linf)
L50 <- as.numeric(input$L50)
L95 <- as.numeric(input$L95)
relLinf <- as.numeric(input$relLinf)
if (length(MK) <1) MK <- NA
if (length(Linf)<1) Linf <- NA
if (length(L50)<1) L50 <- NA
if (length(L95)<1) L95 <- NA
if (length(relLinf)<1) relLinf <- NA
if (is.null(MK)) MK <- NA
if (is.null(Linf)) Linf <- NA
if (is.null(L50)) L50 <- NA
if (is.null(L95)) L95 <- NA
if (is.null(relLinf)) relLinf <- NA
# Errors #
if (input$sprtarg <= input$sprlim) {
createAlert(session, "refalert", "refs", title = "Error",
content = "SPR Limit must be less than SPR Target", append = FALSE)
} else {
closeAlert(session, "refs")
}
chk <- any(c(MK, Linf, L50, L95, relLinf)<0)
if (all(is.na(chk))) doChk <- FALSE
if (!all(is.na(chk))) {
doChk <- chk
}
if (doChk) {
createAlert(session, "NegVals", "negvals", title = "Error",
content="Negative values don't make much sense!", append=FALSE)
} else {
closeAlert(session, "negvals")
}
if (!is.na(L95) & !is.na(L50)) {
if (L95 <= L50) {
createAlert(session, "lmalert", "lmpar", title = "Error",
content = HTML(paste0(tags$i("L", tags$sub("50")), " must be less than ",
tags$i("L", tags$sub("95")))),
append = FALSE)
} else {
closeAlert(session, "lmpar")
}
}
# Warnings
if (!is.na(L95) & !is.na(L50) & !is.na(Linf)) {
if (L50 >= Linf | L95 >= Linf) {
createAlert(session, "lmalert2", "lmpar2", title = "Error",
content = HTML(paste0("Maturity parameters are higher than ", tags$i("L"), tags$sub(HTML("&#8734;")))),
append = FALSE)
} else {
closeAlert(session, "lmpar2")
}
}
if (!is.na(Linf) & Linf < 10) {
createAlert(session, "linfalert", "linf1", title = "Warning",
content = HTML(paste0("Are you sure ", tags$i("L"), tags$sub(HTML("&#8734;")), " is so low?")),
append = FALSE)
} else {
closeAlert(session, "linf1")
}
# NOTE(review): this alert and the templen alert below both use alertId
# "linf2" -- closing one may close the other. Confirm this is intended.
if (!is.na(Linf) & is.null(UpLoadMSG()) & input$dataType == "freq") {
Lens <- getLB_lens()
if (Linf > max(Lens@LMids)) {
createAlert(session, "linfalert3", "linf2", title = "Error",
content = HTML(paste0("Maximum length bin (", round(max(Lens@LMids),2),
") must be greater than ", tags$i("L"), tags$sub(HTML("&#8734;")))),
append = FALSE)
} else {
closeAlert(session, "linf2")
}
}
if (!is.na(MK) & (MK < 0.2 | MK > 6)) {
createAlert(session, "mkalert", "mk1", title = "Warning",
content = HTML(paste0("Are you sure of the ", tags$i("M/K"), " ratio? Model may not perform well
at extreme values")),
append = FALSE)
} else {
closeAlert(session, "mk1")
}
if (!is.na(relLinf) & (relLinf >=1 | relLinf <=0)) {
createAlert(session, "RelLinferr", "relLinferr", title = "Error",
content = HTML(paste0("Relative size at maturity must be between 0 and 1")),
append=FALSE)
} else {
closeAlert(session, "relLinferr")
}
templen <- NULL
if (chkPars()) templen <- getLB_lens()
if (class(templen) != "NULL") {
if (!is.na(Linf) & Linf > max(templen@LMids)) {
createAlert(session, "linfalert2", "linf2", title = "Error",
content = HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")), "(", Linf, ") must be lower than the largest length bin (", max(templen@LMids), ")")),
append = FALSE)
} else {
closeAlert(session, "linf2")
}
}
# Add checks for all parameters
})
#############################
### Read in CSV file ###
### Check that data is ok ###
#############################
# Map the example-data selector to the bundled CSV file path.
ExampleDataFile <- reactive({
switch(input$exampData,
rawSingHead = "../../LRaw_SingYrHead.csv",
rawSing = "../../LRaw_SingYr.csv",
rawMultiHead = "../../LRaw_MultiYrHead.csv",
rawMulti = "../../LRaw_MultiYr.csv",
freqSingHead = "../../LFreq_SingYrHead.csv",
freqSing = "../../LFreq_SingYr.csv",
freqMultiHead = "../../LFreq_MultiYrHead.csv",
freqMulti = "../../LFreq_MultiYr.csv")
})
# Offer a download button only while the example data set is in use.
output$downloadExample <- renderUI({
if (!is.null(data()) & values$useExamp) {
fluidRow(
h5(strong("Download Example File")),
downloadButton("dnlData", label = "Download", class = NULL)
, style="padding: 5px 15px;")
}
})
# CSV download of the currently loaded example data.
# Fixes: the old gsub("data/", ...) no longer matched the "../../" example
# paths, so the suggested filename contained directory components --
# basename() strips them; sub(..., fixed=TRUE) avoids the unescaped regex
# dot in ".csv" matching arbitrary characters.
output$dnlData <- downloadHandler(
  filename = function() {
    nm <- basename(ExampleDataFile())
    nm <- sub(".csv", "", nm, fixed = TRUE)
    paste0(nm, '.csv')
  },
  content = function(file) {
    write.table(data(), file, sep=",", row.names=FALSE, col.names=FALSE)
  }
)
# Read the active data set: the bundled example file, or the user upload.
# For uploads, rows/columns that are entirely NA are dropped.
# Fixes: inherits()/is.numeric() instead of class() == "..." -- class() can
# return a vector (e.g. c("matrix", "array") in R >= 4.0), which breaks `if`.
data <- reactive({
  if (values$useExamp) {
    read.csv(ExampleDataFile(), header = input$header,
             sep = input$sep,
             stringsAsFactors=FALSE, check.names=FALSE)
  } else {
    file1 <- input$file1
    if (is.null(file1)) return(NULL)
    dat <- read.csv(file1$datapath, header = input$header,
                    sep = input$sep, stringsAsFactors=FALSE, check.names=FALSE)
    if (inherits(dat, "data.frame") || inherits(dat, "matrix")) {
      if (ncol(dat) > 1) {
        # Drop rows and columns that contain nothing but NA
        chkNAs <- apply(dat, 2, is.na)
        dat <- dat[!apply(chkNAs, 1, prod),, drop=FALSE]
        dat <- dat[,!apply(chkNAs, 2, prod), drop=FALSE]
      }
    }
    if (is.numeric(dat)) {
      dat <- dat[!is.na(dat)]
    }
    dat
  }
})
# Check - has file been uploaded?
# TRUE once a data set (uploaded or example) is available.
chkFileUp <- reactive({
  !is.null(data())
})
# Heuristic separator check: if the parsed cells still contain ";" or ","
# the chosen field separator is probably wrong. TRUE when no data is loaded.
chkSep <- reactive({
  if (is.null(data())) return(TRUE)
  cells <- as.matrix(data())
  !(any(grepl(";", cells)) || any(grepl(",", cells)))
})
# TRUE when the parsed data contains text (wrong separator, or a header row
# read as data).
# Fix: is.character() instead of class() == "character" -- robust to objects
# whose class() returns more than one element.
chkText <- reactive({
  if (is.null(data())) return(FALSE)
  if (chkFileUp() == FALSE) return(FALSE)
  if (!chkSep()) return(TRUE)
  if (is.character(data())) return(TRUE)
  if (is.character(data()[1,1])) return(TRUE)
  FALSE
})
# Collect all upload-diagnostic messages; NULL means the file looks valid.
UpLoadMSG <- reactive({
msg1 <- msg2 <- msg3 <- msg4 <- msg5 <- msg6 <- NULL
if(chkFileUp() == FALSE) msg1 <- "Please upload a CSV data file"
if(!chkSep()) msg6 <- "Check File Separator"
if(chkFileUp() == TRUE & chkSep()) {
if(chkFreq() & input$dataType == "raw") {
msg2 <- "It looks like you've uploaded length frequencies? Please change Data Type"
}
if(!chkFreq() & input$dataType == "freq") {
msg3 <- "It looks like you've uploaded length measurements? Please change Data Type"
}
if(chkHeader() & !input$header) {
msg4 <- "It looks like the file has a header row? Please check Header box"
}
if(chkText()) {
msg5 <- "Text in the data file. Do you have a header?"
}
}
# c() drops NULLs, so only the triggered messages are returned
out <- c(msg1, msg2, msg3, msg4, msg5, msg6)
out
})
# Collapse all upload diagnostics into a single HTML fragment.
output$UpLoadText <- renderUI({
  HTML(paste(UpLoadMSG(), collapse="<br/>"))
})
chkFreq <- reactive({ # Check if data appears to be length frequencies
if(!chkFileUp()) return(NULL)
if(!chkSep()) return(NULL)
lendat <- as.matrix(data())
if (ncol(lendat) == 1) return (FALSE)
# Evenly spaced first column => it holds length-bin midpoints (frequency data)
# NOTE(review): assumes the first column is numeric -- confirm for text files
fst <- lendat[,1]
fst <- fst[is.finite(fst)]
if (all(diff(fst) == median(diff(fst)))) return(TRUE)
FALSE
})
chkHeader <- reactive({ # Check if there appears to be a header
  if (!chkFileUp()) return(NULL)
  if (!chkSep()) return(NULL)
  if (input$header) return(TRUE)
  lendat <- as.matrix(data())
  topRow <- lendat[1,, drop=FALSE]
  # Fix: is.character() instead of class(topRow) == "character". topRow is a
  # matrix, whose class is c("matrix", "array") in R >= 4.0, so the equality
  # test was a length-2 logical (an error under R >= 4.2) and never TRUE.
  if (is.character(topRow)) return(TRUE)
  if (chkFreq() & is.na(topRow[1])) return(TRUE)
  topRow <- as.numeric(lendat[1,, drop=FALSE])
  if (!chkFreq()) {
    if (ncol(lendat) > 1) {
      # Consecutive integers or plausible year values suggest a header row
      if (all(diff(topRow) == 1)) return(TRUE)
      if (all(topRow > 1900 & topRow < 2100)) return(TRUE)
    }
    if (ncol(lendat) == 1) {
      if (topRow[1] > 1900 & topRow[1] < 2100) return(TRUE)
    }
  }
  FALSE
})
chkMulitYear <- reactive({ # Check if there are multiple years
  if (!chkFileUp()) return(NULL)
  if (!chkSep()) return(NULL)
  nCols <- ncol(as.matrix(data()))
  # Frequency data spends one column on the length bins, hence the higher bar
  if (chkFreq()) nCols > 2 else nCols > 1
})
# One-row summary of the loaded file (name, inferred type, header, years).
# Fix: the data.frame construction was duplicated for the example/upload
# branches; only the displayed file name differs, so compute that once.
output$FileTable <- renderDataTable({
  if (!chkFileUp()) return(NULL)
  fileName <- if (values$useExamp) ExampleDataFile() else input$file1$name
  DF <- data.frame(Filename=fileName,
                   DataType=ifelse(chkFreq(), "Frequency", "Raw"),
                   Header=chkHeader(),
                   MultiYear=chkMulitYear())
  return(DF)
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
ordering=FALSE, info=FALSE, rowCallback = I(
'function(row, data) {
$("td", row).css("text-align", "center");
}'
)))
# Intro text shown above the file summary (user uploads only).
output$metadata <- renderUI({
  if (values$useExamp || !chkFileUp()) return()
  HTML(paste(h3("Check the Uploaded File"),
             p("Does everything look right?"),
             h4("File Metadata")))
})
# Show the first six rows of the data, tidying column labels where possible.
output$topdata <- renderDataTable({
# Print out first 6 observations
if(!chkFileUp()) return(NULL)
dat <- data()
if (input$header == TRUE) {
innames <- colnames(dat)
if (input$dataType == "freq") {
if (ncol(dat) >1) {
# innames[1] <- "Length.Bins"
# innames[2:ncol(dat)] <- gsub("X", "", innames[2:ncol(dat)])
# colnames(dat) <- innames
}
}
if (input$dataType == "raw") {
# Strip the "X" prefix read.csv adds to numeric column names (years)
innames <- gsub("X", "", innames)
colnames(dat) <- innames
}
}
if (input$header == FALSE & input$dataType == "freq") {
# if (ncol(dat) >1) {
# colnames(dat)[1] <- "Length.Bins"
# colnames(dat)[2:length(colnames(dat))] <- 1:(length(colnames(dat))-1)
# }
}
head(dat)
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
ordering=FALSE, info=FALSE)
)
# Explanatory caption for the data preview table.
output$fileContents <- renderUI({
if(!chkFileUp()) return()
HTML(paste(h4("File contents"),
p("This shows the first six rows of your data file. All numbers below the bold black heading should be your length data. If you have multiple years of data, they should appear in seperate columns in the table below. ")
))
})
###################
## Fit Model Tab ##
###################
# observeEvent(input$relLinf, {
# observe({
# myval <- as.numeric(input$L50) / as.numeric(input$relLinf)
# # myval <- round(myval, 5)
# if (length(myval)>0) updateTextInput(session, "Linf", value = myval)
# })
# observeEvent(input$Linf, {
# if (length(as.numeric(input$relLinf)<1) & length(as.numeric(input$Linf))>0) {
# myval <- as.numeric(input$L50) / as.numeric(input$Linf)
# # myval <- round(myval, 2)
# if (!is.na(myval)) {
# updateTextInput(session, "relLinf", value = myval)
# }
# }
# }
# # )
# Current Linf: either entered directly, or derived as L50 / (L50/Linf).
# Returns NULL while the derived value cannot be computed.
getLinf <- reactive({
  if (input$dorelLinf != TRUE) return(as.numeric(input$SetLinf))
  tryLinf <- as.numeric(input$L50) / as.numeric(input$relLinf)
  if (length(tryLinf) == 0 || is.na(tryLinf)) return(NULL)
  tryLinf
})
# output$CurrLinf <- renderText(paste0("Linf = ", getLinf()))
# Display the current Linf (rounded) next to the inputs.
output$CurrLinf <- renderUI({
  currLinf <- getLinf()
  if (!is.numeric(currLinf)) return("")
  HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;"))), "=", round(currLinf, 2))
})
# Input widgets for the life-history parameters. The div id cycles through
# the alphabet on each "default parameters" click so the inputs re-render.
output$InputPars <- renderUI({
times <- input$defPars
# MKVal <- 1.5 #""
# relLinfVal <- 0.66 #""
# LinfVal <- ""
# L50Val <- 66 #""
# L95Val <- 70 #""
# disLinf <- getLinf() # round(as.numeric(input$L50) / as.numeric(input$relLinf),2)
# if (is.numeric(disLinf)) disLinf <- round(disLinf,2)
# if (values$useDF) {
# MKVal <- values$default[1]
# LinfVal <- values$default[2]
# L50Val <- values$default[3]
# L95Val <- values$default[4]
# relLinfVal <- values$default[5]
# }
div(id=letters[(times %% length(letters)) + 1],
h4("Life history ratios"),
fluidRow(
column(6,
textInput("MK", label=tags$i("M/K ratio"), value=1.5)
),
conditionalPanel(condition="input.dorelLinf == 'TRUE'",
column(6,
textInput("relLinf", label = HTML(paste0(tags$i("L", tags$sub("50")), "/",tags$i("L"), tags$sub(HTML("&#8734;")))), value=0.66))
),
# sliderInput("relLinf", label = HTML(paste0(tags$i("L", tags$sub("50")), "/",tags$i("L"), tags$sub(HTML("&#8734;")))),
# min=0.0, max=1, step=0.01, value=input$relLinf))),
column(6,
conditionalPanel(condition="input.dorelLinf == 'FALSE'",
textInput("SetLinf", label = HTML(paste0(tags$i("L"), tags$sub(HTML("&#8734;")))), value=100))
)),
h4("Length-at-Maturity"),
fluidRow(
column(6,
textInput("L50", label = tags$i(HTML(paste0("L", tags$sub("50")))), value=66)
),
column(6,
textInput("L95", label = tags$i(HTML(paste0("L", tags$sub("95")))), value=70)
))
)
})
# Slider controlling the histogram bin width (shown for raw length data only).
# Fixes: removed the duplicated chkFileUp() guard and the unused `Min` local.
# NOTE(review): with a small length range `Start` can round down to 0, which is
# below the slider minimum of 1 -- confirm this is intended.
output$HistControl <- renderUI({
  if (!chkFileUp()) return(NULL)
  if (!is.null(UpLoadMSG())) return(NULL)
  if (input$dataType != "raw") return(NULL)
  dat <- data()
  # Default bin width: ~1/20 of the rounded-up data maximum, floored to a multiple of 5
  dataMax <- ceiling(max(dat, na.rm=TRUE)/5)*5
  Start <- floor((dataMax/20)/5) * 5
  # Slider maximum: ~1/10 of the data maximum, rounded up to a multiple of 5
  Max <- ceiling((max(dat, na.rm=TRUE)/10)/5)*5
  sliderInput("binswidth", "Width of length bins:", min = 1, max = Max, value = Start)
})
# Assemble an LB_pars S4 object from the current UI inputs.
# Returns NULL until getLinf() yields a value.
getLB_pars <- reactive({
# print(values$ShowResults)
# if (!values$ShowResults) return(NULL)
LB_pars <- new("LB_pars", verbose=FALSE)
linf <- getLinf()
if (class(linf) == "NULL") return(NULL)
LB_pars@Linf <- linf # as.numeric(input$L50) / as.numeric(input$relLinf) # as.numeric(input$Linf)
LB_pars@L50 <- as.numeric(input$L50)
LB_pars@L95 <- as.numeric(input$L95)
LB_pars@MK <- as.numeric(input$MK)
LB_pars@Species <- input$Species
LB_pars@L_units <- input$Lunits
# Fall back to a 5-unit bin width before the slider input exists
binwidth <- input$binswidth
LB_pars@BinWidth <- ifelse(is.null(binwidth), 5, binwidth)
LB_pars
})
# Build an LB_lengths object from the uploaded data and the current LB_pars.
# Returns NULL until the file and parameters are valid.
# Fixes: removed a redundant second call to data() (the first assignment was
# immediately overwritten) and replaced class()== tests with inherits().
getLB_lens <- reactive({
  if (!chkFileUp()) return(NULL)
  if (!chkSep()) return(NULL)
  if (!is.null(UpLoadMSG())) return(NULL)
  dat <- as.matrix(data())
  LB_pars <- getLB_pars()
  # inherits(NULL, "LB_pars") is FALSE, so this also covers the NULL case
  if (!inherits(LB_pars, "LB_pars")) return(NULL)
  LB_lengths <- new("LB_lengths", file=dat, LB_pars=LB_pars, dataType=input$dataType, verbose=FALSE)
  LB_lengths
})
chkPars <- reactive({ # Are all input parameters entered?
  Linf <- getLinf()
  # c() silently drops NULL inputs, so the length must be checked FIRST:
  # every index below (pars[3], pars[4], pars[5]) assumes all five are present.
  # The original checked length(pars) < 1 only, and only after indexing-free
  # tests, so a missing input could silently shift the indices.
  pars <- as.numeric(c(input$MK, Linf, input$L50, input$L95, input$relLinf))
  if (length(pars) < 5) return(FALSE)
  if (any(!is.finite(pars))) return(FALSE)
  if (any(pars <= 0)) return(FALSE)
  if (pars[4] <= pars[3]) return(FALSE)            # L95 must exceed L50
  if (pars[5] <= 0 | pars[5] >= 1) return(FALSE)   # relative Linf in (0, 1)
  Lens <- getLB_lens()
  if (!inherits(Lens, "LB_lengths")) return(FALSE)
  if (pars[3] >= Linf | pars[4] >= Linf) return(FALSE)  # maturity below Linf
  if (Linf > max(Lens@LMids)) return(FALSE)        # Linf inside the length bins
  TRUE
})
# Show a warning string while the input parameters fail validation.
output$ValidPars <- renderText({
  parsOK <- chkText() || chkPars()
  if (parsOK) "" else "Invalid input parameters"
})
# Flag the absence of a usable data file.
output$ValidData <- renderText({
  if (is.null(UpLoadMSG())) "" else "No valid data file"
})
# Build the length-data plot: raw data before the model is fitted, the
# fitted object afterwards. Returns NULL while inputs are invalid.
# Fix: scalar flags are combined with `&&` (short-circuit) rather than the
# vectorized `&`.
MakeHist <- reactive({
  if (chkText()) return(NULL)
  if (!chkFileUp()) return(NULL)
  if (!chkPars()) return(NULL)
  if (!is.null(UpLoadMSG())) return(NULL)
  if (values$AssessReady && !values$ShowResults) return(plotSize(getLB_lens()))
  if (values$AssessReady && values$ShowResults) return(plotSize(doAssess()))
})
# Render the length-data histogram built by MakeHist().
output$DatHistPlot <- renderPlot({
if(chkText()) return(NULL)
if(!chkFileUp()) return(NULL)
if(!chkPars()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
histdat <- MakeHist()
# need to add check here if model fails to converge
print(histdat)  # explicit print() draws the plot object inside renderPlot
})
# Build the maturity-at-length plot. Before the model is fitted an LB_obj
# is assembled by copying every slot from the length data and the input
# parameters; after fitting the fitted object is plotted directly.
MatSel <- reactive({
if(chkText()) return(NULL)
if(!chkFileUp()) return(NULL)
if(!chkPars()) return(NULL)
if(!is.null(UpLoadMSG())) return(NULL)
if (values$AssessReady & !values$ShowResults) {
LBobj <- new("LB_obj")
LB_lengths <- getLB_lens()
LB_pars <- getLB_pars()
# Copy all slots from both objects into the combined LB_obj
Slots <- slotNames(LB_lengths)
for (X in 1:length(Slots)) slot(LBobj, Slots[X]) <- slot(LB_lengths, Slots[X])
Slots <- slotNames(LB_pars)
for (X in 1:length(Slots)) slot(LBobj, Slots[X]) <- slot(LB_pars, Slots[X])
LBobj@LMids[1] <- 0 # hack to make maturity curve start at zero
return(plotMat(LBobj))
}
if (values$AssessReady & values$ShowResults) return(plotMat(doAssess()))
})
# Render the maturity-at-length curve; guards mirror MatSel().
output$DatMat <- renderPlot({
  blocked <- chkText() || !chkFileUp() || !chkPars() || !is.null(UpLoadMSG())
  if (blocked) return(NULL)
  try(print(MatSel()))
})
## Fit the Model ##
# Run the LBSPR assessment routine. Returns the fitted LB_obj on success,
# FALSE on a fitting failure, NULL while preconditions are unmet.
# Fixes: removed the unused `lens` local and the redundant inner
# `if (values$ShowResults)` (already guaranteed by the guard above);
# test the try() result with inherits() (a failure returns a "try-error").
doAssess <- reactive({
  if (chkText()) return(NULL)
  if (!chkFileUp()) return(NULL)
  if (!values$ShowResults) return(NULL)
  values$DoneAssess <- TRUE
  fitmod <- try(LBSPRfit(getLB_pars(), getLB_lens(), useCPP = TRUE))
  if (inherits(fitmod, "LB_obj")) return(fitmod)
  values$DoneAssess <- FALSE
  FALSE
})
# Two-step workflow button: first "Plot Data", then "Fit Model".
output$clickAssess <- renderUI({
# if (!values$AssessReady) return("")
if(chkText()) return("")
if (!chkPars()) return("")
if(!is.null(UpLoadMSG())) return("")
if (values$AssessReady) {
fluidRow(
h4("Ready to Fit Model"),
actionButton("goAssess", "Fit Model", icon("line-chart"), style="color: #fff; background-color: #00B700; border-color: #006D00")
, style="padding: 15px 15px 15px 15px;")
} else {
fluidRow(
h4("Plot the Data"),
actionButton("assessReady", "Plot Data", icon("area-chart"), style="color: #fff; background-color: #337ab7; border-color: #2e6da4")
,style="padding: 15px 15px 15px 15px;")
}
})
# Histogram panel with its download button; shown once data are plotted.
output$Histogram <- renderUI({
if(!is.null(UpLoadMSG())) return("")
if(is.null(UpLoadMSG()) & values$AssessReady & chkPars()) {
fluidRow(
h4("Histogram of Length Data"),
plotOutput("DatHistPlot"),
downloadButton("dnloadSize", label = "Download", class = NULL),
style="padding-top: 25px;")
}
})
# Download handler for the size-distribution plot (PNG via ggsave).
output$dnloadSize <- downloadHandler(
  filename = function() {
    species <- gsub(" ", "", input$Species)
    if (nchar(species) < 1) species <- "MySpecies"
    paste0(species, '_SizeDist.png')
  },
  content = function(file) {
    ggsave(file, plot = MakeHist(), device = "png")
  }
)
# Maturity-at-length panel with its download button.
output$MatSelPlot <- renderUI({
if(is.null(UpLoadMSG()) & values$AssessReady & chkPars()) {
fluidRow(
h4("Maturity-at-Length"),
plotOutput("DatMat"),
downloadButton("dnloadMat", label = "Download", class = NULL),
style="padding-top: 25px;")
}
})
# Download handler for the maturity/selectivity plot (PNG via ggsave).
output$dnloadMat <- downloadHandler(
  filename = function() {
    species <- gsub(" ", "", input$Species)
    if (nchar(species) < 1) species <- "MySpecies"
    paste0(species, '_MatSel.png')
  },
  content = function(file) {
    ggsave(file, plot = MatSel(), device = "png")
  }
)
### Results Tab ###
# Header for the results tab; warns in red until the model has been fitted.
# The else branch intentionally returns NULL (its content is commented out).
output$ResultsText <- renderUI({
if (values$DoneAssess == FALSE) {
h4(HTML("Model hasn't been fitted"), style = "color:red")
} else {
# fluidRow(
# h3("Heading"),
# p("Use the controls on the left to select ")
# , style="padding: 0px 0px 0px 15px;")
}
})
### Table of Estimates ###
# Build the results table: SPR, selectivity (SL50/SL95) and F/M per year,
# formatted as "estimate (lower - upper)" with approximate 95% CIs.
GetEstimates <- reactive({
if (!values$DoneAssess) return("")
if (!"table" %in% input$pTypes) return("")
ModelFit <- doAssess()
# Results <- round(ModelFit@Ests,2)
# Column order: 1 = SL50, 2 = SL95, 3 = F/M, 4 = SPR
Results <- matrix(c(ModelFit@SL50, ModelFit@SL95, ModelFit@FM, ModelFit@SPR),
ncol=4, byrow=FALSE)
# 95% confidence intervals #
# NOTE(review): assumes ModelFit@Vars columns follow the same order -- confirm
CIlower <- Results[,1:4] - 1.96 * sqrt(ModelFit@Vars)
CIupper <- Results[,1:4] + 1.96 * sqrt(ModelFit@Vars)
CIlower[!is.finite(CIlower)] <- 0
CIupper[!is.finite(CIupper)] <- 0
CIlower[CIlower <0 ] <- 0
CIupper[CIupper <0 ] <- 0
# correct bounded parameters - dodgy I know!
# (the two lower-bound clamps are redundant after the global clamp above)
CIlower[CIlower[,3]<0,3] <- 0
CIupper[CIupper[,4]>1,4] <- 1
CIlower[CIlower[,4]<0,4] <- 0
CIlower <- round(CIlower,2)
CIupper <- round(CIupper,2)
#chk <- is.finite(CIlower)
#if (any(!chk)) CIlower[!chk] <- 0
DF <- data.frame(Years=ModelFit@Years,
SPR=paste0(round(ModelFit@SPR, 2), " (", CIlower[,4], " - ", CIupper[,4], ")"),
SL50=paste0(round(ModelFit@SL50, 2), " (", CIlower[,1], " - ", CIupper[,1], ")"),
SL95=paste0(round(ModelFit@SL95, 2), " (", CIlower[,2], " - ", CIupper[,2], ")"),
FM=paste0(round(ModelFit@FM, 2), " (", CIlower[,3], " - ", CIupper[,3], ")"))
rownames(DF) <- 1:nrow(DF)# ModelFit@Years
names(DF) <- c('Years',
# 'M/K',
# 'Linf',
# 'L50',
# 'L95',
'SPR',
'SL50',
'SL95',
'F/M')
# 'Above Target?',
# 'Above Limit?')
# With smoothing on and multiple years, show the smoothed estimates instead
if (input$smooth == "TRUE" & length(ModelFit@Years) > 1) {
Results <- as.data.frame(round(ModelFit@Ests,2))
DF$SPR <- Results$SPR
DF$SL50 <- Results$SL50
DF$SL95 <- Results$SL95
DF[,5] <- Results$FM
}
# Append any model-fit warnings recorded in fitLog
fitLog <- ModelFit@fitLog
if (any(fitLog > 0)) {
DF$Note <- rep("", nrow(DF))
ind <- which(names(DF) == "Note")
DF[which(fitLog == 1),ind] <- "Model did not converge"
DF[which(fitLog == 2),ind] <- "Estimated selectivity may be unrealistically high"
DF[which(fitLog == 3),ind] <- "Estimated F/M may be unrealistically high"
DF[which(fitLog == 4),ind] <- "Estimated selectivity and F/M may be unrealistically high"
}
DF
})
# Render the estimates table with paging/searching/sorting disabled so the
# full table shows at once. The commented block below was an attempt at
# centring the cell text via a DataTables row callback.
output$Estimates <- renderDataTable({
  if (!values$DoneAssess) return("")
  if (!"table" %in% input$pTypes) return("")
  GetEstimates()
}, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
                ordering=FALSE, info=FALSE)
# }, options=list(pageLength=-1, searching = FALSE, paging = FALSE,
#                 ordering=FALSE, info=FALSE, rowCallback = I(
#                   'function(row, data) {
#                     $("td", row).css("text-align", "center");
#                   }'))
)
# Show the CSV download button only when the table itself is visible.
output$downloadEsts <- renderUI({
  if (!values$DoneAssess) return("")
  if (!"table" %in% input$pTypes) return("")
  downloadButton("dwnbuttonEsts", label = "Download", class = NULL)
})
# Reactive combining the estimates table with the biological inputs and
# run settings, so the downloaded CSV contains everything needed to
# reproduce the assessment.
AllPars <- reactive({
  if (!values$DoneAssess) return("")
  if (!"table" %in% input$pTypes) return("")
  Linf <- getLinf() # as.numeric(input$L50) / as.numeric(input$relLinf)
  DFAll <- GetEstimates()
  DFAll$MK <- input$MK
  DFAll$Linf <- Linf # getLinf() # input$Linf
  DFAll$L50 <- input$L50
  DFAll$L95 <- input$L95
  DFAll$CVLinf <- getLB_pars()@CVLinf
  DFAll$FecB <- getLB_pars()@FecB
  DFAll$Mpow <- getLB_pars()@Mpow
  DFAll$Smooth <- input$smooth
  DFAll
})
# CSV download of the combined estimates + input parameters (AllPars()).
output$dwnbuttonEsts <- downloadHandler(
  filename = function() {
    # File name from the species input; default when the field is blank.
    nm <- input$Species
    nm <- gsub(" ", "", nm)
    if (nchar(nm) <1) nm <- "MySpecies"
    paste0(nm, '_Ests.csv')
  },
  content = function(file) {
    write.table(AllPars(), file, sep=",", row.names=FALSE, col.names=TRUE)
  }
)
# Heading shown above the estimates table.
output$TableHeader <- renderUI({
  if (!values$DoneAssess) return("")
  if (!"table" %in% input$pTypes) return("")
  # HTML("Parameters: Input and Estimates")
  HTML("Model Estimates (95% confidence intervals)")
})
### Plot SPR Circle ####
# SPR "dial" plot showing the estimate against target and limit reference
# points, styled from the user's colour/size inputs.
output$SPRCircle <- renderPlot({
  if (!values$DoneAssess) return("")
  if (!"spr" %in% input$pTypes) return("")
  # input$smooth arrives as the string "TRUE"/"FALSE"; convert to logical.
  if (input$smooth == "TRUE") smooth=TRUE
  if (input$smooth != "TRUE") smooth=FALSE
  labcol <- input$labcol
  # Pure white is treated as "no label colour chosen".
  if (labcol=="#FFFFFF") labcol <- NULL
  plotSPRCirc(doAssess(), SPRTarg=input$sprtarg, SPRLim=input$sprlim,
              useSmooth=smooth, bgcol=input$bgcol, limcol=input$limcol,
              targcol=input$targcol, abtgcol=input$abtgcol,
              labcol=labcol, labcex=input$labcex, texcex=input$texcex)
})
# UI container for the SPR circle plot.
# Fix: the original computed a local `smooth` flag here that was never used
# (the string-to-logical conversion happens in the plotting code instead);
# the dead assignments have been removed. Rendered output is unchanged.
output$PSPRCirc <- renderUI({
  if (!values$DoneAssess) return("")
  if (!"spr" %in% input$pTypes) return("")
  fluidRow(
    h4("Estimated Spawning Potential and Reference Points"),
    h5("Note: if multiple years, only the estimate from the last year is shown"),
    plotOutput("SPRCircle"),
    downloadButton("downloadSPRcirc2", label = "Download", class = NULL),
    style="padding-top: 25px;")
})
# Draws the SPR circle plot. Shared by the on-screen renderer and the PNG
# download handler so both produce an identical figure.
plotOut1 <- function(){
  # input$smooth arrives as the string "TRUE"/"FALSE"; convert to logical.
  if (input$smooth == "TRUE") smooth=TRUE
  if (input$smooth != "TRUE") smooth=FALSE
  labcol <- input$labcol
  # Pure white is treated as "no label colour chosen".
  if (labcol=="#FFFFFF") labcol <- NULL
  plotSPRCirc(doAssess(), SPRTarg=input$sprtarg, SPRLim=input$sprlim,
              useSmooth=smooth, bgcol=input$bgcol, limcol=input$limcol,
              targcol=input$targcol, abtgcol=input$abtgcol,
              labcol=labcol, labcex=input$labcex, texcex=input$texcex)
}
# PNG download of the SPR circle plot.
output$downloadSPRcirc2 <- downloadHandler(
  filename = function() {
    # File name from the species input; default when the field is blank.
    nm <- input$Species
    nm <- gsub(" ", "", nm)
    if (nchar(nm) <1) nm <- "MySpecies"
    paste0(nm, '_SPREst.png')
  },
  content = function(file) {
    png(file)
    # Fix: close the graphics device even if plotting fails; previously an
    # error in plotOut1() would leak an open device and skip dev.off().
    on.exit(dev.off(), add = TRUE)
    plotOut1()
  }
)
### Estimates over Time ###
# On-screen plot of the estimates by year.
output$YrEsts <- renderPlot({
  plotOut2()
})
# UI container for the by-year estimates plot. With fewer than two years of
# data the plot is replaced by an explanatory heading.
output$EstsByYear <- renderUI({
  if (!values$DoneAssess) return("")
  if (!"ests" %in% input$pTypes) return("")
  if (getLB_lens()@NYears < 2) {
    return(
      fluidRow(
        h4("Estimates by Year: only one year - plot not shown"),
        style="padding-top: 25px;")
    )
  }
  fluidRow(
    h4("Estimates by Year (with 95% confidence intervals)"),
    plotOutput("YrEsts", height="220px"),
    downloadButton("dnloadEsts", label = "Download", class = NULL),
    style="padding-top: 25px;")
})
# Draws the estimates-by-year plot. Shared by the on-screen renderer and the
# PNG download handler. The "TRUE"/"FALSE" string inputs are converted to
# logical flags before being passed to plotEsts().
plotOut2 <- function(){
  if (!values$DoneAssess) return("")
  if (!"ests" %in% input$pTypes) return("")
  if (input$smooth == "TRUE") smooth <- TRUE
  if (input$smooth != "TRUE") smooth <- FALSE
  if (input$incL50 == "TRUE") incL50 <- TRUE
  if (input$incL50 != "TRUE") incL50 <- FALSE
  plotEsts(doAssess(), doSmooth=smooth, CIcol=input$CIcol, axCex=input$axCex,
           labCex=input$labCex, ptCex=input$ptCex, incL50=incL50, L50col=input$L50col)
}
# PNG download of the estimates-by-year plot.
output$dnloadEsts <- downloadHandler(
  filename = function() {
    # File name from the species input; default when the field is blank.
    nm <- input$Species
    nm <- gsub(" ", "", nm)
    if (nchar(nm) <1) nm <- "MySpecies"
    paste0(nm, '_YrEsts.png')
  },
  content = function(file) {
    png(file, width=900, height=550)
    # Fix: close the graphics device even if plotting fails; previously an
    # error in plotOut2() would leak an open device and skip dev.off().
    on.exit(dev.off(), add = TRUE)
    plotOut2()
  }
)
})
|
library(RSQLite)
library(DBI)
#' There is a fair amount of denormalization in the tables below. We are doing
#' this as a computational expedient. This structure is for demonstration,
#' education and entertainment only.
#' SQLite is such a pain in issuing multiple statements. This is an adaptation
#' of the SQL provision script with each call wrapped in dbExecute. It is no
#' longer flexible to the provision parameters; you just have to run several
#' calls. Limited time here.

# Open (or create) the game database in the working directory.
db_con <- dbConnect(SQLite(), 'big_long.sqlite')

# Drop any existing tables so the script can be re-run from a clean slate.
# Fixes: the drop for tbl_policyholder was missing entirely, and the
# experience-table drop targeted a misspelled name
# ("tbl_policyholder_experiece"), so both CREATE statements below failed on
# every re-run against an existing database.
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_segment")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_player")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_policyholder")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_policyholder_experience")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_player_experience")

# Per-segment risk parameters and expected frequency/severity/cost.
dbExecute(db_con,"
CREATE TABLE
tbl_segment(
name TEXT NOT NULL PRIMARY KEY
, compare_alpha REAL
, compare_beta REAL
, compare_trend REAL
, freq_shape REAL
, freq_scale REAL
, freq_trend REAL
, sev_shape REAL
, sev_scale REAL
, sev_trend REAL
, expected_freq REAL
, expected_severity REAL
, expected_cost REAL
)")

# One row per policyholder, denormalized with its segment's expectations.
dbExecute(db_con,"
CREATE TABLE
tbl_policyholder(
id INTEGER NOT NULL PRIMARY KEY
, segment_name TEXT NOT NULL
, expected_cost REAL NOT NULL
, compare REAL NOT NULL
, frequency REAL NOT NULL
, severity REAL NOT NULL
)")

# One row per player (human or bot) and their pricing strategy settings.
dbExecute(db_con,"
CREATE TABLE
tbl_player(
name TEXT NOT NULL PRIMARY KEY
, bot INTEGER NOT NULL
, default_rate_change REAL
, hist_include REAL
, attenuation REAL
, cap_increase REAL
, cap_decrease REAL
)")

# Per-round outcome for each policyholder.
dbExecute(db_con,"
CREATE TABLE
tbl_policyholder_experience(
round_num INTEGER NOT NULL
, policyholder_id INTEGER NOT NULL
, segment_name TEXT NOT NULL
, expected_cost REAL NOT NULL
, compare REAL NOT NULL
, observed_claims REAL NOT NULL
, observed_cost REAL NOT NULL
, compared REAL NOT NULL
, written_by REAL
, income REAL
, written_premium REAL
, PRIMARY KEY (round_num, policyholder_id)
)")

# Per-round pricing decisions for each player/segment combination.
dbExecute(db_con,"
CREATE TABLE
tbl_player_experience (
player_name TEXT NOT NULL
, segment_name TEXT NOT NULL
, round_num INTEGER NOT NULL
, prior_offer_premium REAL
, historical_cost REAL
, historical_premium REAL
, indicated_pure_premium REAL
, indicated_change REAL
, default_rate_change REAL
, rate_change REAL
, offer_premium REAL NOT NULL
, PRIMARY KEY (player_name, segment_name, round_num)
)")
| /provision_db.R | no_license | jimscratch/shiny_big_long | R | false | false | 2,569 | r | library(RSQLite)
library(DBI)
#' There is a fair amount of denormalization in the tables below. We are doing
#' this as a computational expedient. This structure is for demonstration,
#' education and entertainment only.
#' SQLite is such a pain in issuing multiple statements. This is an adaptation
#' of the SQL provision script with each call wrapped in dbExecute. It is no
#' longer flexible to the provision parameters; you just have to run several
#' calls. Limited time here.

# Open (or create) the game database in the working directory.
db_con <- dbConnect(SQLite(), 'big_long.sqlite')

# Drop any existing tables so the script can be re-run from a clean slate.
# Fixes: the drop for tbl_policyholder was missing entirely, and the
# experience-table drop targeted a misspelled name
# ("tbl_policyholder_experiece"), so both CREATE statements below failed on
# every re-run against an existing database.
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_segment")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_player")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_policyholder")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_policyholder_experience")
dbExecute(db_con, "DROP TABLE IF EXISTS tbl_player_experience")

# Per-segment risk parameters and expected frequency/severity/cost.
dbExecute(db_con,"
CREATE TABLE
tbl_segment(
name TEXT NOT NULL PRIMARY KEY
, compare_alpha REAL
, compare_beta REAL
, compare_trend REAL
, freq_shape REAL
, freq_scale REAL
, freq_trend REAL
, sev_shape REAL
, sev_scale REAL
, sev_trend REAL
, expected_freq REAL
, expected_severity REAL
, expected_cost REAL
)")

# One row per policyholder, denormalized with its segment's expectations.
dbExecute(db_con,"
CREATE TABLE
tbl_policyholder(
id INTEGER NOT NULL PRIMARY KEY
, segment_name TEXT NOT NULL
, expected_cost REAL NOT NULL
, compare REAL NOT NULL
, frequency REAL NOT NULL
, severity REAL NOT NULL
)")

# One row per player (human or bot) and their pricing strategy settings.
dbExecute(db_con,"
CREATE TABLE
tbl_player(
name TEXT NOT NULL PRIMARY KEY
, bot INTEGER NOT NULL
, default_rate_change REAL
, hist_include REAL
, attenuation REAL
, cap_increase REAL
, cap_decrease REAL
)")

# Per-round outcome for each policyholder.
dbExecute(db_con,"
CREATE TABLE
tbl_policyholder_experience(
round_num INTEGER NOT NULL
, policyholder_id INTEGER NOT NULL
, segment_name TEXT NOT NULL
, expected_cost REAL NOT NULL
, compare REAL NOT NULL
, observed_claims REAL NOT NULL
, observed_cost REAL NOT NULL
, compared REAL NOT NULL
, written_by REAL
, income REAL
, written_premium REAL
, PRIMARY KEY (round_num, policyholder_id)
)")

# Per-round pricing decisions for each player/segment combination.
dbExecute(db_con,"
CREATE TABLE
tbl_player_experience (
player_name TEXT NOT NULL
, segment_name TEXT NOT NULL
, round_num INTEGER NOT NULL
, prior_offer_premium REAL
, historical_cost REAL
, historical_premium REAL
, indicated_pure_premium REAL
, indicated_change REAL
, default_rate_change REAL
, rate_change REAL
, offer_premium REAL NOT NULL
, PRIMARY KEY (player_name, segment_name, round_num)
)")
|
source("./cmaes.R")
source("./test_functions.R")
source("./RSalgorithm.R")
source("./NSalgorithm.R")
source("./lbfgs.R")
source("./of_wrapper.R")
antennes2d<-load("antennes_2d_train.Rdata")
X.train<-C
S.train<-S
summary(antennes2d)
library(DiceKriging)
library("rgl") # library for plots
nb_antennas=1
zdim <- 2
budget <- 3000
LB = -6
UB = 8
x1=seq(-6,5,length=10)
x2=seq(-6,8.7,length=10)
grid <- expand.grid(x1=x1, x2=x2)
grid<-cbind(grid[,1],grid[,2])
# grid<-as.matrix(grid)
# y<- meanofGP(grid)
# y <- apply(grid, 1, krigemean)
GPmodel <- km(design=X.train, response=S.train ,covtype="matern3_2")
y <- apply(grid, 1, meanofGP)
# p <- predict(m1, X.test , type="SK")
fun <- meanofGP
# optimize with normal search
paramNS <- list(LB=LB,UB = UB,budget = budget,dim=zdim, xinit=rep(-3.2,2),sigma=0.3) # param for normal_search
optresNS <- normal_search(fun, paramNS)
open3d()
surface3d(x1, x2, y, col= "lightblue")
points3d(optresNS$xhist[,1], optresNS$xhist[,2], optresNS$fhist, pch=19, col="red", size=10)
title3d("mean of kriging using ES-(1+1)", col="blue", font=4)
decorate3d()
aspect3d(1, 1, 1)
# controls for noisy functions and other dirty global variables
glob_noisy <- FALSE # is the function noisy
glob_tau <- 1 # noise std deviation
# glob_estim_noise <- FALSE # this should go in KNF parameters
glob_xstar <- rep(2.5,zdim)
store_hist <<- FALSE # TRUE only inside lbfgs.R, see file.
# CMA-ES run on the same objective with the same budget and bounds.
paramCMA <- list(LB=LB,UB = UB,budget = budget, dim=zdim, xinit=rep(-3.3,2),sigma=1.) # param for cmaes
optresCMA <- cmaes(fun, paramCMA)
# 3D surface plot with the CMA-ES mean-point trajectory overlaid in red.
open3d()
surface3d(x1, x2, y, col= "lightblue")
points3d(optresCMA$xmeanhist[,1], optresCMA$xmeanhist[,2], optresCMA$ymeanhist, pch=19, col="red", size=10)
title3d("meanofGP using CMAES", col="blue", font=4)
decorate3d()
aspect3d(1, 1, 1)
par(mfrow=c(1,1))
# print out results (ES-(1+1))
cat("xbest=")
optresNS$x_best
cat("fbest=")
optresNS$f_best
plot(optresNS$fhist,type="l",
     xlab="no. calls to f",ylab="f")
title("ES-(1+1)")
# print out results (CMA-ES)
cat("xbest=")
optresCMA$x_best
cat("fbest=")
optresCMA$f_best
plot(optresCMA$ymeanhist,type="l",
     xlab="no. of iterations",ylab="f of xmean")
title("CMA-ES")
# save the results, put your names in the output file
fname <- "Adref_Benechehab_Gueddari_cma_2d.Rdata"
# NOTE(review): this stores the ES-(1+1) (normal-search) solution under a
# "cma" name; optresCMA$x_best may have been intended -- confirm.
x_solution_cma_2d <- matrix(optresNS$x_best,ncol=zdim)
y_solution_cma_2d <- predict(object = GPmodel,newdata=data.frame(x_solution_cma_2d),type="UK")
# y_solution_cma_2d$mean for GPmodel prediction
# and y_solution_cma_2d$sd for GPmodel standard deviation at x_solution
# just saving the 2d solution, you'll have to add the 6d solution as well
save(x_solution_cma_2d,y_solution_cma_2d,file=fname)
# at the end for example, load("temporary_files.Rdata") to get back
# other solutions (6d, with EGO) and save again in a final file
# save(... all objects ..., file="Name1_Name2_Name3.Rdata")
| /OptimGlob/CMAESKrigingMean2D.R | no_license | Hakiiiim/The-antennas-location-problem | R | false | false | 2,947 | r | source("./cmaes.R")
source("./test_functions.R")
source("./RSalgorithm.R")
source("./NSalgorithm.R")
source("./lbfgs.R")
source("./of_wrapper.R")
antennes2d<-load("antennes_2d_train.Rdata")
X.train<-C
S.train<-S
summary(antennes2d)
library(DiceKriging)
library("rgl") # library for plots
nb_antennas=1
zdim <- 2
budget <- 3000
LB = -6
UB = 8
x1=seq(-6,5,length=10)
x2=seq(-6,8.7,length=10)
grid <- expand.grid(x1=x1, x2=x2)
grid<-cbind(grid[,1],grid[,2])
# grid<-as.matrix(grid)
# y<- meanofGP(grid)
# y <- apply(grid, 1, krigemean)
GPmodel <- km(design=X.train, response=S.train ,covtype="matern3_2")
y <- apply(grid, 1, meanofGP)
# p <- predict(m1, X.test , type="SK")
fun <- meanofGP
# optimize with normal search
paramNS <- list(LB=LB,UB = UB,budget = budget,dim=zdim, xinit=rep(-3.2,2),sigma=0.3) # param for normal_search
optresNS <- normal_search(fun, paramNS)
open3d()
surface3d(x1, x2, y, col= "lightblue")
points3d(optresNS$xhist[,1], optresNS$xhist[,2], optresNS$fhist, pch=19, col="red", size=10)
title3d("mean of kriging using ES-(1+1)", col="blue", font=4)
decorate3d()
aspect3d(1, 1, 1)
# controls for noisy functions and other dirty global variables
glob_noisy <- FALSE # is the function noisy
glob_tau <- 1 # noise std deviation
# glob_estim_noise <- FALSE # this should go in KNF parameters
glob_xstar <- rep(2.5,zdim)
store_hist <<- FALSE # TRUE only inside lbfgs.R, see file.
# CMA-ES run on the same objective with the same budget and bounds.
paramCMA <- list(LB=LB,UB = UB,budget = budget, dim=zdim, xinit=rep(-3.3,2),sigma=1.) # param for cmaes
optresCMA <- cmaes(fun, paramCMA)
# 3D surface plot with the CMA-ES mean-point trajectory overlaid in red.
open3d()
surface3d(x1, x2, y, col= "lightblue")
points3d(optresCMA$xmeanhist[,1], optresCMA$xmeanhist[,2], optresCMA$ymeanhist, pch=19, col="red", size=10)
title3d("meanofGP using CMAES", col="blue", font=4)
decorate3d()
aspect3d(1, 1, 1)
par(mfrow=c(1,1))
# print out results (ES-(1+1))
cat("xbest=")
optresNS$x_best
cat("fbest=")
optresNS$f_best
plot(optresNS$fhist,type="l",
     xlab="no. calls to f",ylab="f")
title("ES-(1+1)")
# print out results (CMA-ES)
cat("xbest=")
optresCMA$x_best
cat("fbest=")
optresCMA$f_best
plot(optresCMA$ymeanhist,type="l",
     xlab="no. of iterations",ylab="f of xmean")
title("CMA-ES")
# save the results, put your names in the output file
fname <- "Adref_Benechehab_Gueddari_cma_2d.Rdata"
# NOTE(review): this stores the ES-(1+1) (normal-search) solution under a
# "cma" name; optresCMA$x_best may have been intended -- confirm.
x_solution_cma_2d <- matrix(optresNS$x_best,ncol=zdim)
y_solution_cma_2d <- predict(object = GPmodel,newdata=data.frame(x_solution_cma_2d),type="UK")
# y_solution_cma_2d$mean for GPmodel prediction
# and y_solution_cma_2d$sd for GPmodel standard deviation at x_solution
# just saving the 2d solution, you'll have to add the 6d solution as well
save(x_solution_cma_2d,y_solution_cma_2d,file=fname)
# at the end for example, load("temporary_files.Rdata") to get back
# other solutions (6d, with EGO) and save again in a final file
# save(... all objects ..., file="Name1_Name2_Name3.Rdata")
|
############################
# STATISTICAL SIGNIFICANCE #
############################
#Name:
#Date:
#Summary: This assignment is to :
#1. practice using ggplot2
#2. demonstrate tests for statistical significance
#3. distinguish between statistical significance and practical importance
# set the working directory you want to use
# NOTE(review): hard-coded path; students should change this to their own.
setwd("C://Users/Desktop")
# install and load some of the required packages we need
install.packages("ggplot2")
install.packages("readr")
library("ggplot2")
library(readr)
theme_set(theme_bw())
#Download NYPD Crash Data from NYC Open Data Portal: data.cityofnewyork.us
# this is a large data set (286mb) and takes a while to download.
url <- "https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv?accessType=DOWNLOAD"
?read_csv
# use ? or ?? to find out information about a function
crashes <- read_csv(file = url)
# now we have the data in the object called crashes
# we want to explore the properties of this
#Dataframes
# find out what type of object it is
class(crashes)
head(crashes)
tail(crashes)
dim(crashes)
c(nrow(crashes), ncol(crashes))
## We can do similar things over just a single variable in the data
# Summarizing Columns of Dataframes
# Remember we access a variable of a df with $
# (backticks are needed because the column name contains a space)
head(crashes$DATE)
head(crashes$`CRASH DATE`)
tail(crashes$`CRASH DATE`)
class(crashes$`CRASH DATE`)
summary(crashes$`CRASH DATE`)
summary(object = crashes$DATE)
# Parse the character dates into proper Date objects in a new column.
crashes$date <- as.Date(x = crashes$`CRASH DATE`, format = "%m/%d/%Y") #add new column
dim(crashes)
head(crashes$date)
class(crashes$date)
summary(object = crashes$date)
c(min(crashes$date), max(crashes$date))
range(crashes$date)
range(crashes$DATE)
# this last one is different as it's just characters; R doesn't know how to rank them correctly
##### Explore the data with some plots
# Multiple ways to plot in R. Traditional base plot or more flexible ggplot
hist(x = crashes$date, breaks = 30) #base R
qplot(x = crashes$date) #ggplot2
# to do a histogram you need to specify how to bin the data. this can change how these plots look
# dramatically
qplot(crashes$date,binwidth = 5)
qplot(crashes$date, binwidth = 50)
# check if anything strange is happening when looking at the plots. see the first and last
# why could this be?
#Are you more likely to crash at the end of the month?
?as.Date
# what day of the month do crashes occur?
crashes$day <- format(x = crashes$date, format = "%d")
?ifelse
class(crashes$day)
# format() returns character; convert to numeric so we can compare with < 15.
crashes$day <- as.numeric(format(x = crashes$date, format = "%d"))
class(crashes$day)
# Label each crash by which half of the month it fell in.
crashes$half <- ifelse(test = crashes$day < 15, "First Half", "Second Half")
qplot(x = crashes$half)
qplot(x = crashes$half,
      xlab = "",
      ylab = "Total Number of Crashes",
      main = "Are you more likely to crash at the end of the month?")
#How many crashes happen in the first half of the month?
sum(crashes$half == "First Half")
table(crashes$half)
table(crashes$half)/nrow(crashes)
c(sum(crashes$half == "First Half"),sum(crashes$half == "Second Half"))/nrow(crashes)
#Is this statistically significant?
# Binomial test: probability of seeing at most this many first-half crashes
# if each half were equally likely.
?pbinom
pbinom(q = sum(crashes$half == "First Half"),
       size = nrow(crashes),
       prob = .5)
# The probability is so small it underflows; use log = TRUE to see it.
pbinom(q = sum(crashes$half == "First Half"),
       size = nrow(crashes),
       prob = .5,
       log = TRUE)
exp(-4088.944)
#What's wrong with our definition of the first half of the month?
# "day < 15" gives only 14 days in the first half, so the halves are
# unequal. Build a calendar of every day in the data to get the true split.
every_day <- data.frame()
class(every_day)
head(every_day)
?seq.Date
every_day <- data.frame(date = seq(from = min(crashes$date),
                                   to = max(crashes$date),
                                   by = "day"))
head(every_day)
every_day$day <- as.numeric(format(x = every_day$date, format = "%d"))
every_day$half <- ifelse(test = every_day$day < 15, "First Half", "Second Half")
table(every_day$half)
table(every_day$half)/nrow(every_day)
table(crashes$half)/nrow(crashes)
#Are you really more likely to crash at the end of the month?
#How likely are these values if you are equally likely to get a crash in each half?
# Re-run the binomial test with the calendar-based baseline probability.
pbinom(q = sum(crashes$half == "First Half"),
       size = nrow(crashes),
       prob = mean(every_day$half == "First Half"))
#Plot crashes per day
# assign a weight based on the half of the month so each bar shows
# crashes PER DAY rather than total crashes per (unequal) half
crashes$weight <- ifelse(crashes$half == "First Half",
                         1/sum(every_day$half == "First Half"),
                         1/sum(every_day$half == "Second Half"))
# NOTE(review): the first two calls below reference columns without data=
# and will not find `half`/`weight`; they appear to be a deliberate teaching
# progression -- the data = crashes form is the correct one.
qplot(x = half, weight = crashes$weight)
qplot(x = crashes$half, weight = weight)
qplot(x = crashes$half, weight = weight, data = crashes)
qplot(x = half,
      weight = weight,
      data = crashes,
      xlab = "",
      ylab = "Total Number of Crashes per Day",
      main = "Are you more likely to crash at the end of the month?")
#What about a ticket?
#Download DOF Parking Violations Data from NYC Open Data Portal: data.cityofnewyork.us
#tickets <- rbind(read.csv("https://data.cityofnewyork.us/api/views/pvqr-7yc4/rows.csv?accessType=DOWNLOAD"),
#                 read.csv("https://data.cityofnewyork.us/api/views/kiv2-tbus/rows.csv?accessType=DOWNLOAD"),
#                 read.csv("https://data.cityofnewyork.us/api/views/c284-tqph/rows.csv?accessType=DOWNLOAD"),
#                 read.csv("https://data.cityofnewyork.us/api/views/jt7v-77mi/rows.csv?accessType=DOWNLOAD"))
#These files are really big so we'll download a smaller version from github instead
#
# load in, assuming it's in the folder we set as working directory before
tickets2013 <- read.csv(unz("tickets2013.zip","tickets2013"))
tickets2014 <- read.csv(unz("tickets2014.zip","tickets2014"))
tickets2015 <- read.csv(unz("tickets2015.zip","tickets2015"))
tickets <- rbind(tickets2013,tickets2014,tickets2015)
head(tickets)
# Same day-of-month / half-of-month derivation as for the crash data.
tickets$Date <- as.Date(tickets$Date,"%Y-%m-%d")
tickets$day <- as.numeric(format(x = tickets$Date, format = "%d"))
tickets$half <- ifelse(test = tickets$day < 15, "First Half", "Second Half")
table(tickets$half)
table(tickets$half)/nrow(tickets)
pbinom(q = sum(tickets$half == "First Half"),
       size = nrow(tickets),
       prob = .5)
qplot(x = half,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets",
      main = "Are you more likely to get a ticket at the end of the month?")
#Is it statistically significant?
# Binomial test with the calendar-based first-half probability.
pbinom(q = sum(tickets$half == "First Half"),
       size = nrow(tickets),
       prob = sum(every_day$half == "First Half")/nrow(every_day))
#every_day <- data.frame(date = seq(from = min(tickets$Date),
#                                   to = max(tickets$Date),
#                                   by = "day"))
head(every_day)
every_day$day <- as.numeric(format(x = every_day$date, format = "%d"))
every_day$half <- ifelse(test = every_day$day < 15, "First Half", "Second Half")
# Per-day weights so each bar shows tickets per day, not per unequal half.
tickets$weight <- ifelse(tickets$half == "First Half",
                         1/sum(every_day$half == "First Half"),
                         1/sum(every_day$half == "Second Half"))
#Is the difference actually meaningful? Does this look like a lot of evidence?
qplot(x = half,
      weight = weight,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?")
pbinom(q = sum(tickets$half == "First Half"),
       size = nrow(tickets),
       prob = mean(every_day$day < 15))
# precincts, but too slow to run
pct <- c(1,5,6,7,9,10, 13, 17, 19, 20, 23, 24, 25, 26, 28, 30, 32, 33, 34,
         40:50, 52, 60:63,66:73,75:79,81,83,84,88,90,94,100:115, 120:123)
#skip this
# NOTE(review): `tickets %in% pct` tests the whole data frame, not a
# precinct column; a column subset (e.g. tickets$Precinct %in% pct) was
# presumably intended -- left as-is since the call is marked "skip this".
qplot(x = half,
      weight = weight,
      data = tickets[tickets %in% pct, ],
      xlab = "",
      ylab = "Total Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?")
# Break the comparison down by violation type.
qplot(x = half,
      weight = weight,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?") +
  facet_wrap(~ Violation.Name)
qplot(x = half,
      weight = weight,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?") +
  facet_wrap(~ Violation.Name, scales = "free")
# removes constraint that all plots have same scale, which can be a bad idea
#Of course, this is just comparing month half. Maybe we've "smoothed" over the end of the month increase?
#What if we increase the number of bins?
qplot(x = day,
      bins = 31,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets",
      main = "Are you more likely to get a ticket at the end of the month?") +
  facet_wrap(~Violation.Name, scales = "free")
#We're running into binning issues again:
#The dips at the end are due to the fact that only some months have 31 days
#Instead of weighting 31 bins, what if we look at day/max days in the month?
tickets$month <- format(tickets$Date,"%m")
head(tickets$month)
tickets$month <- as.numeric(format(tickets$Date,"%m"))
# Days in each month: 30 for Apr/Jun/Sep/Nov, 31 otherwise, 28 for Feb
# (none of 2013-2015 is a leap year, so 28 is safe for this data).
tickets$max_day <- ifelse(tickets$month %in% c(4,6,9,11),30,31)
tickets$max_day[tickets$month == 2] <-28
# Plot the FRACTION of the month elapsed instead of the raw day number.
qplot(x = day/max_day,
      bins = 31,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets",
      main = "Are you more likely to get a ticket at the end of the month?") +
  facet_wrap(~Violation.Name, scales = "free")
#The dips at the end are gone, but what are these dips in the middle?
#They're actually another artifact of binning!
qplot(x = day/max_day,
      bins = 150,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets",
      main = "Are you more likely to get a ticket at the end of the month?") +
  facet_wrap(~Violation.Name, scales = "free")
#It's actually easier to see in polar coordinates. This is called a Rose Plot
qplot(x = day/max_day,
      bins = 150,
      data = tickets,
      xlab = "",
      ylab = "Total Number of Tickets (Smoothed)",
      main = "Are you more likely to get a ticket at the end of the month?") +
  coord_polar() +
  facet_wrap(~Violation.Name)
qplot(x = day/max_day,
      bins = 150,
      data = tickets[tickets$Violation.Name == "Red Light Camera",],
      xlab = "",
      ylab = "Total Number of Tickets (Smoothed)",
      main = "Are you more likely to get a ticket at the end of the month?") +
  coord_polar()
#How can we perform a significance test?
#We can use the Von Mises Score test:
# Map the month fraction onto the circle [0, 2*pi); under the null of a
# uniform distribution the sum of cosines should be near zero.
head((tickets$day/tickets$max_day)*2*pi)
summary( (tickets$day/tickets$max_day)*2*pi)
test <- sum(cos( (tickets$day/tickets$max_day)*2*pi ))
test
#How likely is this value? We can simulate the probability under the null
#We can create fake data
fake_test <- sum(cos(runif(nrow(tickets),0,2*pi)))
# Simulate the null distribution of the statistic 100 times.
fake_test <- numeric(100)
head(fake_test)
for(sim in seq_along(fake_test)){ fake_test[sim] <- sum(cos(runif(nrow(tickets),0,2*pi)))}
head(fake_test)
head(abs(fake_test) > abs(test))
# Empirical two-sided p-value: share of simulations at least as extreme.
sum(abs(fake_test) > abs(test))/100
#How meaningful is this value?
qplot(x = day/max_day,
      data = tickets,
      geom = "density",
      xlab = "",
      ylab = "Smoothed Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?") +
  coord_polar() +
  facet_wrap(~Violation.Name)
### Add Hadley Wickham dataviz permuting graphs paper here
# "Lineup" protocol: hide the real plot among null plots and see whether it
# can be picked out by eye (nullabor package).
install.packages("nullabor")
library(nullabor)
library(dplyr)
tickets_sample <- sample_n(tickets[tickets$Violation.Name == "Double Parking",], size = 1000000)
d <- lineup(null_dist("day", dist = "uniform", params = c(min = 1, max = 31)),tickets_sample, n = 20)
#head(d)
qplot(x = day,
      data = d,
      geom = "density",
      xlab = "",
      ylab = "Smoothed Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?") +
  #coord_polar() +
  facet_wrap(~.sample)
qplot(x = day,
      data = d,
      geom = "density",
      xlab = "",
      ylab = "Smoothed Number of Tickets per Day",
      main = "Are you more likely to get a ticket at the end of the month?") +
  coord_polar() +
  facet_wrap(~.sample)
| /Intro_Data_Science_Spring_2019-gh-pages/example/significance.R | no_license | collin-cademartori/SHP_EIDS | R | false | false | 12,416 | r | ############################
# STATISTICAL SIGNIFICANCE #
############################
#Name:
#Date:
#Summary: This assignment is to :
#1. practice using ggplot2
#2. demonstrate tests for statistical significance
#3. distinguish between statistical significance and practical importance
# set the working directory you want to use
setwd("C://Users/Desktop")
# install and load some of the required packages we need
install.packages("ggplot2")
install.packages("readr")
library("ggplot2")
library(readr)
theme_set(theme_bw())
#Download NYPD Crash Data from NYC Open Data Portal: data.cityofnewyork.us
# this is a large data set (286mb) and takes a while to download.
url <- "https://data.cityofnewyork.us/api/views/h9gi-nx95/rows.csv?accessType=DOWNLOAD"
?read_csv
# use ? or ?? to find out information about a function
crashes <- read_csv(file = url)
# now we have the the data in the object called crash
# we want to explore the properties of this
#Dataframes
# find out what type of object it is
class(crashes)
head(crashes)
tail(crashes)
dim(crashes)
c(nrow(crashes), ncol(crashes))
## We can do similar things over just a single variable in the data
# Summarizing Columns of Dataframes
# Remember we access a variable of a df with $
head(crashes$DATE)
head(crashes$`CRASH DATE`)
tail(crashes$`CRASH DATE`)
class(crashes$`CRASH DATE`)
summary(crashes$`CRASH DATE`)
summary(object = crashes$DATE)
crashes$date <- as.Date(x = crashes$`CRASH DATE`, format = "%m/%d/%Y") #add new column
dim(crashes)
head(crashes$date)
class(crashes$date)
summary(object = crashes$date)
c(min(crashes$date), max(crashes$date))
range(crashes$date)
range(crashes$DATE)
# this last one is different as its just characters, doesn't know how to rank them correctly
##### Explore the data with some plots
# Multiple ways to plot in R. Traditional base plot or more felxible ggplot
hist(x = crashes$date, breaks = 30) #base R
qplot(x = crashes$date) #ggplot2
# to do a histogram you need to specify how to bin the data. this can change how these plots look
# dramatically
qplot(crashes$date,binwidth = 5)
qplot(crashes$date, binwidth = 50)
# check is anything strange happening when look at plots. see the first and last
# why could this be?
#Are you more likley to crash at the end of the month?
?as.Date
# what day of the month do crashes occur?
crashes$day <- format(x = crashes$date, format = "%d")
?ifelse
class(crashes$day)
crashes$day <- as.numeric(format(x = crashes$date, format = "%d"))
class(crashes$day)
crashes$half <- ifelse(test = crashes$day < 15, "First Half", "Second Half")
qplot(x = crashes$half)
qplot(x = crashes$half,
xlab = "",
ylab = "Total Number of Crashes",
main = "Are you more likely to crash at the end of the month?")
#How many crashes happen in the first half of the month?
sum(crashes$half == "First Half")
table(crashes$half)
table(crashes$half)/nrow(crashes)
c(sum(crashes$half == "First Half"),sum(crashes$half == "Second Half"))/nrow(crashes)
#Is this statistically significant?
?pbinom
pbinom(q = sum(crashes$half == "First Half"),
size = nrow(crashes),
prob = .5)
pbinom(q = sum(crashes$half == "First Half"),
size = nrow(crashes),
prob = .5,
log = TRUE)
exp(-4088.944)
#What's wrong with our definition of the first half of the month?
every_day <- data.frame()
class(every_day)
head(every_day)
?seq.Date
every_day <- data.frame(date = seq(from = min(crashes$date),
to = max(crashes$date),
by = "day"))
head(every_day)
every_day$day <- as.numeric(format(x = every_day$date, format = "%d"))
every_day$half <- ifelse(test = every_day$day < 15, "First Half", "Second Half")
table(every_day$half)
table(every_day$half)/nrow(every_day)
table(crashes$half)/nrow(crashes)
#Are you really more likely to crash at the end of the month?
#How likley are these values if you are equally likely to get a crash in each half?
pbinom(q = sum(crashes$half == "First Half"),
size = nrow(crashes),
prob = mean(every_day$half == "First Half"))
#Plot crashes per day
# assign a weight based on the half of the month
crashes$weight <- ifelse(crashes$half == "First Half",
1/sum(every_day$half == "First Half"),
1/sum(every_day$half == "Second Half"))
qplot(x = half, weight = crashes$weight)
qplot(x = crashes$half, weight = weight)
qplot(x = crashes$half, weight = weight, data = crashes)
qplot(x = half,
weight = weight,
data = crashes,
xlab = "",
ylab = "Total Number of Crashes per Day",
main = "Are you more likely to crash at the end of the month?")
#What about a ticket?
#Download DOF Parking Violations Data from NYC Open Data Portal: data.cityofnewyork.us
#tickets <- rbind(read.csv("https://data.cityofnewyork.us/api/views/pvqr-7yc4/rows.csv?accessType=DOWNLOAD"),
# read.csv("https://data.cityofnewyork.us/api/views/kiv2-tbus/rows.csv?accessType=DOWNLOAD"),
# read.csv("https://data.cityofnewyork.us/api/views/c284-tqph/rows.csv?accessType=DOWNLOAD"),
# read.csv("https://data.cityofnewyork.us/api/views/jt7v-77mi/rows.csv?accessType=DOWNLOAD"))
#These files are really big so we'll download a smaller version from github instead
#
# load in, assuming it's in the folder we set as working directory before
tickets2013 <- read.csv(unz("tickets2013.zip","tickets2013"))
tickets2014 <- read.csv(unz("tickets2014.zip","tickets2014"))
tickets2015 <- read.csv(unz("tickets2015.zip","tickets2015"))
tickets <- rbind(tickets2013,tickets2014,tickets2015)
head(tickets)
tickets$Date <- as.Date(tickets$Date,"%Y-%m-%d")
tickets$day <- as.numeric(format(x = tickets$Date, format = "%d"))
tickets$half <- ifelse(test = tickets$day < 15, "First Half", "Second Half")
table(tickets$half)
table(tickets$half)/nrow(tickets)
pbinom(q = sum(tickets$half == "First Half"),
size = nrow(tickets),
prob = .5)
qplot(x = half,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets",
main = "Are you more likely to get a ticket at the end of the month?")
#Is it statistically significant?
pbinom(q = sum(tickets$half == "First Half"),
size = nrow(tickets),
prob = sum(every_day$half == "First Half")/nrow(every_day))
#every_day <- data.frame(date = seq(from = min(tickets$Date),
# to = max(tickets$Date),
# by = "day"))
head(every_day)
every_day$day <- as.numeric(format(x = every_day$date, format = "%d"))
every_day$half <- ifelse(test = every_day$day < 15, "First Half", "Second Half")
tickets$weight <- ifelse(tickets$half == "First Half",
1/sum(every_day$half == "First Half"),
1/sum(every_day$half == "Second Half"))
#Is the difference actually meaningful? Does this look like a lot of evidence?
qplot(x = half,
weight = weight,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?")
pbinom(q = sum(tickets$half == "First Half"),
size = nrow(tickets),
prob = mean(every_day$day < 15))
# precincts, but too slow to run
pct <- c(1,5,6,7,9,10, 13, 17, 19, 20, 23, 24, 25, 26, 28, 30, 32, 33, 34,
40:50, 52, 60:63,66:73,75:79,81,83,84,88,90,94,100:115, 120:123)
#skip this
qplot(x = half,
weight = weight,
data = tickets[tickets %in% pct, ],
xlab = "",
ylab = "Total Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?")
qplot(x = half,
weight = weight,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?") +
facet_wrap(~ Violation.Name)
qplot(x = half,
weight = weight,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?") +
facet_wrap(~ Violation.Name, scales = "free")
# removes constraint that all plots have same scale, which can be a bad idea
#Of course, this is just comparing month half. Maybe we've "smoothed" over the end of the month increase?
#What if we increase the number of bins?
qplot(x = day,
bins = 31,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets",
main = "Are you more likely to get a ticket at the end of the month?") +
facet_wrap(~Violation.Name, scales = "free")
#We're running into binning issues again:
#The dips at the end are due to the fact that only some months have 31 days
#Instead of weighting 31 bins, what if we look at day/max days in the month?
tickets$month <- format(tickets$Date,"%m")
head(tickets$month)
tickets$month <- as.numeric(format(tickets$Date,"%m"))
tickets$max_day <- ifelse(tickets$month %in% c(4,6,9,11),30,31)
tickets$max_day[tickets$month == 2] <-28
qplot(x = day/max_day,
bins = 31,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets",
main = "Are you more likely to get a ticket at the end of the month?") +
facet_wrap(~Violation.Name, scales = "free")
#The dips at the end are gone, but what are these dips in the middle?
#They're actually another artifact of binning!
qplot(x = day/max_day,
bins = 150,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets",
main = "Are you more likely to get a ticket at the end of the month?") +
facet_wrap(~Violation.Name, scales = "free")
#It's actually easier to see in polar coordinates. This is called a Rose Plot
qplot(x = day/max_day,
bins = 150,
data = tickets,
xlab = "",
ylab = "Total Number of Tickets (Smoothed)",
main = "Are you more likely to get a ticket at the end of the month?") +
coord_polar() +
facet_wrap(~Violation.Name)
qplot(x = day/max_day,
bins = 150,
data = tickets[tickets$Violation.Name == "Red Light Camera",],
xlab = "",
ylab = "Total Number of Tickets (Smoothed)",
main = "Are you more likely to get a ticket at the end of the month?") +
coord_polar()
#How can we perform a significance test?
#We can use the Von Mises Score test:
head((tickets$day/tickets$max_day)*2*pi)
summary( (tickets$day/tickets$max_day)*2*pi)
test <- sum(cos( (tickets$day/tickets$max_day)*2*pi ))
test
#How likely is this value? We can simulate the probability under the null
#We can create fake data
fake_test <- sum(cos(runif(nrow(tickets),0,2*pi)))
fake_test <- numeric(100)
head(fake_test)
for(sim in seq_along(fake_test)){ fake_test[sim] <- sum(cos(runif(nrow(tickets),0,2*pi)))}
head(fake_test)
head(abs(fake_test) > abs(test))
sum(abs(fake_test) > abs(test))/100
#How meaningful is this value?
qplot(x = day/max_day,
data = tickets,
geom = "density",
xlab = "",
ylab = "Smoothed Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?") +
coord_polar() +
facet_wrap(~Violation.Name)
### Add Hadley Wickham dataviz permuting graphs paper here
install.packages("nullabor")
library(nullabor)
library(dplyr)
tickets_sample <- sample_n(tickets[tickets$Violation.Name == "Double Parking",], size = 1000000)
d <- lineup(null_dist("day", dist = "uniform", params = c(min = 1, max = 31)),tickets_sample, n = 20)
#head(d)
qplot(x = day,
data = d,
geom = "density",
xlab = "",
ylab = "Smoothed Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?") +
#coord_polar() +
facet_wrap(~.sample)
qplot(x = day,
data = d,
geom = "density",
xlab = "",
ylab = "Smoothed Number of Tickets per Day",
main = "Are you more likely to get a ticket at the end of the month?") +
coord_polar() +
facet_wrap(~.sample)
|
### experiment and create functions
### make Hist
makeHist <- function(year,length,startHeadcount,endHeadcount) {
  # Build a synthetic quarterly headcount history: a linear trend from
  # startHeadcount to endHeadcount over 'length' quarters starting in
  # 'year', with uniform noise of +/-50 added to each observation.
  #
  # year           - start year passed to timetk::tk_make_timeseries
  # length         - number of quarterly observations to generate
  # startHeadcount - headcount at the first quarter (before noise)
  # endHeadcount   - headcount at the last quarter (before noise)
  #
  # Returns a tibble with columns 'date' and 'headcount'.
  tibble(
    date = tk_make_timeseries(year, by = "quarter", length_out = length),
    # spell out length.out: 'length = length' relied on partial
    # argument matching in seq()
    headcount = round(seq(startHeadcount, endHeadcount, length.out = length), 0),
    noise = round(runif(length, min = -50, max = 50), 0)
  ) %>%
    mutate(headcount = headcount + noise) %>%
    select(-noise)
}
### make Forecast
makeForecast <- function(dataHist,yearForecastStart,length,growthSlope,supplySlope) {
  # Extend a headcount history with linear demand and supply forecasts.
  #
  # dataHist          - tibble with 'date' and 'headcount' (e.g. from makeHist)
  # yearForecastStart - first year of the forecast horizon
  # length            - number of forecast quarters
  # growthSlope       - total percent growth in demand over the horizon
  # supplySlope       - total percent decline in supply over the horizon
  #
  # Returns dataHist with the forecast rows appended and two new columns,
  # 'forecast' (demand) and 'forecastSupply' (supply). Both series are
  # anchored to the last historical headcount so the lines connect when
  # plotted.
  current <- dataHist$headcount[dataHist$date == max(dataHist$date)]
  ending <- current + current * growthSlope / 100
  start <- current + (ending - current) / length
  endingSupply <- current - current * supplySlope / 100
  startSupply <- current - (current - endingSupply) / length
  tibble(
    date = tk_make_timeseries(yearForecastStart, by = "quarter", length_out = length),
    # spell out length.out: 'length = length' relied on partial argument
    # matching in seq(); also dropped the trailing comma left in the
    # original tibble() call
    forecast = round(seq(start, ending, length.out = length), 0),
    forecastSupply = round(seq(startSupply, endingSupply, length.out = length), 0)
  ) %>%
    bind_rows(dataHist, .) %>%
    # overwrite the anchor quarter with the actual last headcount
    mutate(forecast = ifelse(date == max(dataHist$date), headcount, forecast),
           forecastSupply = ifelse(date == max(dataHist$date), headcount, forecastSupply))
}
#busAForecast<-makeForecast(busA,"2021",lengthOut,50,30)
# Plot a combined history + forecast tibble (output of makeForecast):
# grey history line/points, blue demand forecast, red supply forecast,
# with "Demand"/"Supply" text labels placed beside each line.
makePlot <- function(dataForecast) {
# Vertical midpoint of each forecast range; used as the y-coordinate
# for the corresponding text label.
demandLabelCoordinate<-min(dataForecast$forecast, na.rm=TRUE)+(max(dataForecast$forecast, na.rm=TRUE)-min(dataForecast$forecast, na.rm=TRUE))/2
supplyLabelCoordinate<-min(dataForecast$forecastSupply, na.rm=TRUE)+(max(dataForecast$forecastSupply, na.rm=TRUE)-min(dataForecast$forecastSupply, na.rm=TRUE))/2
dataForecast %>%
ggplot(aes(date, headcount)) +
# history in grey (na.rm=TRUE: forecast rows have NA headcount)
geom_line(aes(x = date, y = headcount),na.rm=TRUE,color="#878787", size=1) +
geom_point(aes(x = date, y = headcount),na.rm=TRUE,color="#878787",size=1) +
# demand forecast (blue) and supply forecast (red)
geom_line(aes(x = date, y = forecast),na.rm=TRUE,color="#0868AC", size=1) +
geom_line(aes(x = date, y = forecastSupply),na.rm=TRUE,color="#FB8072", size=1) +
theme_fivethirtyeight() +
ylab("Headcount") +
#labs(title="Projected Supply and Demand") +
theme(axis.title.y = element_text()) +
# labels are nudged 265 days right of the median date so they sit in
# the forecast half of the chart
annotate("text", x = median(dataForecast$date)+265, y = demandLabelCoordinate, label = "Demand", color="#0868AC") +
annotate("text", x = median(dataForecast$date)+265, y = supplyLabelCoordinate, label = "Supply", color="#FB8072")
}
#makePlot(busAForecast,"Business A")
### develop plotly chart
# Hand-entered gender-by-manager headcounts for two periods
# ('prevYear' actuals and 'forecast'), reshaped into within-group
# percentages plus label positions for a stacked bar chart.
diversity<- data.frame(
period=c(rep('prevYear',4),rep('forecast',4)),
gender=rep(c('Female','Female','Male','Male'),2),
mgr=rep(c('Manager','Individual Contributor'),4),
count=c(91,1097,518,1341,227,874,719,1092)) %>%
mutate(gender=as.factor(gender)) %>%
# make 'Male' the reference/first level so stack order is consistent
mutate(gender=relevel(gender, "Male")) %>%
group_by(period,mgr) %>%
# total headcount per period x manager-status cell
mutate(total=sum(count)) %>%
ungroup() %>%
# share of each gender within its cell, to 3 decimals
mutate(countPect=round(count/total,3)) %>%
group_by(mgr,period) %>%
# vertical midpoint of each stacked segment, for geom_text placement
mutate(label_y = cumsum(countPect) - 0.5 * countPect)
str(diversity)
# purple = Male, green = Female; named vector for scale_fill_manual
colors1<-c("#9970AB","#5AAE61")
myColors<-colors1
names(myColors) <- c('Male','Female')
# Format a proportion as a percentage string, e.g. 0.253 -> "25.3%".
# Extra arguments in ... are forwarded to formatC().
percent <- function(x, digits = 1, format = "f", ...) {
  scaled <- formatC(x * 100, format = format, digits = digits, ...)
  paste0(scaled, "%")
}
# Horizontal stacked bar chart: gender share of headcount, faceted by
# manager status, comparing the current and projected periods.
#ggplot(diversity,aes(x = period, y = countPect, fill=forcats::fct_rev(gender))) +
ggplot(diversity,aes(x = period, y = countPect, fill=gender)) +
geom_bar(stat='identity') +
scale_fill_manual(values=myColors) +
coord_flip() +
facet_grid(. ~ mgr) +
scale_y_continuous("Percent of Total Headcount",labels = scales::percent_format()) +
# axis labels map period codes to human-readable names; order matches
# the factor levels of 'period' ('forecast' sorts before 'prevYear')
scale_x_discrete('',labels=c('Projected \n (2023 Year End)','Current \n (2020 Year End)')) +
labs(title = 'Gender Representation', subtitle = 'Current and Projected') +
#theme(plot.title = element_text(hjust = 0.5)) +
theme(legend.position = c(.85, 0.9), legend.title = element_blank()) +
theme(plot.background = element_rect(colour = "black",size = 1)) +
# percentage label centered in each stacked segment (label_y computed above)
geom_text(aes(y = label_y, label = percent(countPect)), colour = "white") +
# reference line at 25% representation
geom_hline(yintercept=.25)
| /01_scripts/99_functions.R | no_license | chrisdreece/deiDashboard | R | false | false | 3,990 | r |
### experiment and create functions
### make Hist
makeHist <- function(year,length,startHeadcount,endHeadcount) {
  # Build a synthetic quarterly headcount history: a linear trend from
  # startHeadcount to endHeadcount over 'length' quarters starting in
  # 'year', with uniform noise of +/-50 added to each observation.
  #
  # year           - start year passed to timetk::tk_make_timeseries
  # length         - number of quarterly observations to generate
  # startHeadcount - headcount at the first quarter (before noise)
  # endHeadcount   - headcount at the last quarter (before noise)
  #
  # Returns a tibble with columns 'date' and 'headcount'.
  tibble(
    date = tk_make_timeseries(year, by = "quarter", length_out = length),
    # spell out length.out: 'length = length' relied on partial
    # argument matching in seq()
    headcount = round(seq(startHeadcount, endHeadcount, length.out = length), 0),
    noise = round(runif(length, min = -50, max = 50), 0)
  ) %>%
    mutate(headcount = headcount + noise) %>%
    select(-noise)
}
### make Forecast
makeForecast <- function(dataHist,yearForecastStart,length,growthSlope,supplySlope) {
  # Extend a headcount history with linear demand and supply forecasts.
  #
  # dataHist          - tibble with 'date' and 'headcount' (e.g. from makeHist)
  # yearForecastStart - first year of the forecast horizon
  # length            - number of forecast quarters
  # growthSlope       - total percent growth in demand over the horizon
  # supplySlope       - total percent decline in supply over the horizon
  #
  # Returns dataHist with the forecast rows appended and two new columns,
  # 'forecast' (demand) and 'forecastSupply' (supply). Both series are
  # anchored to the last historical headcount so the lines connect when
  # plotted.
  current <- dataHist$headcount[dataHist$date == max(dataHist$date)]
  ending <- current + current * growthSlope / 100
  start <- current + (ending - current) / length
  endingSupply <- current - current * supplySlope / 100
  startSupply <- current - (current - endingSupply) / length
  tibble(
    date = tk_make_timeseries(yearForecastStart, by = "quarter", length_out = length),
    # spell out length.out: 'length = length' relied on partial argument
    # matching in seq(); also dropped the trailing comma left in the
    # original tibble() call
    forecast = round(seq(start, ending, length.out = length), 0),
    forecastSupply = round(seq(startSupply, endingSupply, length.out = length), 0)
  ) %>%
    bind_rows(dataHist, .) %>%
    # overwrite the anchor quarter with the actual last headcount
    mutate(forecast = ifelse(date == max(dataHist$date), headcount, forecast),
           forecastSupply = ifelse(date == max(dataHist$date), headcount, forecastSupply))
}
#busAForecast<-makeForecast(busA,"2021",lengthOut,50,30)
makePlot <- function(dataForecast) {
demandLabelCoordinate<-min(dataForecast$forecast, na.rm=TRUE)+(max(dataForecast$forecast, na.rm=TRUE)-min(dataForecast$forecast, na.rm=TRUE))/2
supplyLabelCoordinate<-min(dataForecast$forecastSupply, na.rm=TRUE)+(max(dataForecast$forecastSupply, na.rm=TRUE)-min(dataForecast$forecastSupply, na.rm=TRUE))/2
dataForecast %>%
ggplot(aes(date, headcount)) +
geom_line(aes(x = date, y = headcount),na.rm=TRUE,color="#878787", size=1) +
geom_point(aes(x = date, y = headcount),na.rm=TRUE,color="#878787",size=1) +
geom_line(aes(x = date, y = forecast),na.rm=TRUE,color="#0868AC", size=1) +
geom_line(aes(x = date, y = forecastSupply),na.rm=TRUE,color="#FB8072", size=1) +
theme_fivethirtyeight() +
ylab("Headcount") +
#labs(title="Projected Supply and Demand") +
theme(axis.title.y = element_text()) +
annotate("text", x = median(dataForecast$date)+265, y = demandLabelCoordinate, label = "Demand", color="#0868AC") +
annotate("text", x = median(dataForecast$date)+265, y = supplyLabelCoordinate, label = "Supply", color="#FB8072")
}
#makePlot(busAForecast,"Business A")
### develop plotly chart
diversity<- data.frame(
period=c(rep('prevYear',4),rep('forecast',4)),
gender=rep(c('Female','Female','Male','Male'),2),
mgr=rep(c('Manager','Individual Contributor'),4),
count=c(91,1097,518,1341,227,874,719,1092)) %>%
mutate(gender=as.factor(gender)) %>%
mutate(gender=relevel(gender, "Male")) %>%
group_by(period,mgr) %>%
mutate(total=sum(count)) %>%
ungroup() %>%
mutate(countPect=round(count/total,3)) %>%
group_by(mgr,period) %>%
mutate(label_y = cumsum(countPect) - 0.5 * countPect)
str(diversity)
colors1<-c("#9970AB","#5AAE61")
myColors<-colors1
names(myColors) <- c('Male','Female')
# Format a proportion as a percentage string, e.g. 0.253 -> "25.3%".
# Extra arguments in ... are forwarded to formatC().
percent <- function(x, digits = 1, format = "f", ...) {
  scaled <- formatC(x * 100, format = format, digits = digits, ...)
  paste0(scaled, "%")
}
#ggplot(diversity,aes(x = period, y = countPect, fill=forcats::fct_rev(gender))) +
ggplot(diversity,aes(x = period, y = countPect, fill=gender)) +
geom_bar(stat='identity') +
scale_fill_manual(values=myColors) +
coord_flip() +
facet_grid(. ~ mgr) +
scale_y_continuous("Percent of Total Headcount",labels = scales::percent_format()) +
scale_x_discrete('',labels=c('Projected \n (2023 Year End)','Current \n (2020 Year End)')) +
labs(title = 'Gender Representation', subtitle = 'Current and Projected') +
#theme(plot.title = element_text(hjust = 0.5)) +
theme(legend.position = c(.85, 0.9), legend.title = element_blank()) +
theme(plot.background = element_rect(colour = "black",size = 1)) +
geom_text(aes(y = label_y, label = percent(countPect)), colour = "white") +
geom_hline(yintercept=.25)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_triangle.R
\name{format_triangle_quarter}
\alias{format_triangle_quarter}
\title{Format labels for quarter Lexis triangles}
\usage{
format_triangle_quarter(x, age, break_max = 400, open_last = TRUE)
}
\arguments{
\item{x}{A vector of Lexis triangle labels.}
\item{age}{A vector of age groups, the same length
as \code{x}.}
\item{break_max}{An integer or \code{NULL}.
Defaults to 400.}
\item{open_last}{Whether the final age group
has no upper limit. Defaults to \code{TRUE}.}
}
\value{
A factor with the same length as
\code{x}.
}
\description{
Format labels for one-quarter (three-month)
Lexis triangles to be used with one-quarter
age groups and periods.
}
\details{
\code{age} gives the age group to which each triangle
in \code{x} belongs. All age groups in \code{age}
must have a width of one quarter,
except for any open age groups.
\code{open_last} determines whether the
allocation of triangles needs to
account for an open age group, and \code{break_max}
specifies the cut-off for the open age group.
See \code{\link{format_age_quarter}} for a description
of how \code{open_last} and \code{break_max}
control age groups.
When \code{break_max} is \code{NULL},
the return value from \code{format_triangle_quarter}
is identical to \code{x}. When \code{break_max}
is non-\code{NULL}, the return value is as follows.
\tabular{lll}{
\code{x} \tab \code{age} \tab return value \cr
\code{"Lower"} \tab \code{<= break_max} \tab \code{"Lower"} \cr
\code{"Lower"} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{"Lower"} \tab \code{NA} \tab \code{NA} \cr
\code{"Upper"} \tab \code{<= break_max} \tab \code{"Upper"} \cr
\code{"Upper"} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{"Upper"} \tab \code{NA} \tab \code{"Upper"} \cr
\code{NA} \tab \code{<= break_max} \tab \code{NA} \cr
\code{NA} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{NA} \tab \code{NA} \tab \code{NA} \cr
}
}
\examples{
## we construct 'x' and 'age' from
## dates information ourselves before
## calling 'format_triangle_quarter'
date_original <- c("2024-03-27", "2022-11-09")
dob_original <- "2020-01-01"
x <- date_to_triangle_quarter(date = date_original,
dob = dob_original)
age <- date_to_age_quarter(date = date_original,
dob = dob_original)
format_triangle_quarter(x = x,
age = age)
## someone else has constructed
## 'x' and 'age' from
## dates information
x_processed <- c("Lower", "Lower", "Lower")
age_processed <- c("10", "16+", "5")
format_triangle_quarter(x = x_processed,
age = age_processed,
break_max = NULL)
## alternative value for 'break_max'
format_triangle_quarter(x = x_processed,
age = age_processed,
break_max = NULL)
}
\seealso{
Other functions for reformating
triangle labels are
\code{\link{format_triangle_year}},
\code{\link{format_triangle_multi}},
\code{\link{format_triangle_births}},
and \code{\link{format_triangle_month}}.
\code{\link{date_to_triangle_quarter}} creates
one-quarter Lexis triangles from dates.
}
| /man/format_triangle_quarter.Rd | permissive | bayesiandemography/demprep | R | false | true | 3,227 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_triangle.R
\name{format_triangle_quarter}
\alias{format_triangle_quarter}
\title{Format labels for quarter Lexis triangles}
\usage{
format_triangle_quarter(x, age, break_max = 400, open_last = TRUE)
}
\arguments{
\item{x}{A vector of Lexis triangle labels.}
\item{age}{A vector of age groups, the same length
as \code{x}.}
\item{break_max}{An integer or \code{NULL}.
Defaults to 400.}
\item{open_last}{Whether the final age group
has no upper limit. Defaults to \code{TRUE}.}
}
\value{
A factor with the same length as
\code{x}.
}
\description{
Format labels for one-quarter (three-month)
Lexis triangles to be used with one-quarter
age groups and periods.
}
\details{
\code{age} gives the age group to which each triangle
in \code{x} belongs. All age groups in \code{age}
must have a width of one quarter,
except for any open age groups.
\code{open_last} determines whether the
allocation of triangles needs to
account for an open age group, and \code{break_max}
specifies the cut-off for the open age group.
See \code{\link{format_age_quarter}} for a description
of how \code{open_last} and \code{break_max}
control age groups.
When \code{break_max} is \code{NULL},
the return value from \code{format_triangle_quarter}
is identical to \code{x}. When \code{break_max}
is non-\code{NULL}, the return value is as follows.
\tabular{lll}{
\code{x} \tab \code{age} \tab return value \cr
\code{"Lower"} \tab \code{<= break_max} \tab \code{"Lower"} \cr
\code{"Lower"} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{"Lower"} \tab \code{NA} \tab \code{NA} \cr
\code{"Upper"} \tab \code{<= break_max} \tab \code{"Upper"} \cr
\code{"Upper"} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{"Upper"} \tab \code{NA} \tab \code{"Upper"} \cr
\code{NA} \tab \code{<= break_max} \tab \code{NA} \cr
\code{NA} \tab \code{> break_max} \tab \code{"Upper"} \cr
\code{NA} \tab \code{NA} \tab \code{NA} \cr
}
}
\examples{
## we construct 'x' and 'age' from
## dates information ourselves before
## calling 'format_triangle_quarter'
date_original <- c("2024-03-27", "2022-11-09")
dob_original <- "2020-01-01"
x <- date_to_triangle_quarter(date = date_original,
dob = dob_original)
age <- date_to_age_quarter(date = date_original,
dob = dob_original)
format_triangle_quarter(x = x,
age = age)
## someone else has constructed
## 'x' and 'age' from
## dates information
x_processed <- c("Lower", "Lower", "Lower")
age_processed <- c("10", "16+", "5")
format_triangle_quarter(x = x_processed,
age = age_processed,
break_max = NULL)
## alternative value for 'break_max'
format_triangle_quarter(x = x_processed,
age = age_processed,
break_max = NULL)
}
\seealso{
Other functions for reformating
triangle labels are
\code{\link{format_triangle_year}},
\code{\link{format_triangle_multi}},
\code{\link{format_triangle_births}},
and \code{\link{format_triangle_month}}.
\code{\link{date_to_triangle_quarter}} creates
one-quarter Lexis triangles from dates.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractor.R
\name{tableXMLs}
\alias{tableXMLs}
\title{Return xml links by organization}
\usage{
tableXMLs(filepath = NULL)
}
\description{
Returns a list of two tables: a Publishers table with details on each publisher, and an XMLs table with links to all available xmls with the associated name and publisher.
}
| /IATIextractor/man/tableXMLs.Rd | no_license | Humanitarian-AI/IATI-Extractor | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractor.R
\name{tableXMLs}
\alias{tableXMLs}
\title{Return xml links by organization}
\usage{
tableXMLs(filepath = NULL)
}
\description{
Returns a list of two tables: a Publishers table with details on each publisher, and an XMLs table with links to all available xmls with the associated name and publisher.
}
|
# example 1.1 of section 1.2.3
# (example 1.1 of section 1.2.3) : The data science process : Stages of a data science project : Modeling
# Title: Building a decision tree
library('rpart')
load('GCDData.RData')
model <- rpart(Good.Loan ~
Duration.in.month +
Installment.rate.in.percentage.of.disposable.income +
Credit.amount +
Other.installment.plans,
data=d,
control=rpart.control(maxdepth=4),
method="class")
# example 1.2 of section 1.2.4
# (example 1.2 of section 1.2.4) : The data science process : Stages of a data science project : Model evaluation and critique
# Title: Plotting the confusion matrix
creditdata <- d
resultframe <- data.frame(Good.Loan=creditdata$Good.Loan,
pred=predict(model, type="class"))
rtab <- table(resultframe) # Note: 1
rtab
## pred
## Good.Loan BadLoan GoodLoan
## BadLoan 41 259
## GoodLoan 13 687
sum(diag(rtab))/sum(rtab) # Note: 2
## [1] 0.728
sum(rtab[1,1])/sum(rtab[,1]) # Note: 3
## [1] 0.7592593
sum(rtab[1,1])/sum(rtab[1,]) # Note: 4
## [1] 0.1366667
sum(rtab[2,1])/sum(rtab[2,]) # Note: 5
## [1] 0.01857143
# Note 1:
# Create the confusion matrix. Rows represent
# actual loan status; columns represent predicted
# loan status. The diagonal entries represent
# correct predictions.
# Note 2: 
# Overall model accuracy: 73% of the predictions
# were correct.
# Note 3: 
# Model precision: 76% of the applicants
# predicted as bad really did default.
# Note 4: 
# Model recall: the model found 14% of the
# defaulting loans.
# Note 5: 
# False positive rate: 2% of the good applicants
# were mistakenly identified as bad.
# example 1.3 of section 1.3.1
# (example 1.3 of section 1.3.1) : The data science process : Setting expectations : Determining lower and upper bounds on model performance
# Title: Plotting the relation between disposable income and loan outcome
tab1 <- as.table(matrix(data=c(50,6,0,44),nrow=2,ncol=2))
dimnames(tab1) <- list('loan.as.pct.disposable.income'=
c('LT.15pct','GT.15pct'),
'loan.quality.pop1'=
c('goodloan','badloan'))
tab2 <- as.table(matrix(data=c(34,18,16,32),nrow=2,ncol=2))
dimnames(tab2) <- list('loan.as.pct.disposable.income'=
c('LT.15pct','GT.15pct'),
'loan.quality.pop2'=
c('goodloan','badloan'))
tab1
## loan.quality.pop1 # Note: 1
## loan.as.pct.disposable.income goodloan badloan
## LT.15pct 50 0
## GT.15pct 6 44
sum(diag(tab1))/sum(tab1) # Note: 2
## [1] 0.94
tab2
## loan.quality.pop2 # Note: 3
## loan.as.pct.disposable.income goodloan badloan
## LT.15pct 34 16
## GT.15pct 18 32
sum(diag(tab2))/sum(tab2)
## [1] 0.66 # Note: 4
# Note 1:
# The count of correct predictions is on the
# diagonal of tab1. In this first population, all
# the loans that were less than 15% of disposable
# income were good loans, and all but six of the
# loans that were greater than 15% of disposable
# income defaulted. So you know that
# loan.as.pct.disposable.income models loan quality
# well in this population. Or as statisticians might
# say, loan.as.pct.disposable.income “explains” the
# output (loan quality).
# Note 2:
# In fact, it’s 94% accurate.
# Note 3:
# In the second population, about a third of
# the loans that were less than 15% of disposable
# income defaulted, and over half of the loans that
# were greater than 15% of disposable income were
# good. So you know that
# loan.as.pct.disposable.income doesn’t model loan
# quality well in this population.
# Note 4:
# The rule of thumb is only 66%
# accurate.
# example 2.1 of section 2.1.1
# (example 2.1 of section 2.1.1) : Loading data into R : Working with data from files : Working with well-structured data from files or URLs
# Title: Reading the UCI car data
uciCar <- read.table( # Note: 1
'http://www.win-vector.com/dfiles/car.data.csv', # Note: 2
sep=',', # Note: 3
header=T # Note: 4
)
# Note 1:
# Command to read from a file or URL and store the result in a new data frame object
# called
# uciCar.
# Note 2:
# Filename or URL to get the data from.
# Note 3:
# Specify the column or field separator as a
# comma.
# Note 4:
# Tell R to expect a header line that defines
# the data column names.
# example 2.2 of section 2.1.1
# (example 2.2 of section 2.1.1) : Loading data into R : Working with data from files : Working with well-structured data from files or URLs
# Title: Exploring the car data
class(uciCar)
## [1] "data.frame" # Note: 1
summary(uciCar)
## buying maint doors
## high :432 high :432 2 :432
## low :432 low :432 3 :432
## med :432 med :432 4 :432
## vhigh:432 vhigh:432 5more:432
##
## persons lug_boot safety
## 2 :576 big :576 high:576
## 4 :576 med :576 low :576
## more:576 small:576 med :576
##
## rating
## acc : 384
## good : 69
## unacc:1210
## vgood: 65
dim(uciCar)
## [1] 1728 7 # Note: 2
# Note 1:
# The loaded object uciCar is of type data.frame.
# Note 2:
# The [1] is just an output sequence
# marker. The actual information is this: uciCar has
# 1728 rows and 7 columns. Always try to confirm you
# got a good parse by at least checking that the
# number of rows is exactly one fewer than the
# number of lines of text in the original file. The
# difference of one is because the column header
# counts as a line, but not as a data row.
# example 2.3 of section 2.1.2
# (example 2.3 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Loading the credit dataset
d <- read.table(paste('http://archive.ics.uci.edu/ml/',
'machine-learning-databases/statlog/german/german.data',sep=''),
stringsAsFactors=F,header=F)
print(d[1:3,])
# example 2.4 of section 2.1.2
# (example 2.4 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Setting column names
colnames(d) <- c('Status.of.existing.checking.account',
'Duration.in.month', 'Credit.history', 'Purpose',
'Credit.amount', 'Savings account/bonds',
'Present.employment.since',
'Installment.rate.in.percentage.of.disposable.income',
'Personal.status.and.sex', 'Other.debtors/guarantors',
'Present.residence.since', 'Property', 'Age.in.years',
'Other.installment.plans', 'Housing',
'Number.of.existing.credits.at.this.bank', 'Job',
'Number.of.people.being.liable.to.provide.maintenance.for',
'Telephone', 'foreign.worker', 'Good.Loan')
d$Good.Loan <- as.factor(ifelse(d$Good.Loan==1,'GoodLoan','BadLoan'))
print(d[1:3,])
# example 2.5 of section 2.1.2
# (example 2.5 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Building a map to interpret loan use codes
mapping <- list(
'A40'='car (new)',
'A41'='car (used)',
'A42'='furniture/equipment',
'A43'='radio/television',
'A44'='domestic appliances',
...
)
# example 2.6 of section 2.1.2
# (example 2.6 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Transforming the car data
# Remap every character column of d through the 'mapping' lookup list,
# converting the decoded values to factors.
for(i in seq_len(ncol(d))) { # Note: 1 -- seq_len(ncol(d)) replaces 1:(dim(d))[2]; safe even if d had zero columns
  if(is.character(d[,i])) {
    d[,i] <- as.factor(as.character(mapping[d[,i]])) # Note: 2
  }
}
# Note 1:
# (dim(d))[2] is the number of columns
# in the data frame d.
# Note 2:
# Note that the indexing operator [] is vectorized. Each step in the for loop remaps an
# entire column of data through our list.
# example 2.7 of section 2.1.2
# (example 2.7 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Summary of Good.Loan and Purpose
table(d$Purpose,d$Good.Loan)
## BadLoan GoodLoan
## business 34 63
## car (new) 89 145
## car (used) 17 86
## domestic appliances 4 8
## education 22 28
## furniture/equipment 58 123
## others 5 7
## radio/television 62 218
## repairs 8 14
## retraining 1 8
# example 2.11 of section 2.2.2
# (example 2.11 of section 2.2.2) : Loading data into R : Working with relational databases : Loading data from a database into R
# Title: Loading data into R from a relational database
options( java.parameters = "-Xmx2g" ) # Note: 1
library(RJDBC)
drv <- JDBC("org.h2.Driver", # Note: 2
"h2-1.3.176.jar", # Note: 3
identifier.quote="'") # Note: 4
options<-";LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0"
conn <- dbConnect(drv,paste("jdbc:h2:./H2DB",options,sep=''),"u","u")
dhus <- dbGetQuery(conn,"SELECT * FROM hus WHERE ORIGRANDGROUP<=1") # Note: 5
dpus <- dbGetQuery(conn,"SELECT pus.* FROM pus WHERE pus.SERIALNO IN \
(SELECT DISTINCT hus.SERIALNO FROM hus \
WHERE hus.ORIGRANDGROUP<=1)") # Note: 6
dbDisconnect(conn) # Note: 7
save(dhus,dpus,file='phsample.RData') # Note: 8
# Note 1:
# Set Java option for extra memory before DB
# drivers are loaded.
# Note 2:
# Specify the name of the database driver, same
# as in our XML database configuration.
# Note 3:
# Specify where to find the implementation of
# the database driver.
# Note 4:
# SQL column names with mixed-case
# capitalization, special characters, or that
# collide with reserved words must be quoted. We
# specify single-quote as the quote we’ll use when
# quoting column names, which may different than the
# quote we use for SQL literals.
# Note 5:
# Create a data frame called dhus from *
# (everything) from the database table hus, taking
# only rows where ORGINRANGGROUP <= 1. The
# ORGINRANDGROUP column is a random integer from 0
# through 999 that SQL Screwdriver adds to the rows
# during data load to facilitate sampling. In this
# case, we’re taking 2/1000 of the data rows to get
# a small sample.
# Note 6:
# Create a data frame called dpus from the
# database table pus, taking only records that have
# a household ID in the set of household IDs we
# selected from households table hus.
# Note 7:
# Disconnect for the database.
# Note 8:
# Save the two data frames into a file named
# phsample.RData, which can be read in with load().
# Try help("save") or help("load") for more
# details.
# example 2.12 of section 2.2.3
# (example 2.12 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Selecting a subset of the Census data
# Load the PUMS sample saved by the earlier database-extraction step
# (presumably the dhus/dpus data frames -- phsample.RData must be in the
# working directory; TODO confirm its contents against the save() call).
load('phsample.RData')
# Keep only full-time employed adults (20-50) with plausible personal
# income/earnings and valid class-of-worker / education codes.
psub = subset(dpus,with(dpus,(PINCP>1000)&(ESR==1)&
(PINCP<=250000)&(PERNP>1000)&(PERNP<=250000)&
(WKHP>=40)&(AGEP>=20)&(AGEP<=50)&
(PWGTP1>0)&(COW %in% (1:7))&(SCHL %in% (1:24)))) # Note: 1
# Note 1:
# Subset of data rows matching detailed
# employment conditions
# example 2.13 of section 2.2.3
# (example 2.13 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Recoding variables
psub$SEX = as.factor(ifelse(psub$SEX==1,'M','F')) # Note: 1
psub$SEX = relevel(psub$SEX,'M') # Note: 2
# Readable labels for class-of-worker codes: cowmap[i] labels COW code i.
cowmap <- c("Employee of a private for-profit",
"Private not-for-profit employee",
"Local government employee",
"State government employee",
"Federal government employee",
"Self-employed not incorporated",
"Self-employed incorporated")
psub$COW = as.factor(cowmap[psub$COW]) # Note: 3
psub$COW = relevel(psub$COW,cowmap[1])
# schlmap[i] labels education code i; codes 1-15 collapse to one level.
schlmap = c( # Note: 4
rep("no high school diploma",15),
"Regular high school diploma",
"GED or alternative credential",
"some college credit, no degree",
"some college credit, no degree",
"Associate's degree",
"Bachelor's degree",
"Master's degree",
"Professional degree",
"Doctorate degree")
psub$SCHL = as.factor(schlmap[psub$SCHL])
psub$SCHL = relevel(psub$SCHL,schlmap[1])
dtrain = subset(psub,ORIGRANDGROUP >= 500) # Note: 5
dtest = subset(psub,ORIGRANDGROUP < 500) # Note: 6
# Note 1:
# Reencode sex from 1/2 to M/F.
# Note 2:
# Make the reference sex M, so F encodes a
# difference from M in models.
# Note 3:
# Reencode class of worker info into a more
# readable form.
# Note 4:
# Reencode education info into a more readable
# form and fewer levels (merge all levels below high
# school into same encoding).
# Note 5:
# Subset of data rows used for model
# training.
# Note 6:
# Subset of data rows used for model
# testing.
# example 2.14 of section 2.2.3
# (example 2.14 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Summarizing the classifications of work
# Per-level counts of the class-of-worker factor in the training split
# (transcript of the observed output follows).
summary(dtrain$COW)
## Employee of a private for-profit Federal government employee
## 423 21
## Local government employee Private not-for-profit employee
## 39 55
## Self-employed incorporated Self-employed not incorporated
## 17 16
## State government employee
## 24
# example 3.1 of section 3.1
# (example 3.1 of section 3.1) : Exploring data : Using summary statistics to spot problems
# Title: The summary() command
# Read the tab-separated customer data (custdata.tsv must be in the
# working directory) and inspect every column at once with summary().
custdata <- read.table('custdata.tsv',
header=TRUE,sep='\t')
summary(custdata)
## custid sex
## Min. : 2068 F:440
## 1st Qu.: 345667 M:560
## Median : 693403
## Mean : 698500
## 3rd Qu.:1044606
## Max. :1414286
##
## is.employed income # Note: 1
## Mode :logical Min. : -8700
## FALSE:73 1st Qu.: 14600
## TRUE :599 Median : 35000
## NA's :328 Mean : 53505
## 3rd Qu.: 67000
## Max. :615000
##
## marital.stat
## Divorced/Separated:155
## Married :516
## Never Married :233
## Widowed : 96
##
## health.ins # Note: 2
## Mode :logical
## FALSE:159
## TRUE :841
## NA's :0
##
## housing.type # Note: 3
## Homeowner free and clear :157
## Homeowner with mortgage/loan:412
## Occupied with no rent : 11
## Rented :364
## NA's : 56
##
## recent.move num.vehicles
## Mode :logical Min. :0.000
## FALSE:820 1st Qu.:1.000
## TRUE :124 Median :2.000
## NA's :56 Mean :1.916
## 3rd Qu.:2.000
## Max. :6.000
## NA's :56
##
## age state.of.res # Note: 4
## Min. : 0.0 California :100
## 1st Qu.: 38.0 New York : 71
## Median : 50.0 Pennsylvania: 70
## Mean : 51.7 Texas : 56
## 3rd Qu.: 64.0 Michigan : 52
## Max. :146.7 Ohio : 51
## (Other) :600
# Note 1:
# The variable is.employed is missing for
# about a third of the data. The variable income has negative values, which are
# potentially invalid.
# Note 2:
# About 84% of the customers have health
# insurance.
# Note 3:
# The variables housing.type, recent.move, and
# num.vehicles are each missing 56 values.
# Note 4:
# The average value of the variable age seems
# plausible, but the minimum and maximum values seem unlikely. The variable
# state.of.res is a categorical variable; summary() reports how many customers are in
# each state (for the first few states).
# example 3.3 of section 3.1.1
# (example 3.3 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Examples of invalid values and outliers
# Spot-check two numeric columns for invalid values (negative income)
# and outliers (implausible ages).
summary(custdata$income)
## Min. 1st Qu. Median Mean 3rd Qu.
## -8700 14600 35000 53500 67000 # Note: 1
## Max.
## 615000
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu.
## 0.0 38.0 50.0 51.7 64.0 # Note: 2
## Max.
## 146.7
# Note 1:
# Negative values for income could indicate
# bad data. They might also have a special meaning, like “amount of
# debt.” Either way, you should check how prevalent the issue is,
# and decide what to do: Do you drop the data with negative income? Do you
# convert negative values to zero?
# Note 2:
# Customers of age zero, or customers of an
# age greater than about 110 are outliers. They fall out of the range of
# expected customer values. Outliers could be data input errors.
# They could be special sentinel values: zero might mean “age unknown” or
# “refuse to state.” And some of your customers might be especially
# long-lived.
# example 3.4 of section 3.1.1
# (example 3.4 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Looking at the data range of a variable
summary(custdata$income)
## Min. 1st Qu. Median Mean 3rd Qu.
## -8700 14600 35000 53500 67000 # Note: 1
## Max.
## 615000
# Note 1:
# Income ranges from zero to over half a million
# dollars; a very wide range.
# example 3.5 of section 3.1.1
# (example 3.5 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Checking units sounds silly, but mistakes can lead to spectacular errors if not caught
# Rescale income to thousands of dollars to illustrate how a summary
# alone cannot reveal the units of a variable.
Income = custdata$income/1000
summary(Income) # Note: 1
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -8.7 14.6 35.0 53.5 67.0 615.0
# Note 1:
# The variable Income is defined as Income = custdata$income/1000. But suppose you didn’t know
# that. Looking only at the summary, the values could plausibly be
# interpreted to mean either “hourly wage” or “yearly income in units
# of $1000.”
# example 3.6 of section 3.2.1
# (example 3.6 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Plotting a histogram
library(ggplot2) # Note: 1
# Histogram of customer age in five-year bins.
ggplot(custdata) +
geom_histogram(aes(x=age),
binwidth=5, fill="gray") # Note: 2
# Note 1:
# Load the ggplot2 library, if you haven’t
# already done so.
# Note 2:
# The binwidth parameter tells the
# geom_histogram call how to make bins of five-year intervals (default is
# datarange/30). The fill parameter specifies the color of the histogram
# bars (default: black).
# example 3.7 of section 3.2.1
# (example 3.7 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a density plot
library(scales) # Note: 1
ggplot(custdata) + geom_density(aes(x=income)) +
scale_x_continuous(labels=dollar) # Note: 2
# Note 1:
# The scales package brings in the dollar
# scale notation.
# Note 2:
# Set the x-axis labels to
# dollars.
# example 3.8 of section 3.2.1
# (example 3.8 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Creating a log-scaled density plot
ggplot(custdata) + geom_density(aes(x=income)) +
scale_x_log10(breaks=c(100,1000,10000,100000), labels=dollar) + # Note: 1
annotation_logticks(sides="bt") # Note: 2
# Note 1:
# Set the x-axis to be in log10 scale, with
# manually set tick points and labels as dollars.
# Note 2:
# Add log-scaled tick marks to the top and
# bottom of the graph.
# informalexample 3.2 of section 3.2.1
# (informalexample 3.2 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Simple bar chart of a categorical variable (marital status counts).
ggplot(custdata) + geom_bar(aes(x=marital.stat), fill="gray")
# example 3.9 of section 3.2.1
# (example 3.9 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a horizontal bar chart
ggplot(custdata) +
geom_bar(aes(x=state.of.res), fill="gray") + # Note: 1
coord_flip() + # Note: 2
theme(axis.text.y=element_text(size=rel(0.8))) # Note: 3
# Note 1:
# Plot bar chart as before: state.of.res
# is on x axis, count is on y-axis.
# Note 2:
# Flip the x and y axes: state.of.res is
# now on the y-axis.
# Note 3:
# Reduce the size of the y-axis tick
# labels to 80% of default size for legibility.
# example 3.10 of section 3.2.1
# (example 3.10 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a bar chart with sorted categories
# Aggregate counts by state, reorder the factor by count, then plot the
# pre-aggregated counts as a horizontal bar chart.
statesums <- table(custdata$state.of.res) # Note: 1
statef <- as.data.frame(statesums) # Note: 2
colnames(statef)<-c("state.of.res", "count") # Note: 3
summary(statef) # Note: 4
## state.of.res count
## Alabama : 1 Min. : 1.00
## Alaska : 1 1st Qu.: 5.00
## Arizona : 1 Median : 12.00
## Arkansas : 1 Mean : 20.00
## California: 1 3rd Qu.: 26.25
## Colorado : 1 Max. :100.00
## (Other) :44
statef <- transform(statef,
state.of.res=reorder(state.of.res, count)) # Note: 5
summary(statef) # Note: 6
## state.of.res count
## Delaware : 1 Min. : 1.00
## North Dakota: 1 1st Qu.: 5.00
## Wyoming : 1 Median : 12.00
## Rhode Island: 1 Mean : 20.00
## Alaska : 1 3rd Qu.: 26.25
## Montana : 1 Max. :100.00
## (Other) :44
ggplot(statef)+ geom_bar(aes(x=state.of.res,y=count),
stat="identity", # Note: 7
fill="gray") +
coord_flip() + # Note: 8
theme(axis.text.y=element_text(size=rel(0.8)))
# Note 1:
# The table() command aggregates the data by state of residence—exactly the information the bar
# chart plots.
# Note 2:
# Convert the table to a data frame. The default column names are Var1 and Freq.
# Note 3:
# Rename the columns for readability.
# Note 4:
# Notice that the default ordering for the
# state.of.res variable is alphabetical.
# Note 5:
# Use the reorder() function to set the
# state.of.res variable to be count ordered. Use the transform() function
# to apply the transformation to the state.of.res data frame.
# Note 6:
# The state.of.res variable is now count
# ordered.
# Note 7:
# Since the data is being passed to
# geom_bar pre-aggregated, specify both the x and
# y variables, and use stat="identity" to plot the
# data exactly as given.
# Note 8:
# Flip the axes and reduce the size of the
# label text as before.
# example 3.11 of section 3.2.2
# (example 3.11 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Producing a line plot
# Synthetic data: y is a deterministic quadratic function of random x.
x <- runif(100) # Note: 1
y <- x^2 + 0.2*x # Note: 2
ggplot(data.frame(x=x,y=y), aes(x=x,y=y)) + geom_line() # Note: 3
# Note 1:
# First, generate the data for this example.
# The x variable is uniformly randomly distributed
# between 0 and 1.
# Note 2:
# The y variable is a
# quadratic function of x.
# Note 3:
# Plot the line plot.
# example 3.12 of section 3.2.2
# (example 3.12 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Examining the correlation between age and income
# Filter out the implausible ages and nonpositive incomes spotted
# earlier before measuring the age/income relationship.
custdata2 <- subset(custdata,
(custdata$age > 0 & custdata$age < 100
& custdata$income > 0)) # Note: 1
cor(custdata2$age, custdata2$income) # Note: 2
## [1] -0.02240845 # Note: 3
# Note 1:
# Only consider a subset of data with
# reasonable age and income values.
# Note 2:
# Get correlation of age and income.
# Note 3:
# Resulting correlation.
# informalexample 3.3 of section 3.2.2
# (informalexample 3.3 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Plain scatterplot of age vs. income, clipped to incomes <= $200,000.
ggplot(custdata2, aes(x=age, y=income)) +
geom_point() + ylim(0, 200000)
# informalexample 3.4 of section 3.2.2
# (informalexample 3.4 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Same scatterplot with a linear-regression trend line overlaid.
ggplot(custdata2, aes(x=age, y=income)) + geom_point() +
stat_smooth(method="lm") +
ylim(0, 200000)
# informalexample 3.5 of section 3.2.2
# (informalexample 3.5 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Same scatterplot with geom_smooth's default smoothing curve instead.
ggplot(custdata2, aes(x=age, y=income)) +
geom_point() + geom_smooth() +
ylim(0, 200000)
# example 3.13 of section 3.2.2
# (example 3.13 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting the distribution of health.ins as a function of age
ggplot(custdata2, aes(x=age, y=as.numeric(health.ins))) + # Note: 1
geom_point(position=position_jitter(w=0.05, h=0.05)) + # Note: 2
geom_smooth() # Note: 3
# Note 1:
# The Boolean variable health.ins must be
# converted to a 0/1 variable using as.numeric.
# Note 2:
# Since y values can
# only be 0 or 1, add a small jitter to get a sense of data
# density.
# Note 3:
# Add smoothing curve.
# example 3.14 of section 3.2.2
# (example 3.14 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Producing a hexbin plot
library(hexbin) # Note: 1
ggplot(custdata2, aes(x=age, y=income)) +
geom_hex(binwidth=c(5, 10000)) + # Note: 2
geom_smooth(color="white", se=F) + # Note: 3
ylim(0,200000)
# Note 1:
# Load hexbin library.
# Note 2:
# Create hexbin with age binned into 5-year
# increments, income in increments of $10,000.
# Note 3:
# Add smoothing curve in white; suppress
# standard error ribbon (se=F).
# example 3.15 of section 3.2.2
# (example 3.15 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Specifying different styles of bar chart
# Three variants of a two-variable bar chart: stacked, side-by-side,
# and normalized (filled).
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins)) # Note: 1
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins),
position="dodge") # Note: 2
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins),
position="fill") # Note: 3
# Note 1:
# Stacked bar chart, the
# default
# Note 2:
# Side-by-side bar chart
# Note 3:
# Filled bar chart
# example 3.16 of section 3.2.2
# (example 3.16 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting data with a rug
ggplot(custdata, aes(x=marital.stat)) +
geom_bar(aes(fill=health.ins), position="fill") +
geom_point(aes(y=-0.05), size=0.75, alpha=0.3, # Note: 1
position=position_jitter(h=0.01)) # Note: 2
# Note 1:
# Set the points just under the y-axis,
# three-quarters of default size, and make them slightly transparent with
# the alpha parameter.
# Note 2:
# Jitter the points slightly for
# legibility.
# example 3.17 of section 3.2.2
# (example 3.17 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting a bar chart with and without facets
ggplot(custdata2) + # Note: 1
geom_bar(aes(x=housing.type, fill=marital.stat ),
position="dodge") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) # Note: 2
ggplot(custdata2) + # Note: 3
geom_bar(aes(x=marital.stat), position="dodge",
fill="darkgray") +
facet_wrap(~housing.type, scales="free_y") + # Note: 4
theme(axis.text.x = element_text(angle = 45, hjust = 1)) # Note: 5
# Note 1:
# Side-by-side bar chart.
# Note 2:
# Tilt the x-axis labels so they
# don’t overlap. You can also use coord_flip() to rotate the graph, as we
# saw previously. Some prefer coord_flip() because the theme() layer is
# complicated to use.
# Note 3:
# The faceted bar chart.
# Note 4:
# Facet the graph by housing.type. The scales="free_y" argument specifies that each facet has
# an independently scaled y-axis (the default is that all facets have
# the same scales on both axes). The argument free_x would free the
# x-axis scaling, and the argument free frees both axes.
# Note 5:
# As of this writing,
# facet_wrap is incompatible with coord_flip, so we have to tilt the
# x-axis labels.
# example 4.1 of section 4.1.1
# (example 4.1 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Checking locations of missing data
# Re-read the customer data, then check whether the rows missing
# housing.type are the same rows missing recent.move and num.vehicles.
custdata <- read.table('custdata.tsv',
header=TRUE,sep='\t')
summary(custdata[is.na(custdata$housing.type), # Note: 1
c("recent.move","num.vehicles")]) # Note: 2
## recent.move num.vehicles # Note: 3
## Mode:logical Min. : NA
## NA's:56 1st Qu.: NA
## Median : NA
## Mean :NaN
## 3rd Qu.: NA
## Max. : NA
## NA's :56
# Note 1:
# Restrict to the rows where housing.type is
# NA.
# Note 2:
# Look only at the columns recent.move and
# num.vehicles.
# Note 3:
# The output: all NAs. All the missing data
# comes from the same rows.
# example 4.2 of section 4.1.1
# (example 4.2 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Remapping NA to a level
# Turn the logical is.employed (with many NAs) into a three-level
# categorical variable so the missingness itself becomes a level.
custdata$is.employed.fix <- ifelse(is.na(custdata$is.employed), # Note: 1
"missing", # Note: 2
ifelse(custdata$is.employed==T, # Note: 3
"employed",
"not employed")) # Note: 4
summary(as.factor(custdata$is.employed.fix)) # Note: 5
## employed missing not employed
## 599 328 73
# Note 1:
# If is.employed value is missing...
# Note 2:
# ...assign the value "missing".
# Otherwise...
# Note 3:
# ...if is.employed==TRUE, assign the value
# "employed"...
# Note 4:
# ...or the value "not employed".
# Note 5:
# The transformation has turned the variable
# type from factor to string. You can change it back
# with the as.factor() function.
# informalexample 4.1 of section 4.1.1
# (informalexample 4.1 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Same remapping, but with a more meaningful label for the NA level.
custdata$is.employed.fix <- ifelse(is.na(custdata$is.employed),
"not in active workforce",
ifelse(custdata$is.employed==T,
"employed",
"not employed"))
# informalexample 4.2 of section 4.1.1
# (informalexample 4.2 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Note the capital-I Income column here is distinct from custdata$income
# used above; it carries 328 NAs.
summary(custdata$Income)
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 0 25000 45000 66200 82000 615000 328
# informalexample 4.3 of section 4.1.1
# (informalexample 4.3 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Mean imputation: replace missing Income values with the mean of the
# observed values.
meanIncome <- mean(custdata$Income, na.rm=T) # Note: 1
Income.fix <- ifelse(is.na(custdata$Income),
meanIncome,
custdata$Income)
summary(Income.fix)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 35000 66200 66200 66200 615000
# Note 1:
# Don’t forget the argument "na.rm=T"!
# Otherwise, the mean() function will include the
# NAs by default, and meanIncome will be NA.
# example 4.3 of section 4.1.1
# (example 4.3 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Converting missing numeric data to a level
# Discretize income into ranges with cut(), then convert the NAs into
# an explicit "no income" category.
breaks <-c(0, 10000, 50000, 100000, 250000, 1000000) # Note: 1
Income.groups <- cut(custdata$income,
breaks=breaks, include.lowest=T) # Note: 2
summary(Income.groups) # Note: 3
## [0,1e+04] (1e+04,5e+04] (5e+04,1e+05] (1e+05,2.5e+05] (2.5e+05,1e+06]
## 63 312 178 98 21
## NA's
## 328
Income.groups <- as.character(Income.groups) # Note: 4
Income.groups <- ifelse(is.na(Income.groups), # Note: 5
"no income", Income.groups)
summary(as.factor(Income.groups))
## (1e+04,5e+04] (1e+05,2.5e+05] (2.5e+05,1e+06] (5e+04,1e+05] [0,1e+04]
## 312 98 21 178 63
## no income
## 328
# Note 1:
# Select some income ranges of interest. To
# use the cut() function, the upper and lower bounds
# should encompass the full income range of the
# data.
# Note 2:
# Cut the data into income ranges. The
# include.lowest=T argument makes sure that zero
# income data is included in the lowest income range
# category. By default it would be excluded.
# Note 3:
# The cut() function produces factor
# variables. Note the NAs are preserved.
# Note 4:
# To preserve the category names before adding
# a new category, convert the variables to strings.
# Note 5:
# Add the "no income" category to replace the
# NAs.
# example 4.4 of section 4.1.1
# (example 4.4 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Tracking original NAs with an extra categorical variable
missingIncome <- is.na(custdata$Income) # Note: 1
Income.fix <- ifelse(is.na(custdata$Income), 0, custdata$Income) # Note: 2
# Note 1:
# The missingIncome variable lets you
# differentiate the two kinds of zeros in the data:
# the ones that you are about to add, and the ones
# that were already there.
# Note 2:
# Replace the NAs with zeros.
# example 4.5 of section 4.1.2
# (example 4.5 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Normalizing income by state
# Compute each state's median income, merge it back onto the customer
# rows, and express each income as a multiple of the state median.
medianincome <- aggregate(income~state.of.res,custdata,FUN=median)
colnames(medianincome) <- c('State','Median.Income')
summary(medianincome) # Note: 1
## State Median.Income
## : 1 Min. :37427
## Alabama : 1 1st Qu.:47483
## Alaska : 1 Median :52274
## Arizona : 1 Mean :52655
## Arkansas : 1 3rd Qu.:57195
## California: 1 Max. :68187
## (Other) :46
custdata <- merge(custdata, medianincome,
by.x="state.of.res", by.y="State") # Note: 2
summary(custdata[,c("state.of.res", "income", "Median.Income")]) # Note: 3
## state.of.res income Median.Income
## California :100 Min. : -8700 Min. :37427
## New York : 71 1st Qu.: 14600 1st Qu.:44819
## Pennsylvania: 70 Median : 35000 Median :50977
## Texas : 56 Mean : 53505 Mean :51161
## Michigan : 52 3rd Qu.: 67000 3rd Qu.:55559
## Ohio : 51 Max. :615000 Max. :68187
## (Other) :600
custdata$income.norm <- with(custdata, income/Median.Income) # Note: 4
summary(custdata$income.norm)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.1791 0.2729 0.6992 1.0820 1.3120 11.6600
# Note 1:
# medianincome is a data frame of median
# income by state.
# Note 2:
# Merge median income information into the
# custdata data frame by matching the column
# custdata$state.of.res to the column
# medianincome$State.
# Note 3:
# Median.Income is now part of custdata.
# Note 4:
# Normalize income by Median.Income.
# informalexample 4.4 of section 4.1.2
# (informalexample 4.4 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Simple thresholding: a logical indicator for incomes under $20,000.
custdata$income.lt.20K <- custdata$income < 20000
summary(custdata$income.lt.20K)
## Mode FALSE TRUE NA's
## logical 678 322 0
# example 4.6 of section 4.1.2
# (example 4.6 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Converting age into ranges
brks <- c(0, 25, 65, Inf) # Note: 1
custdata$age.range <- cut(custdata$age,
breaks=brks, include.lowest=T) # Note: 2
summary(custdata$age.range) # Note: 3
## [0,25] (25,65] (65,Inf]
## 56 732 212
# Note 1:
# Select the age ranges of interest. The upper
# and lower bounds should encompass the full range
# of the data.
# Note 2:
# Cut the data into age ranges. The
# include.lowest=T argument makes sure that zero age
# data is included in the lowest age range category.
# By default it would be excluded.
# Note 3:
# The output of cut() is a factor variable.
# example 4.7 of section 4.1.2
# (example 4.7 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Centering on mean age
# Simple normalization: divide by the mean, so 1.0 means "average age".
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 38.0 50.0 51.7 64.0 146.7
meanage <- mean(custdata$age)
custdata$age.normalized <- custdata$age/meanage
summary(custdata$age.normalized)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.7350 0.9671 1.0000 1.2380 2.8370
# example 4.8 of section 4.1.2
# (example 4.8 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Summarizing age
# Standardization (z-scoring): center on the mean and rescale by the
# standard deviation. This overwrites the age.normalized column from
# example 4.7 above.
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 38.0 50.0 51.7 64.0 146.7
meanage <- mean(custdata$age) # Note: 1
stdage <- sd(custdata$age) # Note: 2
meanage
## [1] 51.69981
stdage
## [1] 18.86343
custdata$age.normalized <- (custdata$age-meanage)/stdage # Note: 3
summary(custdata$age.normalized)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -2.74100 -0.72630 -0.09011 0.00000 0.65210 5.03500
# Note 1:
# Take the mean.
# Note 2:
# Take the standard deviation.
# Note 3:
# Use the mean value as the origin (or
# reference point) and rescale the distance from the
# mean by the standard deviation.
# informalexample 4.5 of section 4.1.2
# (informalexample 4.5 of section 4.1.2) : Managing data : Cleaning data : Data transformations
signedlog10 <- function(x) {
  # Signed log-scale transform: sign(x) * log10(|x|) when |x| > 1, and
  # exactly 0 on [-1, 1] (where log10 would be negative or -Inf).
  # NA inputs propagate to NA outputs, as with the plain ifelse form.
  ifelse(abs(x) > 1, sign(x) * log10(abs(x)), 0)
}
# example 4.9 of section 4.2.2
# (example 4.9 of section 4.2.2) : Managing data : Sampling for modeling and validation : Creating a sample group column
# Title: Splitting into test and training using a random group mark
# Attach a uniform(0,1) group mark to each row, then split ~10/90 into
# test and training sets by thresholding the mark.
custdata$gp <- runif(dim(custdata)[1]) # Note: 1
testSet <- subset(custdata, custdata$gp <= 0.1) # Note: 2
trainingSet <- subset(custdata, custdata$gp > 0.1) # Note: 3
dim(testSet)[1]
## [1] 93
dim(trainingSet)[1]
## [1] 907
# Note 1:
# dim(custdata) returns the number of rows and
# columns of the data frame as a vector, so
# dim(custdata)[1] returns the number of rows.
# Note 2:
# Here we generate a test set of about 10% of
# the data (93 customers—a little over 9%, actually)
# and train on the remaining 90%.
# Note 3:
# Here we generate a training using the
# remaining data.
# example 4.10 of section 4.2.3
# (example 4.10 of section 4.2.3) : Managing data : Sampling for modeling and validation : Record grouping
# Title: Ensuring test/train split doesn’t split inside a household
# Assign the group mark per household rather than per row, so all rows
# of a household land on the same side of any split.
# (hhdata is assumed to be a data frame with a household_id column --
# it is not defined in this file.)
hh <- unique(hhdata$household_id) # Note: 1
households <- data.frame(household_id = hh, gp = runif(length(hh))) # Note: 2
hhdata <- merge(hhdata, households, by="household_id") # Note: 3
# Note 1:
# Get all unique household IDs from your data
# frame.
# Note 2:
# Create a temporary data frame of household IDs
# and a uniformly random number from 0 to 1.
# Note 3:
# Merge new random sample group column back into
# original data frame.
# example 5.1 of section 5.2.1
# (example 5.1 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Building and applying a logistic regression spam model
# Fit a logistic regression on the pre-marked training split (rgroup is
# a 0-99 random mark already present in spamD.tsv), score both splits,
# and show the test-set confusion matrix at a 0.5 threshold.
spamD <- read.table('spamD.tsv',header=T,sep='\t')
spamTrain <- subset(spamD,spamD$rgroup>=10)
spamTest <- subset(spamD,spamD$rgroup<10)
spamVars <- setdiff(colnames(spamD),list('rgroup','spam'))
spamFormula <- as.formula(paste('spam=="spam"',
paste(spamVars,collapse=' + '),sep=' ~ '))
spamModel <- glm(spamFormula,family=binomial(link='logit'),
data=spamTrain)
spamTrain$pred <- predict(spamModel,newdata=spamTrain,
type='response')
spamTest$pred <- predict(spamModel,newdata=spamTest,
type='response')
print(with(spamTest,table(y=spam,glmPred=pred>0.5)))
## glmPred
## y FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 5.2 of section 5.2.1
# (example 5.2 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Spam classifications
# Show a few individual predicted probabilities next to the true labels.
sample <- spamTest[c(7,35,224,327),c('spam','pred')]
print(sample)
## spam pred
## 115 spam 0.9903246227
## 361 spam 0.4800498077
## 2300 non-spam 0.0006846551
## 3428 non-spam 0.0001434345
# example 5.3 of section 5.2.1
# (example 5.3 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Spam confusion matrix
cM <- table(truth=spamTest$spam,prediction=spamTest$pred>0.5)
print(cM)
## prediction
## truth FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 5.4 of section 5.2.1
# (example 5.4 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Entering data by hand
# Build a confusion matrix directly from published counts.
# (Note: the name t shadows base::t for the rest of the session.)
t <- as.table(matrix(data=c(288-1,17,1,13882-17),nrow=2,ncol=2))
rownames(t) <- rownames(cM)
colnames(t) <- colnames(cM)
print(t)
## FALSE TRUE
## non-spam 287 1
## spam 17 13865
# example 5.5 of section 5.2.2
# (example 5.5 of section 5.2.2) : Choosing and evaluating models : Evaluating models : Evaluating scoring models
# Title: Plotting residuals
# Fit a line to quadratic data and draw the residuals as vertical
# segments from each point to its prediction.
d <- data.frame(y=(1:10)^2,x=1:10)
model <- lm(y~x,data=d)
d$prediction <- predict(model,newdata=d)
library('ggplot2')
ggplot(data=d) + geom_point(aes(x=x,y=y)) +
geom_line(aes(x=x,y=prediction),color='blue') +
geom_segment(aes(x=x,y=prediction,yend=y,xend=x)) +
scale_y_continuous('')
# example 5.6 of section 5.2.3
# (example 5.6 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Making a double density plot
# Score distributions for spam vs. non-spam on one plot.
ggplot(data=spamTest) +
geom_density(aes(x=pred,color=spam,linetype=spam))
# example 5.7 of section 5.2.3
# (example 5.7 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Plotting the receiver operating characteristic curve
library('ROCR')
eval <- prediction(spamTest$pred,spamTest$spam)
plot(performance(eval,"tpr","fpr"))
print(attributes(performance(eval,'auc'))$y.values[[1]])
## [1] 0.9660072
# example 5.8 of section 5.2.3
# (example 5.8 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Calculating log likelihood
# Total log likelihood of the test labels under the model, then the
# same quantity averaged per row.
sum(ifelse(spamTest$spam=='spam',
log(spamTest$pred),
log(1-spamTest$pred)))
## [1] -134.9478
sum(ifelse(spamTest$spam=='spam',
log(spamTest$pred),
log(1-spamTest$pred)))/dim(spamTest)[[1]]
## [1] -0.2946458
# example 5.9 of section 5.2.3
# (example 5.9 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Computing the null model’s log likelihood
# Baseline: log likelihood when every row is scored with the overall
# spam rate pNull.
pNull <- sum(ifelse(spamTest$spam=='spam',1,0))/dim(spamTest)[[1]]
sum(ifelse(spamTest$spam=='spam',1,0))*log(pNull) +
sum(ifelse(spamTest$spam=='spam',0,1))*log(1-pNull)
## [1] -306.8952
# example 5.10 of section 5.2.3
# (example 5.10 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Calculating entropy and conditional entropy
entropy <- function(x) { # Note: 1
  # Shannon entropy, in bits, of a discrete distribution given as a
  # vector of (possibly unnormalized) outcome counts. Zero counts are
  # dropped first, following the convention 0 * log(0) = 0.
  positive <- x[x > 0]
  probs <- positive / sum(positive)
  sum(-probs * log(probs, 2))
}
# Entropy (bits) of the overall spam/non-spam label distribution.
print(entropy(table(spamTest$spam))) # Note: 2
## [1] 0.9667165
conditionalEntropy <- function(t) { # Note: 3
  # Entropy of the row variable (truth) remaining after observing the
  # column variable (prediction), for a two-column contingency table:
  # the count-weighted average of the per-column entropies.
  # Depends on entropy(), defined just above in this file.
  leftTotal <- sum(t[, 1])
  rightTotal <- sum(t[, 2])
  (leftTotal * entropy(t[, 1]) + rightTotal * entropy(t[, 2])) / sum(t)
}
# Remaining entropy of the labels given the model's predictions (cM is
# the confusion matrix from example 5.3 above).
print(conditionalEntropy(cM)) # Note: 4
## [1] 0.3971897
# Note 1:
# Define function that computes the entropy
# from list of outcome counts
# Note 2:
# Calculate entropy of spam/non-spam
# distribution
# Note 3:
# Function to calculate conditional or
# remaining entropy of spam distribution (rows)
# given prediction (columns)
# Note 4:
# Calculate conditional or remaining entropy
# of spam distribution given prediction
# example 5.11 of section 5.2.5
# (example 5.11 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Clustering random data in the plane
# k-means (k=5) on 100 uniformly random 2-D points; the fixed seed makes
# the example reproducible.
set.seed(32297)
d <- data.frame(x=runif(100),y=runif(100))
clus <- kmeans(d,centers=5)
d$cluster <- clus$cluster
# example 5.12 of section 5.2.5
# (example 5.12 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Plotting our clusters
# For each cluster, chull() picks the points on its convex hull; the
# stacked hull rows are drawn as shaded polygons behind the labels.
library('ggplot2'); library('grDevices')
h <- do.call(rbind,
lapply(unique(clus$cluster),
function(c) { f <- subset(d,cluster==c); f[chull(f),]}))
ggplot() +
geom_text(data=d,aes(label=cluster,x=x,y=y,
color=cluster),size=3) +
geom_polygon(data=h,aes(x=x,y=y,group=cluster,fill=as.factor(cluster)),
alpha=0.4,linetype=0) +
theme(legend.position = "none")
# example 5.13 of section 5.2.5
# (example 5.13 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Calculating the size of each cluster
table(d$cluster)
## 1 2 3 4 5
## 10 27 18 17 28
# example 5.14 of section 5.2.5
# (example 5.14 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Calculating the typical distance between items in every pair of clusters
# Build all point pairs via outer(), record each pair's cluster labels
# and Euclidean distance, then average distance by cluster pair.
library('reshape2')
n <- dim(d)[[1]]
pairs <- data.frame(
ca = as.vector(outer(1:n,1:n,function(a,b) d[a,'cluster'])),
cb = as.vector(outer(1:n,1:n,function(a,b) d[b,'cluster'])),
dist = as.vector(outer(1:n,1:n,function(a,b)
sqrt((d[a,'x']-d[b,'x'])^2 + (d[a,'y']-d[b,'y'])^2)))
)
dcast(pairs,ca~cb,value.var='dist',mean)
## ca 1 2 3 4 5
## 1 1 0.1478480 0.6524103 0.3780785 0.4404508 0.7544134
## 2 2 0.6524103 0.2794181 0.5551967 0.4990632 0.5165320
## 3 3 0.3780785 0.5551967 0.2031272 0.6122986 0.4656730
## 4 4 0.4404508 0.4990632 0.6122986 0.2048268 0.8365336
## 5 5 0.7544134 0.5165320 0.4656730 0.8365336 0.2221314
# example 6.1 of section 6.1.1
# (example 6.1 of section 6.1.1) : Memorization methods : KDD and KDD Cup 2009 : Getting started with KDD Cup 2009 data
# Title: Preparing the KDD data for analysis
# Load the KDD Cup 2009 churn/appetency/upselling data, attach the three
# outcome columns, and split into train/calibration/test sets.
d <- read.table('orange_small_train.data.gz', # Note: 1
header=T,
sep='\t',
na.strings=c('NA','')) # Note: 2
churn <- read.table('orange_small_train_churn.labels.txt',
header=F,sep='\t') # Note: 3
d$churn <- churn$V1 # Note: 4
appetency <- read.table('orange_small_train_appetency.labels.txt',
header=F,sep='\t')
d$appetency <- appetency$V1 # Note: 5
upselling <- read.table('orange_small_train_upselling.labels.txt',
header=F,sep='\t')
d$upselling <- upselling$V1 # Note: 6
set.seed(729375) # Note: 7
# One uniform draw per row drives the 90/10 train/test split.
d$rgroup <- runif(dim(d)[[1]])
dTrainAll <- subset(d,rgroup<=0.9)
dTest <- subset(d,rgroup>0.9) # Note: 8
# Style note (review): `=` used for top-level assignment here; the rest of
# the file uses `<-`.
outcomes=c('churn','appetency','upselling')
vars <- setdiff(colnames(dTrainAll),
c(outcomes,'rgroup'))
catVars <- vars[sapply(dTrainAll[,vars],class) %in%
c('factor','character')] # Note: 9
numericVars <- vars[sapply(dTrainAll[,vars],class) %in%
c('numeric','integer')] # Note: 10
rm(list=c('d','churn','appetency','upselling')) # Note: 11
# `outcome` and `pos` are globals consumed by mkPredC/calcAUC/etc. below.
outcome <- 'churn' # Note: 12
pos <- '1' # Note: 13
# A second random split: ~10% of training rows become the calibration set.
useForCal <- rbinom(n=dim(dTrainAll)[[1]],size=1,prob=0.1)>0 # Note: 14
dCal <- subset(dTrainAll,useForCal)
dTrain <- subset(dTrainAll,!useForCal)
# Note 1:
# Read the file of independent variables. All
# data from
# https://github.com/WinVector/zmPDSwR/tree/master/KDD2009.
# Note 2:
# Treat both NA and the empty string as missing
# data.
# Note 3:
# Read churn dependent variable.
# Note 4:
# Add churn as a new column.
# Note 5:
# Add appetency as a new column.
# Note 6:
# Add upselling as a new column.
# Note 7:
# By setting the seed to the pseudo-random
# number generator, we make our work reproducible:
# someone redoing it will see the exact same
# results.
# Note 8:
# Split data into train and test subsets.
# Note 9:
# Identify which features are categorical
# variables.
# Note 10:
# Identify which features are numeric
# variables.
# Note 11:
# Remove unneeded objects from workspace.
# Note 12:
# Choose which outcome to model (churn).
# Note 13:
# Choose which outcome is considered
# positive.
# Note 14:
# Further split training data into training and
# calibration.
# example 6.2 of section 6.2.1
# (example 6.2 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Plotting churn grouped by variable 218 levels
# Cross-tabulate the levels of Var218 against the churn outcome, keeping
# NA as its own row so missingness itself can be inspected as a signal.
table218 <- table(
Var218=dTrain[,'Var218'], # Note: 1
churn=dTrain[,outcome], # Note: 2
useNA='ifany') # Note: 3
print(table218)
## churn
## Var218 -1 1
## cJvF 19245 1220
## UYBR 17860 1618
## <NA> 423 152
# Note this listing was updated: 10-14-2014 as some of results in the book were
# accidentally from older code. Will update later listings as we go forward.
# Note 1:
# Tabulate levels of Var218.
# Note 2:
# Tabulate levels of churn outcome.
# Note 3:
# Include NA values in tabulation.
# example 6.3 of section 6.2.1
# (example 6.3 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Churn rates grouped by variable 218 codes
# Per-level churn rate: column 2 (churn == 1) divided by the level total.
# Note the NA level churns at a much higher rate than either code.
print(table218[,2]/(table218[,1]+table218[,2]))
## cJvF UYBR <NA>
## 0.05994389 0.08223821 0.26523297
# example 6.4 of section 6.2.1
# (example 6.4 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Function to build single-variable models for categorical variables
# Single-variable model for a categorical feature.
# Given training outcomes (outCol), the training values of one categorical
# variable (varCol), and the values to score (appCol), return for each
# entry of appCol the smoothed estimated probability of the positive class.
# Relies on the global `pos` (positive-class label as a string).
# The order of the two NA-fixup assignments below matters: rows where the
# *input* is NA get the NA-specific rate first; any predictions still NA
# after that (levels unseen in training) fall back to the base rate.
mkPredC <- function(outCol,varCol,appCol) { # Note: 1
# Base rate: overall fraction of positive outcomes in training.
pPos <- sum(outCol==pos)/length(outCol) # Note: 2
# Positive rate among training rows where the variable is missing.
naTab <- table(as.factor(outCol[is.na(varCol)]))
pPosWna <- (naTab/sum(naTab))[pos] # Note: 3
vTab <- table(as.factor(outCol),varCol)
# Laplace-style smoothing: the 1.0e-3 terms pull rare levels toward pPos.
pPosWv <- (vTab[pos,]+1.0e-3*pPos)/(colSums(vTab)+1.0e-3) # Note: 4
# Look up each application value's level rate. NOTE(review): if appCol is
# a factor this indexes the named vector by factor level -- assumes appCol
# levels align with vTab's columns; confirm for factor inputs.
pred <- pPosWv[appCol] # Note: 5
pred[is.na(appCol)] <- pPosWna # Note: 6
pred[is.na(pred)] <- pPos # Note: 7
pred # Note: 8
}
# Note 1:
# Given a vector of training outcomes (outCol),
# a categorical training variable (varCol), and a
# prediction variable (appCol), use outCol and
# varCol to build a single-variable model and then
# apply the model to appCol to get new
# predictions.
# Note 2:
# Get stats on how often outcome is positive
# during training.
# Note 3:
# Get stats on how often outcome is positive for
# NA values of variable during training.
# Note 4:
# Get stats on how often outcome is positive,
# conditioned on levels of training variable.
# Note 5:
# Make predictions by looking up levels of
# appCol.
# Note 6:
# Add in predictions for NA levels of
# appCol.
# Note 7:
# Add in predictions for levels of appCol that
# weren’t known during training.
# Note 8:
# Return vector of predictions.
# example 6.5 of section 6.2.1
# (example 6.5 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Applying single-categorical variable models to all of our datasets
# For every categorical variable, fit a single-variable model on the
# training split and score all three splits. Each score column is named
# 'pred' followed by the variable name (e.g. predVar218).
for(v in catVars) {
  predName <- paste('pred', v, sep='')
  trainOutcome <- dTrain[,outcome]
  trainValues <- dTrain[,v]
  dTrain[,predName] <- mkPredC(trainOutcome, trainValues, dTrain[,v])
  dCal[,predName] <- mkPredC(trainOutcome, trainValues, dCal[,v])
  dTest[,predName] <- mkPredC(trainOutcome, trainValues, dTest[,v])
}
# example 6.6 of section 6.2.1
# (example 6.6 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Scoring categorical variables by AUC
library('ROCR')
# AUC of a numeric score column against actual outcomes; the positive
# class is whatever the global `pos` names.
calcAUC <- function(predcol,outcol) {
  rocrPred <- prediction(predcol, outcol==pos)
  aucPerf <- performance(rocrPred, 'auc')
  as.numeric(aucPerf@y.values)
}
# Report the categorical single-variable models that look strong on
# training data (AUC >= 0.8) together with their calibration-set AUC;
# the large train/calibration gaps expose overfit variables.
for(v in catVars) {
  pi <- paste('pred',v,sep='')
  aucTrain <- calcAUC(dTrain[,pi],dTrain[,outcome])
  if(aucTrain < 0.8) next
  aucCal <- calcAUC(dCal[,pi],dCal[,outcome])
  print(sprintf("%s, trainAUC: %4.3f calibrationAUC: %4.3f",
                pi,aucTrain,aucCal))
}
## [1] "predVar200, trainAUC: 0.828 calibrationAUC: 0.527"
## [1] "predVar202, trainAUC: 0.829 calibrationAUC: 0.522"
## [1] "predVar214, trainAUC: 0.828 calibrationAUC: 0.527"
## [1] "predVar217, trainAUC: 0.898 calibrationAUC: 0.553"
# example 6.7 of section 6.2.2
# (example 6.7 of section 6.2.2) : Memorization methods : Building single-variable models : Using numeric features
# Title: Scoring numeric variables by AUC
# Single-variable model for a numeric feature: discretize the variable by
# the deciles of its training distribution, then reuse the categorical
# model mkPredC on the binned values. duplicate quantiles are collapsed
# with unique() so cut() receives strictly valid break points.
mkPredN <- function(outCol,varCol,appCol) {
  decileProbs <- seq(0, 1, 0.1)
  binEdges <- unique(as.numeric(quantile(varCol, probs=decileProbs, na.rm=TRUE)))
  mkPredC(outCol, cut(varCol, binEdges), cut(appCol, binEdges))
}
# Fit and score a single-variable model for every numeric variable, and
# print the ones whose training AUC clears 0.55 (a much lower bar than the
# 0.8 used for categorical variables above).
for(v in numericVars) {
pi <- paste('pred',v,sep='')
dTrain[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dTrain[,v])
dTest[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dTest[,v])
dCal[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dCal[,v])
aucTrain <- calcAUC(dTrain[,pi],dTrain[,outcome])
if(aucTrain>=0.55) {
aucCal <- calcAUC(dCal[,pi],dCal[,outcome])
print(sprintf("%s, trainAUC: %4.3f calibrationAUC: %4.3f",
pi,aucTrain,aucCal))
}
}
## [1] "predVar6, trainAUC: 0.557 calibrationAUC: 0.554"
## [1] "predVar7, trainAUC: 0.555 calibrationAUC: 0.565"
## [1] "predVar13, trainAUC: 0.568 calibrationAUC: 0.553"
## [1] "predVar73, trainAUC: 0.608 calibrationAUC: 0.616"
## [1] "predVar74, trainAUC: 0.574 calibrationAUC: 0.566"
## [1] "predVar81, trainAUC: 0.558 calibrationAUC: 0.542"
## [1] "predVar113, trainAUC: 0.557 calibrationAUC: 0.567"
## [1] "predVar126, trainAUC: 0.635 calibrationAUC: 0.629"
## [1] "predVar140, trainAUC: 0.561 calibrationAUC: 0.560"
## [1] "predVar189, trainAUC: 0.574 calibrationAUC: 0.599"
# example 6.8 of section 6.2.2
# (example 6.8 of section 6.2.2) : Memorization methods : Building single-variable models : Using numeric features
# Title: Plotting variable performance
library('ggplot2')
# Density of the best numeric score (predVar126) split by actual churn;
# separation between the two curves indicates predictive power.
ggplot(data=dCal) +
geom_density(aes(x=predVar126,color=as.factor(churn)))
# example 6.9 of section 6.2.3
# (example 6.9 of section 6.2.3) : Memorization methods : Building single-variable models : Using cross-validation to estimate effects of overfitting
# Title: Running a repeated cross-validation experiment
# Estimate the out-of-sample AUC of the Var217 single-variable model by
# 100 random train/holdout resplits of dTrainAll.
var <- 'Var217'
aucs <- rep(0,100)
# NOTE(review): the loop index `rep` shadows base rep() inside the loop
# body (harmless here since rep() is not called again), and 1:length(aucs)
# would misbehave for a zero-length vector -- seq_along is the safer idiom.
for(rep in 1:length(aucs)) { # Note: 1
useForCalRep <- rbinom(n=dim(dTrainAll)[[1]],size=1,prob=0.1)>0 # Note: 2
predRep <- mkPredC(dTrainAll[!useForCalRep,outcome], # Note: 3
dTrainAll[!useForCalRep,var],
dTrainAll[useForCalRep,var])
aucs[rep] <- calcAUC(predRep,dTrainAll[useForCalRep,outcome]) # Note: 4
}
mean(aucs)
## [1] 0.5556656
sd(aucs)
## [1] 0.01569345
# Note 1:
# For 100 iterations...
# Note 2:
# ...select a random subset of about 10% of the training data as hold-out set,...
# Note 3:
# ...use the random 90% of training data to train model and evaluate that model on hold-out
# set,...
# Note 4:
# ...calculate resulting model’s AUC using hold-out set; store that value and repeat.
# example 6.10 of section 6.2.3
# (example 6.10 of section 6.2.3) : Memorization methods : Building single-variable models : Using cross-validation to estimate effects of overfitting
# Title: Empirically cross-validating performance
# Same experiment as the explicit loop above, expressed functionally:
# fCross performs one random resplit and returns the holdout AUC.
# It reads dTrainAll, outcome, and var from the enclosing environment.
fCross <- function() {
useForCalRep <- rbinom(n=dim(dTrainAll)[[1]],size=1,prob=0.1)>0
predRep <- mkPredC(dTrainAll[!useForCalRep,outcome],
dTrainAll[!useForCalRep,var],
dTrainAll[useForCalRep,var])
calcAUC(predRep,dTrainAll[useForCalRep,outcome])
}
# replicate() re-evaluates fCross() 100 times and collects the AUCs.
aucs <- replicate(100,fCross())
# example 6.11 of section 6.3.1
# (example 6.11 of section 6.3.1) : Memorization methods : Building models using many variables : Variable selection
# Title: Basic variable selection
# Each variable we use represents a chance of explaining
# more of the outcome variation (a chance of building a better
# model) but also represents a possible source of noise and
# overfitting. To control this effect, we often preselect
# which subset of variables we’ll use to fit. Variable
# selection can be an important defensive modeling step even
# for types of models that “don’t need it” (as seen with
# decision trees in section 6.3.2). Listing 6.11 shows a
# hand-rolled variable selection loop where each variable is
# scored according to a deviance inspired score, where a
# variable is scored with a bonus proportional to the change
# in scaled log likelihood of the training data. We could
# also try an AIC (Akaike information criterion) by
# subtracting a penalty proportional to the complexity of the
# variable (which in this case is 2^entropy for categorical
# variables and a stand-in of 1 for numeric variables). The
# score is a bit ad hoc, but tends to work well in selecting
# variables. Notice we’re using performance on the calibration
# set (not the training set) to pick variables. Note that we
# don’t use the test set for calibration; to do so lessens the
# reliability of the test set for model quality confirmation.
# Log likelihood of the observed outcomes given predicted positive-class
# probabilities: sum of log(p) over positives and log(1-p) over negatives.
# Uses the global `pos` as the positive-class label. (The "likelyhood"
# misspelling is kept: callers below use this exact name.)
logLikelyhood <- function(outCol,predCol) { # Note: 1
  probOfObserved <- ifelse(outCol==pos, predCol, 1-predCol)
  sum(log(probOfObserved))
}
# Score each single-variable model by twice its log-likelihood improvement
# over the calibration-set base rate (a deviance-style statistic) and keep
# the variables clearing minStep.
selVars <- c()
minStep <- 5
# Log likelihood of the calibration outcomes under the constant base-rate
# prediction -- the null model every variable must beat.
baseRateCheck <- logLikelyhood(dCal[,outcome],
sum(dCal[,outcome]==pos)/length(dCal[,outcome]))
for(v in catVars) { # Note: 2
pi <- paste('pred',v,sep='')
liCheck <- 2*((logLikelyhood(dCal[,outcome],dCal[,pi]) -
baseRateCheck))
# NOTE(review): strict `>` here vs `>=` in the numeric loop below --
# the asymmetry looks unintentional; confirm before relying on it.
if(liCheck>minStep) {
print(sprintf("%s, calibrationScore: %g",
pi,liCheck))
selVars <- c(selVars,pi)
}
}
for(v in numericVars) { # Note: 3
pi <- paste('pred',v,sep='')
liCheck <- 2*((logLikelyhood(dCal[,outcome],dCal[,pi]) -
baseRateCheck))
if(liCheck>=minStep) {
print(sprintf("%s, calibrationScore: %g",
pi,liCheck))
selVars <- c(selVars,pi)
}
}
# Note 1:
# Define a convenience function to compute log
# likelihood.
# Note 2:
# Run through categorical variables and pick
# based on a deviance improvement (related to
# difference in log likelihoods; see chapter
# 3).
# Note 3:
# Run through numeric variables and pick
# based on a deviance improvement.
# example 6.13 of section 6.3.2
# (example 6.13 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building a bad decision tree
library('rpart')
# Tree 1: fit directly on the raw variables. The "churn>0" formula turns
# the -1/1 outcome into a logical regression target for rpart.
fV <- paste(outcome,'>0 ~ ',
paste(c(catVars,numericVars),collapse=' + '),sep='')
tmodel <- rpart(fV,data=dTrain)
# Train AUC far above test/calibration AUC = severe overfit.
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.9241265
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5266172
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.5126917
# example 6.14 of section 6.3.2
# (example 6.14 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building another bad decision tree
# Tree 2: fit on ALL the single-variable model scores instead of the raw
# variables -- still overfits.
tVars <- paste('pred',c(catVars,numericVars),sep='')
fV2 <- paste(outcome,'>0 ~ ',paste(tVars,collapse=' + '),sep='')
tmodel <- rpart(fV2,data=dTrain)
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.928669
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5390648
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.5384152
# example 6.15 of section 6.3.2
# (example 6.15 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building yet another bad decision tree
# Tree 3: same features, but constrain tree growth (complexity penalty,
# minimum node sizes, depth cap) -- helps a little, still overfit.
tmodel <- rpart(fV2,data=dTrain,
control=rpart.control(cp=0.001,minsplit=1000,
minbucket=1000,maxdepth=5)
)
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.9421195
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5794633
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.547967
# example 6.16 of section 6.3.2
# (example 6.16 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building a better decision tree
# Tree 4: restrict to the pre-selected variables (selVars) AND constrain
# growth; train/test/calibration AUCs finally agree. `f` is reused later
# for the logistic regression comparison.
f <- paste(outcome,'>0 ~ ',paste(selVars,collapse=' + '),sep='')
tmodel <- rpart(f,data=dTrain,
control=rpart.control(cp=0.001,minsplit=1000,
minbucket=1000,maxdepth=5)
)
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.6906852
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.6843595
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.6669301
# example 6.17 of section 6.3.2
# (example 6.17 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Printing the decision tree
print(tmodel)
## n= 40518
##
## node), split, n, deviance, yval
## * denotes terminal node
##
## 1) root 40518 2769.3550 0.07379436
## 2) predVar126< 0.07366888 18188 726.4097 0.04167583
## 4) predVar126< 0.04391312 8804 189.7251 0.02203544 *
## 5) predVar126>=0.04391312 9384 530.1023 0.06010230
## 10) predVar189< 0.08449448 8317 410.4571 0.05206204 *
## 11) predVar189>=0.08449448 1067 114.9166 0.12277410 *
## 3) predVar126>=0.07366888 22330 2008.9000 0.09995522
## 6) predVar212< 0.07944508 8386 484.2499 0.06153112
## 12) predVar73< 0.06813291 4084 167.5012 0.04285015 *
## 13) predVar73>=0.06813291 4302 313.9705 0.07926546 *
## 7) predVar212>=0.07944508 13944 1504.8230 0.12306370
## 14) predVar218< 0.07134103 6728 580.7390 0.09542212
## 28) predVar126< 0.1015407 3901 271.8426 0.07536529 *
## 29) predVar126>=0.1015407 2827 305.1617 0.12309870
## 58) predVar73< 0.07804522 1452 110.0826 0.08264463 *
## 59) predVar73>=0.07804522 1375 190.1935 0.16581820 *
## 15) predVar218>=0.07134103 7216 914.1502 0.14883590
## 30) predVar74< 0.0797246 2579 239.3579 0.10352850 *
## 31) predVar74>=0.0797246 4637 666.5538 0.17403490
## 62) predVar189< 0.06775545 1031 102.9486 0.11251210 *
## 63) predVar189>=0.06775545 3606 558.5871 0.19162510 *
# example 6.18 of section 6.3.2
# (example 6.18 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Plotting the decision tree
# Draw the final tree with base graphics; par(cex=0.7) shrinks the label
# text so node labels fit. NOTE(review): par() is changed without being
# restored afterwards, so later base plots inherit the smaller text.
par(cex=0.7)
plot(tmodel)
text(tmodel)
# example 6.19 of section 6.3.3
# (example 6.19 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Running k-nearest neighbors
library('class')
nK <- 200
# Feature matrix restricted to the selected variables.
knnTrain <- dTrain[,selVars] # Note: 1
# Logical class labels (TRUE = positive/churn).
knnCl <- dTrain[,outcome]==pos # Note: 2
# Score a data frame of the same columns with 200-NN. knn() reports the
# winning class plus the fraction of neighbor votes it received (prob
# attribute); the ifelse converts that into P(positive) regardless of
# which class won.
knnPred <- function(df) { # Note: 3
knnDecision <- knn(knnTrain,df,knnCl,k=nK,prob=T)
ifelse(knnDecision==TRUE, # Note: 4
attributes(knnDecision)$prob,
1-(attributes(knnDecision)$prob))
}
print(calcAUC(knnPred(dTrain[,selVars]),dTrain[,outcome]))
## [1] 0.7443927
print(calcAUC(knnPred(dCal[,selVars]),dCal[,outcome]))
## [1] 0.7119394
print(calcAUC(knnPred(dTest[,selVars]),dTest[,outcome]))
## [1] 0.718256
# Note 1:
# Build a data frame with only the variables we
# wish to use for classification.
# Note 2:
# Build a vector with the known training
# outcomes.
# Note 3:
# Bind the knn() training function with our data
# in a new function.
# Note 4:
# Convert knn’s unfortunate convention of
# calculating probability as “proportion of the
# votes for the winning class” into the more useful
# “calculated probability of being a positive
# example.”
# example 6.20 of section 6.3.3
# (example 6.20 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Plotting 200-nearest neighbor performance
# Attach kNN scores to the calibration set and plot their density split
# by actual churn; separated curves indicate useful discrimination.
dCal$kpred <- knnPred(dCal[,selVars])
ggplot(data=dCal) +
geom_density(aes(x=kpred,
color=as.factor(churn),linetype=as.factor(churn)))
# example 6.21 of section 6.3.3
# (example 6.21 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Plotting the receiver operating characteristic curve
# Plot an ROC curve for a score column against actual outcomes.
# Uses ROCR to compute true/false positive rates over all thresholds and
# ggplot2 to draw them, with the y=x diagonal as the no-skill reference.
# Relies on the global `pos` for the positive-class label.
plotROC <- function(predcol,outcol) {
perf <- performance(prediction(predcol,outcol==pos),'tpr','fpr')
# perf is an S4 object; its x/y slots hold one vector per run, hence [[1]].
pf <- data.frame(
FalsePositiveRate=perf@x.values[[1]],
TruePositiveRate=perf@y.values[[1]])
ggplot() +
geom_line(data=pf,aes(x=FalsePositiveRate,y=TruePositiveRate)) +
geom_line(aes(x=c(0,1),y=c(0,1)))
}
print(plotROC(knnPred(dTest[,selVars]),dTest[,outcome]))
# example 6.22 of section 6.3.3
# (example 6.22 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Plotting the performance of a logistic regression model
# Logistic regression on the same selected-variable formula `f` used for
# the best tree, for comparison. predict() here returns the link-scale
# score (no type="response"), which is fine for AUC since it is monotone
# in the probability.
gmodel <- glm(as.formula(f),data=dTrain,family=binomial(link='logit'))
print(calcAUC(predict(gmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.7309537
print(calcAUC(predict(gmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.7234645
print(calcAUC(predict(gmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.7170824
# example 6.23 of section 6.3.4
# (example 6.23 of section 6.3.4) : Memorization methods : Building models using many variables : Using Naive Bayes
# Title: Building, applying, and evaluating a Naive Bayes model
# Overall positive (churn) rate in the training set -- the prior for nBayes.
pPos <- sum(dTrain[,outcome]==pos)/length(dTrain[,outcome])
# Hand-rolled Naive Bayes over a data frame of single-variable positive-
# class probabilities (pf), with prior pPos. Returns P(positive) per row.
# Each column contributes log(P[positive & evidence]/P[positive]) -- i.e.
# log P[evidence|positive] up to outcome-independent terms -- and the
# smoothing epsilon keeps the logs finite.
nBayes <- function(pPos,pf) { # Note: 1
  eps <- 1.0e-5
  pNeg <- 1 - pPos
  logScorePos <- log(pPos + eps) + rowSums(log(pf/pPos + eps)) # Note: 2
  logScoreNeg <- log(pNeg + eps) + rowSums(log((1-pf)/(1-pPos) + eps)) # Note: 3
  # Subtract the row-wise max before exponentiating so neither score can
  # overflow, then normalize so the two class probabilities sum to 1.
  shift <- pmax(logScorePos, logScoreNeg)
  wPos <- exp(logScorePos - shift)
  wNeg <- exp(logScoreNeg - shift) # Note: 4
  wPos/(wPos + wNeg) # Note: 5
}
# Apply the hand-rolled Naive Bayes to all single-variable score columns
# on each split and measure AUC.
pVars <- paste('pred',c(numericVars,catVars),sep='')
dTrain$nbpredl <- nBayes(pPos,dTrain[,pVars])
dCal$nbpredl <- nBayes(pPos,dCal[,pVars])
dTest$nbpredl <- nBayes(pPos,dTest[,pVars]) # Note: 6
# Near-perfect train AUC vs ~0.6 calibration/test AUC: massive overfit.
print(calcAUC(dTrain$nbpredl,dTrain[,outcome]))
## [1] 0.9757348
print(calcAUC(dCal$nbpredl,dCal[,outcome]))
## [1] 0.5995206
print(calcAUC(dTest$nbpredl,dTest[,outcome]))
## [1] 0.5956515 # Note: 7
# Note 1:
# Define a function that performs the Naive
# Bayes prediction.
# Note 2:
# For each row, compute (with a smoothing term)
# the sum of log(P[positive &
# evidence_i]/P[positive]) across all columns. This
# is equivalent to the log of the product of
# P[evidence_i | positive] up to terms that don’t
# depend on the positive/negative outcome.
# Note 3:
# For each row, compute (with a smoothing term)
# the sum of log(P[negative &
# evidence_i]/P[negative]) across all columns. This
# is equivalent to the log of the product of
# P[evidence_i | negative] up to terms that don’t
# depend on the positive/negative outcome.
# Note 4:
# Exponentiate to turn sums back into products,
# but make sure we don’t cause a floating point
# overflow in doing so.
# Note 5:
# Use the fact that the predicted positive
# probability plus the predicted negative
# probability should sum to 1.0 to find and
# eliminate Z. Return the correctly scaled predicted
# odds of being positive as our forecast.
# Note 6:
# Apply the function to make the predictions.
# Note 7:
# Calculate the AUCs. Notice the
# overfit—fantastic performance on the training
# set that isn’t repeated on the calibration or test
# sets.
# example 6.24 of section 6.3.4
# (example 6.24 of section 6.3.4) : Memorization methods : Building models using many variables : Using Naive Bayes
# Title: Using a Naive Bayes package
# Same task using the packaged Naive Bayes from e1071, fit on the raw
# variables (not the single-variable scores).
library('e1071')
lVars <- c(catVars,numericVars)
ff <- paste('as.factor(',outcome,'>0) ~ ',
paste(lVars,collapse=' + '),sep='')
nbmodel <- naiveBayes(as.formula(ff),data=dTrain)
# type='raw' returns class probabilities; keep the P(TRUE) column.
dTrain$nbpred <- predict(nbmodel,newdata=dTrain,type='raw')[,'TRUE']
dCal$nbpred <- predict(nbmodel,newdata=dCal,type='raw')[,'TRUE']
dTest$nbpred <- predict(nbmodel,newdata=dTest,type='raw')[,'TRUE']
calcAUC(dTrain$nbpred,dTrain[,outcome])
## [1] 0.4643591
calcAUC(dCal$nbpred,dCal[,outcome])
## [1] 0.5544484
calcAUC(dTest$nbpred,dTest[,outcome])
## [1] 0.5679519
# example 7.1 of section 7.1.1
# (example 7.1 of section 7.1.1) : Linear and logistic regression : Using linear regression : Understanding linear regression
# Title: Loading the PUMS data
# Load the prepared PUMS census subset and split train/test on the
# pre-assigned random group column.
load("psub.RData")
dtrain <- subset(psub,ORIGRANDGROUP >= 500)
dtest <- subset(psub,ORIGRANDGROUP < 500)
# Linear model of log10 personal income on age, sex, class of worker,
# and education level.
model <- lm(log(PINCP,base=10) ~ AGEP + SEX + COW + SCHL,data=dtrain)
dtest$predLogPINCP <- predict(model,newdata=dtest)
dtrain$predLogPINCP <- predict(model,newdata=dtrain)
# example 7.2 of section 7.1.3
# (example 7.2 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Plotting log income as a function of predicted log income
library('ggplot2')
# Actual vs predicted log income: a smoothed trend through the points,
# plus the ideal y=x line (dashed blue) for reference.
ggplot(data=dtest,aes(x=predLogPINCP,y=log(PINCP,base=10))) +
geom_point(alpha=0.2,color="black") +
geom_smooth(aes(x=predLogPINCP,
y=log(PINCP,base=10)),color="black") +
geom_line(aes(x=log(PINCP,base=10),
y=log(PINCP,base=10)),color="blue",linetype=2) +
scale_x_continuous(limits=c(4,5)) +
scale_y_continuous(limits=c(3.5,5.5))
# example 7.3 of section 7.1.3
# (example 7.3 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Plotting residuals income as a function of predicted log income
# Residual plot (prediction minus actual); a flat smooth near zero means
# no systematic error across the prediction range.
ggplot(data=dtest,aes(x=predLogPINCP,
y=predLogPINCP-log(PINCP,base=10))) +
geom_point(alpha=0.2,color="black") +
geom_smooth(aes(x=predLogPINCP,
y=predLogPINCP-log(PINCP,base=10)),
color="black")
# example 7.4 of section 7.1.3
# (example 7.4 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Computing R-squared
# R-squared: fraction of the variance of actuals y explained by
# predictions f (1 = perfect, 0 = no better than the mean).
rsq <- function(y,f) {
  residualSS <- sum((y - f)^2)
  totalSS <- sum((y - mean(y))^2)
  1 - residualSS/totalSS
}
# R-squared on training and test sets; similar values suggest the model
# is not overfit.
rsq(log(dtrain$PINCP,base=10),predict(model,newdata=dtrain))
rsq(log(dtest$PINCP,base=10),predict(model,newdata=dtest))
# example 7.5 of section 7.1.3
# (example 7.5 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Calculating root mean square error
# Root mean squared error between actuals y and predictions f,
# in the same units as y.
rmse <- function(y, f) {
  squaredErrors <- (y - f)^2
  sqrt(mean(squaredErrors))
}
# RMSE of log10 income on training and test sets.
rmse(log(dtrain$PINCP,base=10),predict(model,newdata=dtrain))
rmse(log(dtest$PINCP,base=10),predict(model,newdata=dtest))
# example 7.6 of section 7.1.5
# (example 7.6 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Title: Summarizing residuals
# Five-number summaries of the residuals (actual minus predicted log
# income). The training mean is exactly 0 by construction of least squares.
summary(log(dtrain$PINCP,base=10) - predict(model,newdata=dtrain))
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.29200 -0.14150 0.02458 0.00000 0.17630 0.62530
summary(log(dtest$PINCP,base=10) - predict(model,newdata=dtest))
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.494000 -0.165300 0.018920 -0.004637 0.175500 0.868100
# informalexample 7.9 of section 7.1.5
# (informalexample 7.9 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Residual degrees of freedom: rows minus fitted coefficients.
# NOTE(review): the name `df` shadows the F-density function stats::df.
df <- dim(dtrain)[1] - dim(summary(model)$coefficients)[1]
# informalexample 7.10 of section 7.1.5
# (informalexample 7.10 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Residual standard error: sqrt of residual sum of squares over df.
modelResidualError <- sqrt(sum(residuals(model)^2)/df)
# example 7.7 of section 7.2.1
# (example 7.7 of section 7.2.1) : Linear and logistic regression : Using logistic regression : Understanding logistic regression
# Title: Loading the CDC data
# Load the prepared CDC natality data and split train/test on the
# pre-assigned random group column.
load("NatalRiskData.rData")
train <- sdata[sdata$ORIGRANDGROUP<=5,]
test <- sdata[sdata$ORIGRANDGROUP>5,]
# example 7.8 of section 7.2.2
# (example 7.8 of section 7.2.2) : Linear and logistic regression : Using logistic regression : Building a logistic regression model
# Title: Building the model formula
# Assemble the logistic regression formula "atRisk ~ var1+var2+..." from
# named groups of predictor columns: birth complications, maternal risk
# factors, and general covariates.
complications <- c("ULD_MECO","ULD_PRECIP","ULD_BREECH")
riskfactors <- c("URF_DIAB", "URF_CHYPER", "URF_PHYPER", "URF_ECLAM")
y <- "atRisk"
x <- c("PWGT", "UPREVIS", "CIG_REC", "GESTREC3", "DPLURAL",
       complications, riskfactors)
# Right-hand side joins all predictors with "+"; sprintf glues on the
# outcome and the "~" to form the full formula string.
fmla <- sprintf("%s~%s", y, paste(x, collapse="+"))
# example 7.9 of section 7.2.2
# (example 7.9 of section 7.2.2) : Linear and logistic regression : Using logistic regression : Building a logistic regression model
# Title: Fitting the logistic regression model
print(fmla)
## [1] "atRisk ~ PWGT+UPREVIS+CIG_REC+GESTREC3+DPLURAL+ULD_MECO+ULD_PRECIP+
## ULD_BREECH+URF_DIAB+URF_CHYPER+URF_PHYPER+URF_ECLAM"
# Fit the logistic regression; glm accepts the formula as a string.
model <- glm(fmla, data=train, family=binomial(link="logit"))
# example 7.10 of section 7.2.3
# (example 7.10 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Applying the logistic regression model
# type="response" returns probabilities (not link-scale scores).
train$pred <- predict(model, newdata=train, type="response")
test$pred <- predict(model, newdata=test, type="response")
# example 7.11 of section 7.2.3
# (example 7.11 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Plotting distribution of prediction score grouped by known outcome
library('ggplot2')
# Score densities split by actual at-risk status.
ggplot(train, aes(x=pred, color=atRisk, linetype=atRisk)) +
geom_density()
# example 7.12 of section 7.2.3
# (example 7.12 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Exploring modeling trade-offs
# Compute precision and recall as functions of the score threshold with
# ROCR, then plot the enrichment rate (precision relative to the base
# rate) and recall stacked on one page to pick an operating threshold.
library(ROCR) # Note: 1
library(grid) # Note: 2
predObj <- prediction(train$pred, train$atRisk) # Note: 3
precObj <- performance(predObj, measure="prec") # Note: 4
recObj <- performance(predObj, measure="rec") # Note: 5
precision <- (precObj@y.values)[[1]] # Note: 6
prec.x <- (precObj@x.values)[[1]] # Note: 7
recall <- (recObj@y.values)[[1]]
rocFrame <- data.frame(threshold=prec.x, precision=precision,
recall=recall) # Note: 8
# Stack n ggplot objects vertically in one grid page.
nplot <- function(plist) { # Note: 9
n <- length(plist)
grid.newpage()
pushViewport(viewport(layout=grid.layout(n,1)))
vplayout=function(x,y) {viewport(layout.pos.row=x, layout.pos.col=y)}
for(i in 1:n) {
print(plist[[i]], vp=vplayout(i,1))
}
}
pnull <- mean(as.numeric(train$atRisk)) # Note: 10
p1 <- ggplot(rocFrame, aes(x=threshold)) + # Note: 11
geom_line(aes(y=precision/pnull)) +
coord_cartesian(xlim = c(0,0.05), ylim=c(0,10) )
p2 <- ggplot(rocFrame, aes(x=threshold)) + # Note: 12
geom_line(aes(y=recall)) +
coord_cartesian(xlim = c(0,0.05) )
nplot(list(p1, p2)) # Note: 13
# Note 1:
# Load ROCR library.
# Note 2:
# Load grid library (you’ll need this for the
# nplot function below).
# Note 3:
# Create ROCR prediction object.
# Note 4:
# Create ROCR object to calculate precision as
# a function of threshold.
# Note 5:
# Create ROCR object to calculate recall as a
# function of threshold.
# Note 6:
# at ( @ ) symbol@ (at) symbolROCR objects are what R calls S4 objects;
# the slots (or fields) of an S4 object are stored
# as lists within the object. You extract the slots
# from an S4 object using @ notation.
# Note 7:
# The x values (thresholds) are the same in
# both predObj and recObj, so you only need to
# extract them once.
# Note 8:
# Build data frame with thresholds, precision,
# and recall.
# Note 9:
# Function to plot multiple plots on one page
# (stacked).
# Note 10:
# Calculate rate of at-risk births in the
# training set.
# Note 11:
# Plot enrichment rate as a function of
# threshold.
# Note 12:
# Plot recall as a function of
# threshold.
# Note 13:
# Show both plots simultaneously.
# example 7.13 of section 7.2.3
# (example 7.13 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Evaluating our chosen model
# Confusion matrix on the test set at the chosen threshold of 0.02,
# then precision, recall, and enrichment over the base at-risk rate.
ctab.test <- table(pred=test$pred>0.02, atRisk=test$atRisk) # Note: 1
ctab.test # Note: 2
## atRisk
## pred FALSE TRUE
## FALSE 9487 93
## TRUE 2405 116
# Precision: true positives / all predicted positives (row 2).
precision <- ctab.test[2,2]/sum(ctab.test[2,])
precision
## [1] 0.04601349
# Recall: true positives / all actual positives (column 2).
recall <- ctab.test[2,2]/sum(ctab.test[,2])
recall
## [1] 0.5550239
# Enrichment: precision relative to the overall positive rate.
enrich <- precision/mean(as.numeric(test$atRisk))
enrich
## [1] 2.664159
# Note 1:
# Build confusion matrix.
# Note 2:
# Rows contain predicted negatives and
# positives; columns contain actual negatives and
# positives.
# example 7.14 of section 7.2.4
# (example 7.14 of section 7.2.4) : Linear and logistic regression : Using logistic regression : Finding relations and extracting advice from logistic models
# Title: The model coefficients
# Fitted coefficients: log-odds contributions per unit of each predictor
# (or per level, for factors/logicals).
coefficients(model)
## (Intercept) PWGT
## -4.41218940 0.00376166
## UPREVIS CIG_RECTRUE
## -0.06328943 0.31316930
## GESTREC3< 37 weeks DPLURALtriplet or higher
## 1.54518311 1.39419294
## DPLURALtwin ULD_MECOTRUE
## 0.31231871 0.81842627
## ULD_PRECIPTRUE ULD_BREECHTRUE
## 0.19172008 0.74923672
## URF_DIABTRUE URF_CHYPERTRUE
## -0.34646672 0.56002503
## URF_PHYPERTRUE URF_ECLAMTRUE
## 0.16159872 0.49806435
# example 7.15 of section 7.2.5
# (example 7.15 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: The model summary
# Full summary: standard errors, z values, significance codes, and the
# null/residual deviances discussed in the next listings.
summary(model)
## Call:
## glm(formula = fmla, family = binomial(link = "logit"), data = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.9732 -0.1818 -0.1511 -0.1358 3.2641
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -4.412189 0.289352 -15.249 < 2e-16 ***
## PWGT 0.003762 0.001487 2.530 0.011417 *
## UPREVIS -0.063289 0.015252 -4.150 3.33e-05 ***
## CIG_RECTRUE 0.313169 0.187230 1.673 0.094398 .
## GESTREC3< 37 weeks 1.545183 0.140795 10.975 < 2e-16 ***
## DPLURALtriplet or higher 1.394193 0.498866 2.795 0.005194 **
## DPLURALtwin 0.312319 0.241088 1.295 0.195163
## ULD_MECOTRUE 0.818426 0.235798 3.471 0.000519 ***
## ULD_PRECIPTRUE 0.191720 0.357680 0.536 0.591951
## ULD_BREECHTRUE 0.749237 0.178129 4.206 2.60e-05 ***
## URF_DIABTRUE -0.346467 0.287514 -1.205 0.228187
## URF_CHYPERTRUE 0.560025 0.389678 1.437 0.150676
## URF_PHYPERTRUE 0.161599 0.250003 0.646 0.518029
## URF_ECLAMTRUE 0.498064 0.776948 0.641 0.521489
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 2698.7 on 14211 degrees of freedom
## Residual deviance: 2463.0 on 14198 degrees of freedom
## AIC: 2491
##
## Number of Fisher Scoring iterations: 7
# example 7.16 of section 7.2.5
# (example 7.16 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating deviance residuals
# Predicted at-risk probabilities for the training data.
pred <- predict(model, newdata=train, type="response") # Note: 1
# Per-observation log likelihood. y is the true outcome coded 0/1 (or
# FALSE/TRUE); py is the predicted probability that y == 1.
llcomponents <- function(y, py) { # Note: 2
  logPosProb <- log(py)
  logNegProb <- log(1 - py)
  y*logPosProb + (1 - y)*logNegProb
}
# Deviance residuals: signed square root of -2 times each observation's
# log likelihood; the sign marks whether the prediction under- or
# over-shoots the actual outcome.
edev <- sign(as.numeric(train$atRisk) - pred) * # Note: 3
sqrt(-2*llcomponents(as.numeric(train$atRisk), pred))
summary(edev)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.9732 -0.1818 -0.1511 -0.1244 -0.1358 3.2640
# Note 1:
# Create vector of predictions for training
# data.
# Note 2:
# Function to return the log likelihoods for
# each data point. Argument y is the true outcome
# (as a numeric variable, 0/1); argument py is the
# predicted probability.
# Note 3:
# Calculate deviance residuals.
# example 7.17 of section 7.2.5
# (example 7.17 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Computing deviance
# Log likelihood of a dataset under predicted probabilities.
# y is the outcome in numeric form (1 = positive, 0 = negative);
# py is the predicted probability that y == 1.
loglikelihood <- function(y, py) {
  pointwise <- y * log(py) + (1 - y) * log(1 - py)
  sum(pointwise)
}
pnull <- mean(as.numeric(train$atRisk)) # Note: 2
null.dev <- -2*loglikelihood(as.numeric(train$atRisk), pnull) # Note: 3
pnull
## [1] 0.01920912
null.dev
## [1] 2698.716
model$null.deviance # Note: 4
## [1] 2698.716
pred <- predict(model, newdata=train, type="response") # Note: 5
resid.dev <- -2*loglikelihood(as.numeric(train$atRisk), pred) # Note: 6
resid.dev
## [1] 2462.992
model$deviance # Note: 7
## [1] 2462.992
testy <- as.numeric(test$atRisk) # Note: 8
testpred <- predict(model, newdata=test,
type="response")
pnull.test <- mean(testy)
null.dev.test <- -2*loglikelihood(testy, pnull.test)
resid.dev.test <- -2*loglikelihood(testy, testpred)
pnull.test
## [1] 0.0172713
null.dev.test
## [1] 2110.91
resid.dev.test
## [1] 1947.094
# Note 1:
# Function to calculate the log likelihood of
# a dataset. Variable y is the outcome
# in numeric form (1 for positive examples, 0 for
# negative). Variable py is the
# predicted probability that
# y==1.
# Note 2:
# Calculate rate of positive examples in
# dataset.
# Note 3:
# Calculate null deviance.
# Note 4:
# For training data, the null deviance is
# stored in the slot model$null.deviance.
# Note 5:
# Predict probabilities for training
# data.
# Note 6:
# Calculate deviance of model for training
# data.
# Note 7:
# For training data, model deviance is stored
# in the slot model$deviance.
# Note 8:
# Calculate null deviance and residual
# deviance for test data.
# example 7.18 of section 7.2.5
# (example 7.18 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the significance of the observed fit
df.null <- dim(train)[[1]] - 1 # Note: 1
df.model <- dim(train)[[1]] - length(model$coefficients) # Note: 2
df.null
## [1] 14211
df.model
## [1] 14198
delDev <- null.dev - resid.dev # Note: 3
deldf <- df.null - df.model
p <- pchisq(delDev, deldf, lower.tail=F) # Note: 4
delDev
## [1] 235.724
deldf
## [1] 13
p
## [1] 5.84896e-43
# Note 1:
# Null model has (number of data points - 1)
# degrees of freedom.
# Note 2:
# Fitted model has (number of data points -
# number of coefficients) degrees of freedom.
# Note 3:
# Compute difference in deviances and
# difference in degrees of freedom.
# Note 4:
# Estimate probability of seeing the observed
# difference in deviances under null model (the
# p-value) using chi-squared distribution.
# example 7.19 of section 7.2.5
# (example 7.19 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the pseudo R-squared
pr2 <- 1-(resid.dev/null.dev)
print(pr2)
## [1] 0.08734674
pr2.test <- 1-(resid.dev.test/null.dev.test)
print(pr2.test)
## [1] 0.07760427
# example 7.20 of section 7.2.5
# (example 7.20 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the Akaike information criterion
aic <- 2*(length(model$coefficients) -
loglikelihood(as.numeric(train$atRisk), pred))
aic
## [1] 2490.992
# example 8.1 of section 8.1.2
# (example 8.1 of section 8.1.2) : Unsupervised methods : Cluster analysis : Preparing the data
# Title: Reading the protein data
protein <- read.table("protein.txt", sep="\t", header=TRUE)
summary(protein)
## Country RedMeat WhiteMeat Eggs
## Albania : 1 Min. : 4.400 Min. : 1.400 Min. :0.500
## Austria : 1 1st Qu.: 7.800 1st Qu.: 4.900 1st Qu.:2.700
## Belgium : 1 Median : 9.500 Median : 7.800 Median :2.900
## Bulgaria : 1 Mean : 9.828 Mean : 7.896 Mean :2.936
## Czechoslovakia: 1 3rd Qu.:10.600 3rd Qu.:10.800 3rd Qu.:3.700
## Denmark : 1 Max. :18.000 Max. :14.000 Max. :4.700
## (Other) :19
## Milk Fish Cereals Starch
## Min. : 4.90 Min. : 0.200 Min. :18.60 Min. :0.600
## 1st Qu.:11.10 1st Qu.: 2.100 1st Qu.:24.30 1st Qu.:3.100
## Median :17.60 Median : 3.400 Median :28.00 Median :4.700
## Mean :17.11 Mean : 4.284 Mean :32.25 Mean :4.276
## 3rd Qu.:23.30 3rd Qu.: 5.800 3rd Qu.:40.10 3rd Qu.:5.700
## Max. :33.70 Max. :14.200 Max. :56.70 Max. :6.500
##
## Nuts Fr.Veg
## Min. :0.700 Min. :1.400
## 1st Qu.:1.500 1st Qu.:2.900
## Median :2.400 Median :3.800
## Mean :3.072 Mean :4.136
## 3rd Qu.:4.700 3rd Qu.:4.900
## Max. :7.800 Max. :7.900
# example 8.2 of section 8.1.2
# (example 8.2 of section 8.1.2) : Unsupervised methods : Cluster analysis : Preparing the data
# Title: Rescaling the dataset
# Z-scale the numeric protein-consumption columns so that clustering
# distances are not dominated by any one unit of measurement.
vars.to.use <- colnames(protein)[-1] # Note: 1
pmatrix <- scale(protein[,vars.to.use]) # Note: 2
# Keep the per-column means and standard deviations that scale() stored
# as attributes, so scaled points can be mapped back ("unscaled") later.
pcenter <- attr(pmatrix, "scaled:center") # Note: 3
pscale <- attr(pmatrix, "scaled:scale")
# Note 1:
# Use all the columns except the first
# (Country).
# Note 2:
# The output of scale() is a matrix. For the
# purposes of this chapter, you can think of a
# matrix as a data frame with all numeric columns
# (this isn’t strictly true, but it’s close enough).
# Note 3:
# The scale() function annotates its output
# with two attributes—scaled:center returns the mean
# values of all the columns, and scaled:scale
# returns the standard deviations. You’ll store
# these away so you can “unscale” the data
# later.
# example 8.3 of section 8.1.3
# (example 8.3 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Hierarchical clustering
# Hierarchical clustering of the scaled protein data: build the pairwise
# Euclidean distance matrix, cluster with Ward's method, and draw the
# dendrogram labeled by country.
d <- dist(pmatrix, method="euclidean") # Note: 1
pfit <- hclust(d, method="ward.D") # Note: 2
plot(pfit, labels=protein$Country) # Note: 3
# Note 1:
# Create the distance matrix.
# Note 2:
# Do the clustering.
# Note 3:
# Plot the dendrogram.
# informalexample 8.5 of section 8.1.3
# (informalexample 8.5 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
rect.hclust(pfit, k=5)
# example 8.4 of section 8.1.3
# (example 8.4 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Extracting the clusters found by hclust()
groups <- cutree(pfit, k=5)
# Print the members of each of the k clusters, showing an identifier
# column plus a few feature columns.
#
# labels: vector of cluster labels (values 1..k), one per row of `data`.
# k:      number of clusters to print.
# data:   data frame to print rows from; defaults to the protein dataset
#         (lazy default, so `protein` only needs to exist when used).
# cols:   columns to display; defaults preserve the book's hardcoded set.
#
# The original was hardcoded to `protein`; the defaulted `data`/`cols`
# parameters keep the old two-argument calls working unchanged while
# letting the helper be reused on other datasets.
print_clusters <- function(labels, k, data = protein,
                           cols = c("Country", "RedMeat", "Fish", "Fr.Veg")) {
  for (i in seq_len(k)) {  # seq_len() is safe for k == 0, unlike 1:k
    print(paste("cluster", i))
    print(data[labels == i, cols])
  }
}
print_clusters(groups, 5)
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
# Note 1:
# A convenience function for printing out the
# countries in each cluster, along with the values
# for red meat, fish, and fruit/vegetable
# consumption. We’ll use this function throughout
# this section. Note that the function is hardcoded
# for the protein dataset.
# example 8.5 of section 8.1.3
# (example 8.5 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Projecting the clusters on the first two principal components
# Visualize the 9-dimensional clustering by projecting the scaled data
# onto its first two principal components and plotting each country,
# shaped by cluster label.
library(ggplot2)
princ <- prcomp(pmatrix) # Note: 1
nComp <- 2
# predict() rotates the data into principal-component space; keep only
# the first two coordinates for plotting.
project <- predict(princ, newdata=pmatrix)[,1:nComp] # Note: 2
project.plus <- cbind(as.data.frame(project), # Note: 3
cluster=as.factor(groups),
country=protein$Country)
ggplot(project.plus, aes(x=PC1, y=PC2)) + # Note: 4
geom_point(aes(shape=cluster)) +
geom_text(aes(label=country),
hjust=0, vjust=1)
# Note 1:
# Calculate the principal components of the
# data.
# Note 2:
# The predict() function will rotate the data
# into the space described by the principal
# components. We only want the projection on the
# first two axes.
# Note 3:
# Create a data frame with the transformed
# data, along with the cluster label and country
# label of each point.
# Note 4:
# Plot it.
# example 8.6 of section 8.1.3
# (example 8.6 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Running clusterboot() on the protein data
library(fpc) # Note: 1
kbest.p<-5 # Note: 2
cboot.hclust <- clusterboot(pmatrix,clustermethod=hclustCBI, # Note: 3
method="ward.D", k=kbest.p)
summary(cboot.hclust$result) # Note: 4
## Length Class Mode
## result 7 hclust list
## noise 1 -none- logical
## nc 1 -none- numeric
## clusterlist 5 -none- list
## partition 25 -none- numeric
## clustermethod 1 -none- character
## nccl 1 -none- numeric
groups<-cboot.hclust$result$partition # Note: 5
print_clusters(groups, kbest.p) # Note: 6
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
cboot.hclust$bootmean # Note: 7
## [1] 0.7905000 0.7990913 0.6173056 0.9312857 0.7560000
cboot.hclust$bootbrd # Note: 8
## [1] 25 11 47 8 35
# Note 1:
# Load the fpc package. You may have to
# install it first. We’ll discuss installing R
# packages in appendix .
# Note 2:
# Set the desired number of clusters.
# Note 3:
# Run clusterboot() with hclust
# ('clustermethod=hclustCBI') using Ward’s method
# ('method="ward.D"') and kbest.p clusters
# ('k=kbest.p'). Return the results in an object
# called cboot.hclust.
# Note 4:
# The results of the clustering are in
# cboot.hclust$result. The output of the hclust()
# function is in cboot.hclust$result$result.
# Note 5:
# cboot.hclust$result$partition returns a
# vector of clusterlabels.
# Note 6:
# The clusters are the same as those produced
# by a direct call to hclust().
# Note 7:
# The vector of cluster stabilities.
# Note 8:
# The count of how many times each cluster was
# dissolved. By default clusterboot() runs 100
# bootstrap iterations.
# example 8.7 of section 8.1.3
# (example 8.7 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Calculating total within sum of squares
# Squared Euclidean distance between two numeric vectors.
sqr_edist <- function(x, y) {
  diffs <- x - y
  sum(diffs * diffs)
}
# Within-sum-of-squares (WSS) of a single cluster, represented as a
# matrix with one point per row: the total squared distance of every
# point from the cluster centroid (the column-wise mean).
wss.cluster <- function(clustermat) {
  centroid <- apply(clustermat, 2, FUN = mean)
  point.ss <- apply(clustermat, 1, FUN = function(pt) sum((pt - centroid)^2))
  sum(point.ss)
}
# Total within-sum-of-squares of a labeled clustering: the sum over all
# clusters of each cluster's WSS.
#
# dmatrix: matrix (or data frame) of data points, one per row.
# labels:  cluster label for each row.
#
# The original looped over i in 1:length(unique(labels)), which silently
# assumed the labels are exactly the integers 1..k; any other label set
# (e.g. a subset of cutree() labels, or non-contiguous codes) produced
# empty clusters and NaN centroids.  Iterating over the distinct label
# values handles arbitrary labels and gives identical results when the
# labels are 1..k in order of first appearance.
wss.total <- function(dmatrix, labels) {
  wsstot <- 0
  for (lbl in unique(labels)) {
    clustermat <- dmatrix[labels == lbl, , drop = FALSE]  # keep matrix shape
    centroid <- apply(clustermat, 2, FUN = mean)
    wsstot <- wsstot +
      sum(apply(clustermat, 1, FUN = function(pt) sum((pt - centroid)^2)))
  }
  wsstot
}
# Note 1:
# Function to calculate squared distance
# between two vectors.
# Note 2:
# Function to calculate the WSS for a single
# cluster, which is represented as a matrix (one row
# for every point).
# Note 3:
# Calculate the centroid of the cluster (the
# mean of all the points).
# Note 4:
# Calculate the squared difference of every
# point in the cluster from the centroid, and sum
# all the distances.
# Note 5:
# Function to compute the total WSS from a set
# of data points and cluster labels.
# Note 6:
# Extract each cluster, calculate the
# cluster’s WSS, and sum all the values.
# example 8.8 of section 8.1.3
# (example 8.8 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: The Calinski-Harabasz index
# Total sum of squares of a dataset: the summed squared distance of
# every point (row) from the grand mean of all points.
totss <- function(dmatrix) {
  grandmean <- apply(dmatrix, 2, FUN = mean)
  devs <- apply(dmatrix, 1, FUN = function(pt) sum((pt - grandmean)^2))
  sum(devs)
}
# Compute the Calinski-Harabasz (CH) index for clusterings with 1..kmax
# clusters over dmatrix, producing each clustering with either kmeans or
# hclust (Ward's method).  Returns list(crit = CH index per k,
# wss = within-sum-of-squares per k, totss = total sum of squares).
# Relies on the totss() and wss.total() helpers defined above.
ch_criterion <- function(dmatrix, kmax, method="kmeans") { # Note: 2
if(!(method %in% c("kmeans", "hclust"))) {
stop("method must be one of c('kmeans', 'hclust')")
}
npts <- dim(dmatrix)[1] # number of rows.
# The local variable deliberately shadows the totss() helper; the call
# on the right-hand side still resolves to the function.
totss <- totss(dmatrix) # Note: 3
wss <- numeric(kmax)
crit <- numeric(kmax)
# WSS for k=1 is just the total sum of squares, computed from column variances.
wss[1] <- (npts-1)*sum(apply(dmatrix, 2, var)) # Note: 4
for(k in 2:kmax) { # Note: 5
if(method=="kmeans") {
clustering<-kmeans(dmatrix, k, nstart=10, iter.max=100)
wss[k] <- clustering$tot.withinss
}else { # hclust # Note: 6
d <- dist(dmatrix, method="euclidean")
pfit <- hclust(d, method="ward.D")
labels <- cutree(pfit, k=k)
wss[k] <- wss.total(dmatrix, labels)
}
}
bss <- totss - wss # Note: 7
# k=1 divides by zero here, so crit[1] is NaN by construction (matches
# the NaN shown in the later transcript output).
crit.num <- bss/(0:(kmax-1)) # Note: 8
crit.denom <- wss/(npts - 1:kmax) # Note: 9
list(crit = crit.num/crit.denom, wss = wss, totss = totss) # Note: 10
}
# Note 1:
# Convenience function to calculate the total
# sum of squares.
# Note 2:
# A function to calculate the CH index for a
# number of clusters from 1 to kmax.
# Note 3:
# The total sum of squares is independent of
# the clustering.
# Note 4:
# Calculate WSS for k=1 (which is really just
# total sum of squares).
# Note 5:
# Calculate WSS for k from 2 to kmax. kmeans()
# returns the total WSS as one of its
# outputs.
# Note 6:
# For hclust(), calculate total WSS by
# hand.
# Note 7:
# Calculate BSS for k from 1 to kmax.
# Note 8:
# Normalize BSS by k-1.
# Note 9:
# Normalize WSS by npts - k.
# Note 10:
# Return a vector of CH indices and of WSS for
# k from 1 to kmax. Also return total sum of
# squares.
# example 8.9 of section 8.1.3
# (example 8.9 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Evaluating clusterings with different numbers of clusters
library(reshape2) # Note: 1
clustcrit <- ch_criterion(pmatrix, 10, method="hclust") # Note: 2
critframe <- data.frame(k=1:10, ch=scale(clustcrit$crit), # Note: 3
wss=scale(clustcrit$wss))
critframe <- melt(critframe, id.vars=c("k"), # Note: 4
variable.name="measure",
value.name="score")
ggplot(critframe, aes(x=k, y=score, color=measure)) + # Note: 5
geom_point(aes(shape=measure)) + geom_line(aes(linetype=measure)) +
scale_x_continuous(breaks=1:10, labels=1:10)
# Note 1:
# Load the reshape2 package (for the melt()
# function).
# Note 2:
# Calculate both criteria for 1–10
# clusters.
# Note 3:
# Create a data frame with the number of
# clusters, the CH criterion, and the WSS criterion.
# We’ll scale both the CH and WSS criteria to
# similar ranges so that we can plot them both on
# the same graph.
# Note 4:
# Use the melt() function to put the data
# frame in a shape suitable for ggplot
# Note 5:
# Plot it.
# example 8.10 of section 8.1.4
# (example 8.10 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Running k-means with k=5
pclusters <- kmeans(pmatrix, kbest.p, nstart=100, iter.max=100) # Note: 1
summary(pclusters) # Note: 2
## Length Class Mode
## cluster 25 -none- numeric
## centers 45 -none- numeric
## totss 1 -none- numeric
## withinss 5 -none- numeric
## tot.withinss 1 -none- numeric
## betweenss 1 -none- numeric
## size 5 -none- numeric
pclusters$centers # Note: 3
## RedMeat WhiteMeat Eggs Milk Fish
## 1 -0.807569986 -0.8719354 -1.55330561 -1.0783324 -1.0386379
## 2 0.006572897 -0.2290150 0.19147892 1.3458748 1.1582546
## 3 -0.570049402 0.5803879 -0.08589708 -0.4604938 -0.4537795
## 4 1.011180399 0.7421332 0.94084150 0.5700581 -0.2671539
## 5 -0.508801956 -1.1088009 -0.41248496 -0.8320414 0.9819154
## Cereals Starch Nuts Fr.Veg
## 1 1.7200335 -1.4234267 0.9961313 -0.64360439
## 2 -0.8722721 0.1676780 -0.9553392 -1.11480485
## 3 0.3181839 0.7857609 -0.2679180 0.06873983
## 4 -0.6877583 0.2288743 -0.5083895 0.02161979
## 5 0.1300253 -0.1842010 1.3108846 1.62924487
pclusters$size # Note: 4
## [1] 4 4 5 8 4
groups <- pclusters$cluster # Note: 5
print_clusters(groups, kbest.p) # Note: 6
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
# Note 1:
# Run kmeans() with five clusters (kbest.p=5),
# 100 random starts, and 100 maximum iterations per
# run.
# Note 2:
# kmeans() returns all the sum of squares
# measures.
# Note 3:
# pclusters$centers is a matrix whose rows are
# the centroids of the clusters. Note that
# pclusters$centers is in the scaled coordinates,
# not the original protein coordinates.
# Note 4:
# pclusters$size returns the number of points
# in each cluster. Generally (though not always) a
# good clustering will be fairly well balanced: no
# extremely small clusters and no extremely large
# ones.
# Note 5:
# pclusters$cluster is a vector of cluster
# labels.
# Note 6:
# In this case, kmeans() and hclust() returned
# the same clustering. This won’t always be
# true.
# example 8.11 of section 8.1.4
# (example 8.11 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Plotting cluster criteria
clustering.ch <- kmeansruns(pmatrix, krange=1:10, criterion="ch") # Note: 1
clustering.ch$bestk # Note: 2
## [1] 2
clustering.asw <- kmeansruns(pmatrix, krange=1:10, criterion="asw") # Note: 3
clustering.asw$bestk
## [1] 3
clustering.ch$crit # Note: 4
## [1] 0.000000 14.094814 11.417985 10.418801 10.011797 9.964967
## [7] 9.861682 9.412089 9.166676 9.075569
clustcrit$crit # Note: 5
## [1] NaN 12.215107 10.359587 9.690891 10.011797 9.964967
## [7] 9.506978 9.092065 8.822406 8.695065
critframe <- data.frame(k=1:10, ch=scale(clustering.ch$crit), # Note: 6
asw=scale(clustering.asw$crit))
critframe <- melt(critframe, id.vars=c("k"),
variable.name="measure",
value.name="score")
ggplot(critframe, aes(x=k, y=score, color=measure)) +
geom_point(aes(shape=measure)) + geom_line(aes(linetype=measure)) +
scale_x_continuous(breaks=1:10, labels=1:10)
summary(clustering.ch) # Note: 7
## Length Class Mode
## cluster 25 -none- numeric
## centers 18 -none- numeric
## totss 1 -none- numeric
## withinss 2 -none- numeric
## tot.withinss 1 -none- numeric
## betweenss 1 -none- numeric
## size 2 -none- numeric
## crit 10 -none- numeric
## bestk 1 -none- numeric
# Note 1:
# Run kmeansruns() from 1–10 clusters, and the
# CH criterion. By default, kmeansruns() uses 100
# random starts and 100 maximum iterations per
# run.
# Note 2:
# The CH criterion picks two clusters.
# Note 3:
# Run kmeansruns() from 1–10 clusters, and the
# average silhouette width criterion. Average
# silhouette width picks 3 clusters.
# Note 4:
# The vector of criterion values is called
# crit.
# Note 5:
# Compare the CH values for kmeans() and
# hclust(). They’re not quite the same, because the
# two algorithms didn’t pick the same
# clusters.
# Note 6:
# Plot the values for the two criteria.
# Note 7:
# kmeansruns() also returns the output of
# kmeans for k=bestk.
# example 8.12 of section 8.1.4
# (example 8.12 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Running clusterboot() with k-means
kbest.p<-5
cboot<-clusterboot(pmatrix, clustermethod=kmeansCBI,
runs=100,iter.max=100,
krange=kbest.p, seed=15555) # Note: 1
groups <- cboot$result$partition
print_clusters(cboot$result$partition, kbest.p)
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
cboot$bootmean
## [1] 0.8670000 0.8420714 0.6147024 0.7647341 0.7508333
cboot$bootbrd
## [1] 15 20 49 17 32
# Note 1:
# We’ve set the seed for the random generator
# so the results are reproducible.
# example 8.13 of section 8.1.5
# (example 8.13 of section 8.1.5) : Unsupervised methods : Cluster analysis : Assigning new points to clusters
# Title: A function to assign points to a cluster
# Assign a new data point to the nearest centroid of an existing
# clustering.
# newpt:   the new observation (numeric vector).
# centers: matrix with one cluster centroid per row.
# xcenter, xscale: centering/scaling applied to newpt before comparing;
#   pass the scaled:center and scaled:scale attributes if the clustering
#   was built on scale()d data.  Defaults leave the point unchanged.
# Returns the index of the closest centroid (by squared Euclidean distance).
assign_cluster <- function(newpt, centers, xcenter=0, xscale=1) {
  scaled.pt <- (newpt - xcenter)/xscale
  dist.sq <- apply(centers, 1, FUN = function(ctr) sum((ctr - scaled.pt)^2))
  which.min(dist.sq)
}
# Note 1:
# A function to assign a new data point newpt to
# a clustering described by centers, a matrix where
# each row is a cluster centroid. If the data was
# scaled (using scale()) before clustering, then
# xcenter and xscale are the scaled:center and
# scaled:scale attributes, respectively.
# Note 2:
# Center and scale the new data point.
# Note 3:
# Calculate how far the new data point is from
# each of the cluster centers.
# Note 4:
# Return the cluster number of the closest
# centroid.
# example 8.14 of section 8.1.5
# (example 8.14 of section 8.1.5) : Unsupervised methods : Cluster analysis : Assigning new points to clusters
# Title: An example of assigning points to cluster
# Draw n points from a multidimensional Gaussian with independent
# coordinates.  The dimension is length(mean); column j is drawn from
# N(mean[[j]], sd[[j]]).  Columns are named colstr1, colstr2, ...
# Columns are generated one at a time, in order, so the random-number
# stream is consumed exactly as in a per-column rnorm() loop.
rnorm.multidim <- function(n, mean, sd, colstr="x") {
  ndim <- length(mean)
  out <- matrix(0, nrow = n, ncol = ndim)
  for (j in seq_len(ndim)) {
    out[, j] <- rnorm(n, mean = mean[[j]], sd = sd[[j]])
  }
  colnames(out) <- paste0(colstr, seq_len(ndim))
  out
}
mean1 <- c(1, 1, 1) # Note: 2
sd1 <- c(1, 2, 1)
mean2 <- c(10, -3, 5)
sd2 <- c(2, 1, 2)
mean3 <- c(-5, -5, -5)
sd3 <- c(1.5, 2, 1)
clust1 <- rnorm.multidim(100, mean1, sd1) # Note: 3
clust2 <- rnorm.multidim(100, mean2, sd2)
clust3 <- rnorm.multidim(100, mean3, sd3)
toydata <- rbind(clust3, rbind(clust1, clust2))
tmatrix <- scale(toydata) # Note: 4
tcenter <- attr(tmatrix, "scaled:center") # Note: 5
tscale<-attr(tmatrix, "scaled:scale")
kbest.t <- 3
tclusters <- kmeans(tmatrix, kbest.t, nstart=100, iter.max=100) # Note: 6
tclusters$size # Note: 7
## [1] 100 101 99
# Invert scale(): map a scaled point back into the coordinates of the
# original dataset, given the centering and scaling vectors that were
# used (the scaled:center and scaled:scale attributes).
unscale <- function(scaledpt, centervec, scalevec) {
  centervec + scaledpt * scalevec
}
unscale(tclusters$centers[1,], tcenter, tscale) # Note: 9
## x1 x2 x3
## 9.978961 -3.097584 4.864689
mean2
## [1] 10 -3 5
unscale(tclusters$centers[2,], tcenter, tscale) # Note: 10
## x1 x2 x3
## -4.979523 -4.927404 -4.908949
mean3
## [1] -5 -5 -5
unscale(tclusters$centers[3,], tcenter, tscale) # Note: 11
## x1 x2 x3
## 1.0003356 1.3037825 0.9571058
mean1
## [1] 1 1 1
# Sanity check: draw one fresh point from each original distribution and
# confirm assign_cluster() maps it to the corresponding discovered cluster.
# NOTE(review): the two calls below reuse sd1 together with mean2 and
# mean3; the parallel structure of the example suggests sd2 and sd3 may
# have been intended — confirm against the source text.  The cluster
# assignments are unlikely to change either way, since the means are far
# apart relative to all three sd vectors.
assign_cluster(rnorm.multidim(1, mean1, sd1), # Note: 12
tclusters$centers,
tcenter, tscale)
## 3 # Note: 13
## 3
assign_cluster(rnorm.multidim(1, mean2, sd1), # Note: 14
tclusters$centers,
tcenter, tscale)
## 1 # Note: 15
## 1
assign_cluster(rnorm.multidim(1, mean3, sd1), # Note: 16
tclusters$centers,
tcenter, tscale)
## 2 # Note: 17
## 2
# Note 1:
# A function to generate n points drawn from a
# multidimensional Gaussian distribution with
# centroid mean and standard deviation sd. The
# dimension of the distribution is given by the
# length of the vector mean.
# Note 2:
# The parameters for three Gaussian
# distributions.
# Note 3:
# Create a dataset with 100 points each drawn
# from the above distributions.
# Note 4:
# Scale the dataset.
# Note 5:
# Store the centering and scaling parameters for
# future use.
# Note 6:
# Cluster the dataset, using k-means with three
# clusters.
# Note 7:
# The resulting clusters are about the right
# size.
# Note 8:
# A function to “unscale” data points (put them
# back in the coordinates of the original
# dataset).
# Note 9:
# Unscale the first centroid. It corresponds to
# our original distribution 2.
# Note 10:
# The second centroid corresponds to the
# original distribution 3.
# Note 11:
# The third centroid corresponds to the original
# distribution 1.
# Note 12:
# Generate a random point from the original
# distribution 1 and assign it to one of the
# discovered clusters.
# Note 13:
# It’s assigned to cluster 3, as we would
# expect.
# Note 14:
# Generate a random point from the original
# distribution 2 and assign it.
# Note 15:
# It’s assigned to cluster 1.
# Note 16:
# Generate a random point from the original
# distribution 3 and assign it.
# Note 17:
# It’s assigned to cluster 2.
# example 8.15 of section 8.2.3
# (example 8.15 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Reading in the book data
library(arules) # Note: 1
bookbaskets <- read.transactions("bookdata.tsv.gz", format="single", # Note: 2
sep="\t", # Note: 3
cols=c("userid", "title"), # Note: 4
rm.duplicates=T) # Note: 5
# Note 1:
# Load the arules package.
# Note 2:
# Specify the file and the file format.
# Note 3:
# Specify the column separator (a tab).
# Note 4:
# Specify the column of transaction IDs and of
# item IDs, respectively.
# Note 5:
# Tell the function to look for and remove
# duplicate entries (for example, multiple entries
# for “The Hobbit” by the same user).
# example 8.16 of section 8.2.3
# (example 8.16 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Examining the transaction data
class(bookbaskets) # Note: 1
## [1] "transactions"
## attr(,"package")
## [1] "arules"
bookbaskets # Note: 2
## transactions in sparse format with
## 92108 transactions (rows) and
## 220447 items (columns)
dim(bookbaskets) # Note: 3
## [1] 92108 220447
colnames(bookbaskets)[1:5] # Note: 4
## [1] " A Light in the Storm:[...]"
## [2] " Always Have Popsicles"
## [3] " Apple Magic"
## [4] " Ask Lily"
## [5] " Beyond IBM: Leadership Marketing and Finance for the 1990s"
rownames(bookbaskets)[1:5] # Note: 5
## [1] "10" "1000" "100001" "100002" "100004"
# Note 1:
# The object is of class transactions.
# Note 2:
# Printing the object tells you its
# dimensions.
# Note 3:
# You can also use dim() to see the dimensions
# of the matrix.
# Note 4:
# The columns are labeled by book
# title.
# Note 5:
# The rows are labeled by customer.
# informalexample 8.7 of section 8.2.3
# (informalexample 8.7 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
basketSizes <- size(bookbaskets)
summary(basketSizes)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.0 1.0 1.0 11.1 4.0 10250.0
# example 8.17 of section 8.2.3
# (example 8.17 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Examining the size distribution
quantile(basketSizes, probs=seq(0,1,0.1)) # Note: 1
## 0% 10% 20% 30% 40% 50% 60% 70% 80% 90% 100%
## 1 1 1 1 1 1 2 3 5 13 10253
library(ggplot2) # Note: 2
ggplot(data.frame(count=basketSizes)) +
geom_density(aes(x=count), binwidth=1) +
scale_x_log10()
# Note 1:
# Look at the basket size distribution, in 10%
# increments.
# Note 2:
# Plot the distribution to get a better
# look.
# informalexample 8.8 of section 8.2.3
# (informalexample 8.8 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
bookFreq <- itemFrequency(bookbaskets)
## summary(bookFreq)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.086e-05 1.086e-05 1.086e-05 5.035e-05 3.257e-05 2.716e-02
sum(bookFreq)
## [1] 11.09909
# example 8.18 of section 8.2.3
# (example 8.18 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding the ten most frequent books
bookCount <- (bookFreq/sum(bookFreq))*sum(basketSizes) # Note: 1
summary(bookCount)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.000 1.000 1.000 4.637 3.000 2502.000
orderedBooks <- sort(bookCount, decreasing=T) # Note: 2
orderedBooks[1:10]
## Wild Animus
## 2502
## The Lovely Bones: A Novel
## 1295
## She's Come Undone
## 934
## The Da Vinci Code
## 905
## Harry Potter and the Sorcerer's Stone
## 832
## The Nanny Diaries: A Novel
## 821
## A Painted House
## 819
## Bridget Jones's Diary
## 772
## The Secret Life of Bees
## 762
## Divine Secrets of the Ya-Ya Sisterhood: A Novel
## 737
orderedBooks[1]/dim(bookbaskets)[1] # Note: 3
## Wild Animus
## 0.02716376
# Note 1:
# Get the absolute count of book
# occurrences.
# Note 2:
# Sort the count and list the 10 most popular
# books.
# Note 3:
# The most popular book in the dataset
# occurred in fewer than 3% of the baskets.
# informalexample 8.9 of section 8.2.3
# (informalexample 8.9 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
bookbaskets_use <- bookbaskets[basketSizes > 1]
dim(bookbaskets_use)
## [1] 40822 220447
# example 8.19 of section 8.2.3
# (example 8.19 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding the association rules
rules <- apriori(bookbaskets_use, # Note: 1
parameter =list(support = 0.002, confidence=0.75))
summary(rules)
## set of 191 rules # Note: 2
##
## rule length distribution (lhs + rhs):sizes # Note: 3
## 2 3 4 5
## 11 100 66 14
##
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2.000 3.000 3.000 3.435 4.000 5.000
##
## summary of quality measures: # Note: 4
## support confidence lift
## Min. :0.002009 Min. :0.7500 Min. : 40.89
## 1st Qu.:0.002131 1st Qu.:0.8113 1st Qu.: 86.44
## Median :0.002278 Median :0.8468 Median :131.36
## Mean :0.002593 Mean :0.8569 Mean :129.68
## 3rd Qu.:0.002695 3rd Qu.:0.9065 3rd Qu.:158.77
## Max. :0.005830 Max. :0.9882 Max. :321.89
##
## mining info: # Note: 5
## data ntransactions support confidence
## bookbaskets_use 40822 0.002 0.75
# Note 1:
# Call apriori() with a minimum support of
# 0.002 and a minimum confidence of 0.75.
# Note 2:
# The summary of the apriori() output reports
# the number of rules found;...
# Note 3:
# ...the distribution of rule lengths (in this
# example, most rules contain 3 items—2 on the left
# side, X (lhs), and one on the right side, Y
# (rhs));...
# Note 4:
# ...a summary of rule quality measures,
# including support and confidence;...
# Note 5:
# ...and some information on how apriori() was
# called.
# example 8.20 of section 8.2.3
# (example 8.20 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Scoring rules
# Score the mined rules with additional interest measures beyond the
# support/confidence used during mining.
measures <- interestMeasure(rules, # Note: 1
                 measure=c("coverage", "fishersExactTest"), # Note: 2
                 transactions=bookbaskets_use) # Note: 3
summary(measures)
## coverage fishersExactTest
## Min. :0.002082 Min. : 0.000e+00
## 1st Qu.:0.002511 1st Qu.: 0.000e+00
## Median :0.002719 Median : 0.000e+00
## Mean :0.003039 Mean :5.080e-138
## 3rd Qu.:0.003160 3rd Qu.: 0.000e+00
## Max. :0.006982 Max. :9.702e-136
# Note 1:
# The call to interestMeasure() takes as
# arguments the discovered rules,...
# Note 2:
# ...a list of interest measures to
# apply,...
# Note 3:
# ...and a dataset to evaluate the interest
# measures over. This is usually the same set used
# to mine the rules, but it needn’t be. For
# instance, you can evaluate the rules over the full
# dataset, bookbaskets, to get coverage estimates
# that reflect all the customers, not just the ones
# who showed interest in more than one book.
# informalexample 8.10 of section 8.2.3
# (informalexample 8.10 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Show the five rules with the highest confidence.
inspect(head((sort(rules, by="confidence")), n=5))
# example 8.21 of section 8.2.3
# (example 8.21 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding rules with restrictions
# Re-mine with looser thresholds, but restrict the rule consequent
# (right-hand side) to one title of interest.
brules <- apriori(bookbaskets_use,
                  parameter =list(support = 0.001, # Note: 1
                                  confidence=0.6),
                  appearance=list(rhs=c("The Lovely Bones: A Novel"), # Note: 2
                                  default="lhs")) # Note: 3
summary(brules)
## set of 46 rules
##
## rule length distribution (lhs + rhs):sizes
## 3 4
## 44 2
##
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 3.000 3.000 3.000 3.043 3.000 4.000
##
## summary of quality measures:
## support confidence lift
## Min. :0.001004 Min. :0.6000 Min. :21.81
## 1st Qu.:0.001029 1st Qu.:0.6118 1st Qu.:22.24
## Median :0.001102 Median :0.6258 Median :22.75
## Mean :0.001132 Mean :0.6365 Mean :23.14
## 3rd Qu.:0.001219 3rd Qu.:0.6457 3rd Qu.:23.47
## Max. :0.001396 Max. :0.7455 Max. :27.10
##
## mining info:
## data ntransactions support confidence
## bookbaskets_use 40822 0.001 0.6
# Note 1:
# Relax the minimum support to 0.001 and the
# minimum confidence to 0.6.
# Note 2:
# Only The Lovely Bones
# is allowed to appear on the right side of the
# rules.
# Note 3:
# By default, all the books can go into the
# left side of the rules.
# example 8.22 of section 8.2.3
# (example 8.22 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Inspecting rules
# Rank the restricted rules by confidence and show the antecedent
# (left-hand) itemsets of the top five.
brulesConf <- sort(brules, by="confidence") # Note: 1
inspect(head(lhs(brulesConf), n=5)) # Note: 2
## items
## 1 {Divine Secrets of the Ya-Ya Sisterhood: A Novel,
## Lucky : A Memoir}
## 2 {Lucky : A Memoir,
## The Notebook}
## 3 {Lucky : A Memoir,
## Wild Animus}
## 4 {Midwives: A Novel,
## Wicked: The Life and Times of the Wicked Witch of the West}
## 5 {Lucky : A Memoir,
## Summer Sisters}
# Note 1:
# Sort the rules by confidence.
# Note 2:
# Use the lhs() function to get the left
# itemsets of each rule; then inspect the top
# five.
# example 8.23 of section 8.2.3
# (example 8.23 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Inspecting rules with restrictions
# Drop rules whose left side mentions "Lucky : A Memoir", then re-rank
# the remainder by confidence.
brulesSub <- subset(brules, subset=!(lhs %in% "Lucky : A Memoir")) # Note: 1
brulesConf <- sort(brulesSub, by="confidence")
inspect(head(lhs(brulesConf), n=5))
## items
## 1 {Midwives: A Novel,
## Wicked: The Life and Times of the Wicked Witch of the West}
## 2 {She's Come Undone,
## The Secret Life of Bees,
## Wild Animus}
## 3 {A Walk to Remember,
## The Nanny Diaries: A Novel}
## 4 {Beloved,
## The Red Tent}
## 5 {The Da Vinci Code,
## The Reader}
# Note 1:
# Restrict to the subset of rules where
# Lucky is not in the left
# side.
# example 9.1 of section 9.1.1
# (example 9.1 of section 9.1.1) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using bagging to improve prediction
# Title: Preparing Spambase data and evaluating the performance of decision trees
# Load the Spambase data and split into training (rgroup >= 10, ~90%)
# and test (rgroup < 10, ~10%) sets. # Note: 1
# Use TRUE rather than the reassignable alias T.
spamD <- read.table('spamD.tsv',header=TRUE,sep='\t') # Note: 1
spamTrain <- subset(spamD,spamD$rgroup>=10)
spamTest <- subset(spamD,spamD$rgroup<10)
# Every column except the split key and the label is a predictor.
spamVars <- setdiff(colnames(spamD),list('rgroup','spam'))
# Binary classification formula: TRUE corresponds to spam. # Note: 2
spamFormula <- as.formula(paste('spam=="spam"', # Note: 2
        paste(spamVars,collapse=' + '),sep=' ~ '))
# Log likelihood of binary outcomes y under predicted probabilities py
# (used to compute deviance). Exact 0/1 probabilities are nudged away
# from the boundary so log() stays finite.
loglikelihood <- function(y, py) { # Note: 3
  eps <- 1e-12
  py.safe <- ifelse(py == 0, eps,
                    ifelse(py == 1, 1 - eps, py))
  sum(y * log(py.safe) + (1 - y) * log(1 - py.safe))
}
# Summarize a probability classifier as a one-row data frame with
# columns model, accuracy, f1, and dev.norm (deviance per data point).
# pred: predicted probabilities; truth: logical/0-1 ground truth.
accuracyMeasures <- function(pred, truth, name="model") { # Note: 4
  # Normalize deviance by the number of points so train/test compare. # Note: 5
  dev.norm <- -2*loglikelihood(as.numeric(truth), pred)/length(pred)
  # Scores above 0.5 are labeled positive. # Note: 6
  cmat <- table(truth=truth,
                pred=(pred>0.5))
  hits <- cmat[2,2]
  precision <- hits/sum(cmat[,2])
  recall <- hits/sum(cmat[2,])
  # f1 is the harmonic mean of precision and recall.
  data.frame(model=name,
             accuracy=sum(diag(cmat))/sum(cmat),
             f1=2*precision*recall/(precision+recall),
             dev.norm)
}
# Fit a single decision tree and measure its quality on both the
# training and held-out test sets. # Note: 7 / Note: 8
library(rpart) # Note: 7
treemodel <- rpart(spamFormula, spamTrain)
accuracyMeasures(predict(treemodel, newdata=spamTrain), # Note: 8
                 spamTrain$spam=="spam",
                 name="tree, training")
accuracyMeasures(predict(treemodel, newdata=spamTest),
                 spamTest$spam=="spam",
                 name="tree, test")
# Note 1:
# Load the data and split into training (90% of data)
# and test (10% of data) sets.
# Note 2:
# Use all the features and do binary classification,
# where TRUE corresponds to spam documents.
# Note 3:
# A function to calculate log likelihood (for
# calculating deviance).
# Note 4:
# A function to calculate and return various measures
# on the model: normalized deviance, prediction accuracy, and f1, which is the
# harmonic mean of precision and recall.
# Note 5:
# Normalize the deviance by the number of data points
# so that we can compare the deviance across training and test
# sets.
# Note 6:
# Convert the class probability estimator into a
# classifier by labeling documents that score greater than 0.5 as
# spam.
# Note 7:
# Load the rpart library and fit a decision tree
# model.
# Note 8:
# Evaluate the decision tree model against the
# training and test sets.
# example 9.2 of section 9.1.1
# (example 9.2 of section 9.1.1) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using bagging to improve prediction
# Title: Bagging decision trees
# Bagging setup: 100 bootstrap samples, each the size of the training
# set. # Note: 1
ntrain <- dim(spamTrain)[1]
n <- ntrain # Note: 1
ntree <- 100
# Each column of samples holds the row indices of one bootstrap sample
# of spamTrain, drawn with replacement. # Note: 2
# seq_len/TRUE replace the fragile 1:ntree and reassignable-T idioms.
samples <- sapply(seq_len(ntree), # Note: 2
                  FUN = function(iter)
                  {sample(1:ntrain, size=n, replace=TRUE)})
# Fit one decision tree per bootstrap sample; this can take a few
# minutes. # Note: 3
treelist <-lapply(seq_len(ntree), # Note: 3
                  FUN=function(iter)
                  {samp <- samples[,iter];
                  rpart(spamFormula, spamTrain[samp,])})
# Note: 4
# Average the probability predictions of an ensemble of models.
# Assumes each model's predict() returns decision probabilities, not
# hard class decisions.
#
# treelist: list of fitted models (e.g., rpart trees)
# newdata: data frame of rows to score
# Returns a numeric vector: mean predicted probability per row.
predict.bag <- function(treelist, newdata) { # Note: 4
  # seq_along() instead of 1:length(): idiomatic and safe.
  preds <- sapply(seq_along(treelist),
                  FUN=function(iter) {
                    predict(treelist[[iter]], newdata=newdata)})
  # Each column of preds is one model's votes; average across models
  # (rowMeans == rowSums/ntree).
  rowMeans(preds)
}
# Evaluate the bagged tree ensemble on training and test sets. # Note: 5
accuracyMeasures(predict.bag(treelist, newdata=spamTrain), # Note: 5
                 spamTrain$spam=="spam",
                 name="bagging, training")
accuracyMeasures(predict.bag(treelist, newdata=spamTest),
                 spamTest$spam=="spam",
                 name="bagging, test")
# Note 1:
# Use bootstrap samples the same size as the training
# set, with 100 trees.
# Note 2:
# Build the bootstrap samples by sampling the row indices of spamTrain with replacement. Each
# column of the matrix samples represents the row indices into spamTrain
# that comprise the bootstrap sample.
# Note 3:
# Train the individual decision trees and return them
# in a list. Note: this step can take a few minutes.
# Note 4:
# predict.bag assumes the underlying classifier returns decision probabilities, not
# decisions.
# Note 5:
# Evaluate the bagged decision trees against the
# training and test sets.
# example 9.3 of section 9.1.2
# (example 9.3 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: Using random forests
# Fit a random forest on the spam data and report train/test quality.
library(randomForest) # Note: 1
# Fix the RNG seed so the forest (which samples rows and variables) is
# repeatable. # Note: 2
set.seed(5123512) # Note: 2
fmodel <- randomForest(x=spamTrain[,spamVars], # Note: 3
        y=spamTrain$spam,
        ntree=100, # Note: 4
        nodesize=7, # Note: 5
        importance=TRUE) # Note: 6 — TRUE, not the reassignable alias T
accuracyMeasures(predict(fmodel, # Note: 7
   newdata=spamTrain[,spamVars],type='prob')[,'spam'],
   spamTrain$spam=="spam",name="random forest, train")
## model accuracy f1 dev.norm
## 1 random forest, train 0.9884142 0.9706611 0.1428786
accuracyMeasures(predict(fmodel,
   newdata=spamTest[,spamVars],type='prob')[,'spam'],
   spamTest$spam=="spam",name="random forest, test")
## model accuracy f1 dev.norm
## 1 random forest, test 0.9541485 0.8845029 0.3972416
# Note 1:
# Load the randomForest package.
# Note 2:
# Set the pseudo-random seed to a known value to try
# and make the random forest run repeatable.
# Note 3:
# Call the randomForest() function to build the model
# with explanatory variables as x and the category to be predicted as
# y.
# Note 4:
# Use 100 trees to be compatible with our bagging
# example. The default is 500 trees.
# Note 5:
# Specify that each node of a tree must have a minimum
# of 7 elements, to be compatible with the default minimum node size that rpart()
# uses on this training set.
# Note 6:
# Tell the algorithm to save information to be used for
# calculating variable importance (we’ll see this later).
# Note 7:
# Report the model quality.
# example 9.4 of section 9.1.2
# (example 9.4 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: randomForest variable importances
# Extract variable importances from the forest (larger = more important)
# and preview the first ten rows. # Note: 1 / Note: 2
varImp <- importance(fmodel) # Note: 1
varImp[1:10, ] # Note: 2
## non-spam spam MeanDecreaseAccuracy
## word.freq.make 2.096811 3.7304353 4.334207
## word.freq.address 3.603167 3.9967031 4.977452
## word.freq.all 2.799456 4.9527834 4.924958
## word.freq.3d 3.000273 0.4125932 2.917972
## word.freq.our 9.037946 7.9421391 10.731509
## word.freq.over 5.879377 4.2402613 5.751371
## word.freq.remove 16.637390 13.9331691 17.753122
## word.freq.internet 7.301055 4.4458342 7.947515
## word.freq.order 3.937897 4.3587883 4.866540
## word.freq.mail 5.022432 3.4701224 6.103929
# Plot importance as mean decrease in accuracy (type=1). # Note: 3
varImpPlot(fmodel, type=1) # Note: 3
# Note 1:
# Call importance() on the spam
# model.
# Note 2:
# The importance() function returns a matrix of
# importance measures (larger values = more important).
# Note 3:
# Plot the variable importance as measured by
# accuracy change.
# example 9.5 of section 9.1.2
# (example 9.5 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: Fitting with fewer variables
# Keep the 25 variables with the largest importance and refit a smaller
# forest on just those. # Note: 1 / Note: 2
# NOTE(review): varImp[,1] is the class-specific ("non-spam") importance
# column, not MeanDecreaseAccuracy — confirm that is the intended ranking.
selVars <- names(sort(varImp[,1], decreasing=TRUE))[1:25] # Note: 1
fsel <- randomForest(x=spamTrain[,selVars],y=spamTrain$spam, # Note: 2
        ntree=100,
        nodesize=7,
        importance=TRUE)
accuracyMeasures(predict(fsel,
   newdata=spamTrain[,selVars],type='prob')[,'spam'],
   spamTrain$spam=="spam",name="RF small, train")
## model accuracy f1 dev.norm
## 1 RF small, train 0.9876901 0.9688546 0.1506817
accuracyMeasures(predict(fsel,
   newdata=spamTest[,selVars],type='prob')[,'spam'],
   spamTest$spam=="spam",name="RF small, test")
## model accuracy f1 dev.norm
## 1 RF small, test 0.9497817 0.8738142 0.400825
# Note 1:
# Sort the variables by their importance, as
# measured by accuracy change.
# Note 2:
# Build a random forest model using only the 25
# most important variables.
# example 9.6 of section 9.2.2
# (example 9.6 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Preparing an artificial problem
# Build a synthetic 1-D regression problem: a nonlinear signal in x plus
# Gaussian noise, with a random ~90/10 train/test split.
set.seed(602957)
x <- rnorm(1000)
noise <- rnorm(1000, sd=1.5)
y <- 3*sin(2*x) + cos(0.75*x) - 1.5*(x^2 ) + noise
select <- runif(1000)
frame <- data.frame(y=y, x = x)
train <- frame[select > 0.1,]
test <-frame[select <= 0.1,]
# example 9.7 of section 9.2.2
# (example 9.7 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Linear regression applied to our artificial example
# Baseline: ordinary linear regression on the (nonlinear) synthetic data.
lin.model <- lm(y ~ x, data=train)
summary(lin.model)
## Call:
## lm(formula = y ~ x, data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17.698 -1.774 0.193 2.499 7.529
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.8330 0.1161 -7.175 1.51e-12 ***
## x 0.7395 0.1197 6.180 9.74e-10 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 3.485 on 899 degrees of freedom
## Multiple R-squared: 0.04075, Adjusted R-squared: 0.03968
## F-statistic: 38.19 on 1 and 899 DF, p-value: 9.737e-10
#
# calculate the root mean squared error (rmse)
#
# Training-set RMSE of the linear fit.
resid.lin <- train$y-predict(lin.model)
sqrt(mean(resid.lin^2))
## [1] 3.481091
# example 9.8 of section 9.2.2
# (example 9.8 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: GAM applied to our artificial example
# Fit a GAM treating x as a nonlinear (spline) term; only trust the fit
# if the algorithm reports convergence. # Note: 1-3
library(mgcv) # Note: 1
glin.model <- gam(y~s(x), data=train) # Note: 2
glin.model$converged # Note: 3
## [1] TRUE
summary(glin.model)
## Family: gaussian # Note: 4
## Link function: identity
##
## Formula:
## y ~ s(x)
##
## Parametric coefficients: # Note: 5
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.83467 0.04852 -17.2 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms: # Note: 6
## edf Ref.df F p-value
## s(x) 8.685 8.972 497.8 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.832 Deviance explained = 83.4% # Note: 7
## GCV score = 2.144 Scale est. = 2.121 n = 901
#
# calculate the root mean squared error (rmse)
#
# Training-set RMSE of the GAM fit (compare with the linear model's).
resid.glin <- train$y-predict(glin.model)
sqrt(mean(resid.glin^2))
## [1] 1.448514
# Note 1:
# Load the mgcv package.
# Note 2:
# Build the model, specifying that x should be
# treated as a nonlinear variable.
# Note 3:
# The converged parameter tells you if the algorithm
# converged. You should only trust the output if this is TRUE.
# Note 4:
# Setting family=gaussian and link=identity tells you that the model was treated with the same
# distributions assumptions as a standard linear regression.
# Note 5:
# The parametric coefficients are the linear terms (in this example, only the constant term).
# This section of the summary tells you which linear terms were
# significantly different from 0.
# Note 6:
# The smooth terms are the nonlinear terms. This section of the summary tells you which
# nonlinear terms were significantly different from 0. It also tells you
# the effective degrees of freedom (edf) used up to build each smooth
# term. An edf near 1 indicates that the variable has an approximately
# linear relationship to the output.
# Note 7:
# “R-sq (adj)” is the adjusted R-squared. “Deviance
# explained” is the raw R-squared (0.834).
# example 9.9 of section 9.2.2
# (example 9.9 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Comparing linear regression and GAM performance
# Compare linear regression vs. GAM on held-out data, by RMSE and by
# squared correlation with the actual outcomes. # Notes: 1-3
actual <- test$y
pred.lin <- predict(lin.model, newdata=test) # Note: 1
pred.glin <- predict(glin.model, newdata=test)
resid.lin <- actual-pred.lin
resid.glin <- actual-pred.glin
sqrt(mean(resid.lin^2)) # Note: 2
## [1] 2.792653
sqrt(mean(resid.glin^2))
## [1] 1.401399
cor(actual, pred.lin)^2 # Note: 3
## [1] 0.1543172
cor(actual, pred.glin)^2
## [1] 0.7828869
## [1] 0.7828869
# Note 1:
# Call both models on the test
# data.
# Note 2:
# Compare the RMSE of the linear model and the GAM
# on the test data.
# Note 3:
# Compare the R-squared of the linear model and the
# GAM on test data.
# example 9.10 of section 9.2.3
# (example 9.10 of section 9.2.3) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Extracting the nonlinear relationships
# Title: Extracting a learned spline from a GAM
# type="terms" returns each smooth term s(x) evaluated on the training
# data (centered to mean zero).
sx <- predict(glin.model, type="terms")
summary(sx)
## s(x)
## Min. :-17.527035
## 1st Qu.: -2.378636
## Median : 0.009427
## Mean : 0.000000
## 3rd Qu.: 2.869166
## Max. : 4.084999
# Overlay the learned spline s(x) (line) on the raw data (points).
xframe <- cbind(train, sx=sx[,1])
ggplot(xframe, aes(x=x)) + geom_point(aes(y=y), alpha=0.4) +
   geom_line(aes(y=sx))
# example 9.11 of section 9.2.4
# (example 9.11 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Applying linear regression (with and without GAM) to health data
# Natality data: split by the precomputed ORIGRANDGROUP column, then fit
# a four-variable linear model for birth weight (DBWT). # Note: 1
library(mgcv)
library(ggplot2)
load("NatalBirthData.rData")
train <- sdata[sdata$ORIGRANDGROUP<=5,]
test <- sdata[sdata$ORIGRANDGROUP>5,]
form.lin <- as.formula("DBWT ~ PWGT + WTGAIN + MAGER + UPREVIS")
linmodel <- lm(form.lin, data=train) # Note: 1
summary(linmodel)
## Call:
## lm(formula = form.lin, data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3155.43 -272.09 45.04 349.81 2870.55
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2419.7090 31.9291 75.784 < 2e-16 ***
## PWGT 2.1713 0.1241 17.494 < 2e-16 ***
## WTGAIN 7.5773 0.3178 23.840 < 2e-16 ***
## MAGER 5.3213 0.7787 6.834 8.6e-12 ***
## UPREVIS 12.8753 1.1786 10.924 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 562.7 on 14381 degrees of freedom
## Multiple R-squared: 0.06596, Adjusted R-squared: 0.0657 # Note: 2
## F-statistic: 253.9 on 4 and 14381 DF, p-value: < 2.2e-16
# Same four predictors, but each wrapped in a smooth term s(). # Note: 3
form.glin <- as.formula("DBWT ~ s(PWGT) + s(WTGAIN) +
                        s(MAGER) + s(UPREVIS)")
glinmodel <- gam(form.glin, data=train) # Note: 3
# Only trust the fit if the algorithm converged. # Note: 4
glinmodel$converged # Note: 4
## [1] TRUE
summary(glinmodel)
## Family: gaussian
## Link function: identity
##
## Formula:
## DBWT ~ s(PWGT) + s(WTGAIN) + s(MAGER) + s(UPREVIS)
##
## Parametric coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3276.948 4.623 708.8 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms:
## edf Ref.df F p-value
## s(PWGT) 5.374 6.443 68.981 < 2e-16 ***
## s(WTGAIN) 4.719 5.743 102.313 < 2e-16 ***
## s(MAGER) 7.742 8.428 6.959 1.82e-09 ***
## s(UPREVIS) 5.491 6.425 48.423 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.0927 Deviance explained = 9.42% # Note: 5
## GCV score = 3.0804e+05 Scale est. = 3.0752e+05 n = 14386
# Note 1:
# Build a linear model with four
# variables.
# Note 2:
# The model explains about 7% of the variance; all
# coefficients are significantly different from 0.
# Note 3:
# Build a GAM with the same
# variables.
# Note 4:
# Verify that the model has
# converged.
# Note 5:
# The model explains just under 10% of the variance;
# all variables have a nonlinear effect significantly different from
# 0.
# example 9.12 of section 9.2.4
# (example 9.12 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Plotting GAM results
# Plot each learned smooth term against its input variable. # Notes: 1-7
terms <- predict(glinmodel, type="terms") # Note: 1
tframe <- cbind(DBWT = train$DBWT, as.data.frame(terms)) # Note: 2
colnames(tframe) <- gsub('[()]', '', colnames(tframe)) # Note: 3
pframe <- cbind(tframe, train[,c("PWGT", "WTGAIN",
                                 "MAGER", "UPREVIS")]) # Note: 4
p1 <- ggplot(pframe, aes(x=PWGT)) +
   geom_point(aes(y=scale(sPWGT, scale=F))) + # Note: 5
   geom_smooth(aes(y=scale(DBWT, scale=F))) # + # Note: 6
# [...] # Note: 7
# Note 1:
# Get the matrix of s()
# functions.
# Note 2:
# Bind in birth weight; convert to data
# frame.
# Note 3:
# Make the column names reference-friendly
# (“s(PWGT)” is converted to “sPWGT”, etc.).
# Note 4:
# Bind in the input variables.
# Note 5:
# Plot s(PWGT) shifted to be zero mean versus PWGT (mother’s weight) as points.
# Note 6:
# Plot the smoothing curve of DWBT (birth weight) shifted to be zero mean versus PWGT (mother’s
# weight).
# Note 7:
# Repeat for remaining variables (omitted for
# brevity).
# example 9.13 of section 9.2.4
# (example 9.13 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Checking GAM model performance on hold-out data
# Score both models on held-out data and compare R-squared. # Notes: 1-2
pred.lin <- predict(linmodel, newdata=test) # Note: 1
pred.glin <- predict(glinmodel, newdata=test)
cor(pred.lin, test$DBWT)^2 # Note: 2
# [1] 0.0616812
cor(pred.glin, test$DBWT)^2
# [1] 0.08857426
# Note 1:
# Run both the linear model and the GAM on the test
# data.
# Note 2:
# Calculate R-squared for both
# models.
# example 9.14 of section 9.2.5
# (example 9.14 of section 9.2.5) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM for logistic regression
# Title: GLM logistic regression
# Baseline GLM logistic regression: predict very low birth weight
# (DBWT < 2000 grams) from four maternal variables.
form <- as.formula("DBWT < 2000 ~ PWGT + WTGAIN + MAGER + UPREVIS")
logmod <- glm(form, data=train, family=binomial(link="logit"))
# example 9.15 of section 9.2.5
# (example 9.15 of section 9.2.5) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM for logistic regression
# Title: GAM logistic regression
# GAM logistic regression: same outcome, each predictor as a smooth term.
form2 <- as.formula("DBWT<2000~s(PWGT)+s(WTGAIN)+
                    s(MAGER)+s(UPREVIS)")
glogmod <- gam(form2, data=train, family=binomial(link="logit"))
# Check convergence before trusting the summary.
glogmod$converged
## [1] TRUE
summary(glogmod)
## Family: binomial
## Link function: logit
##
## Formula:
## DBWT < 2000 ~ s(PWGT) + s(WTGAIN) + s(MAGER) + s(UPREVIS)
##
## Parametric coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.94085 0.06794 -58 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms:
## edf Ref.df Chi.sq p-value
## s(PWGT) 1.905 2.420 2.463 0.36412 # Note: 1
## s(WTGAIN) 3.674 4.543 64.426 1.72e-12 ***
## s(MAGER) 1.003 1.005 8.335 0.00394 **
## s(UPREVIS) 6.802 7.216 217.631 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.0331 Deviance explained = 9.14% # Note: 2
## UBRE score = -0.76987 Scale est. = 1 n = 14386
# Note 1:
# Note that there’s no proof that the mother’s weight (PWGT) has a significant effect on
# outcome.
# Note 2:
# “Deviance explained” is the pseudo R-squared: 1 -
# (deviance/null.deviance).
# example 9.16 of section 9.3.1
# (example 9.16 of section 9.3.1) : Exploring advanced methods : Using kernel methods to increase data separation : Understanding kernel functions
# Title: An artificial kernel example
# Two example 2-vectors for demonstrating the kernel identity below.
u <- c(1,2)
v <- c(3,4)
# Example kernel on two 2-vectors: the linear terms, the squared terms,
# and the single cross term, accumulated in the same order as written
# out longhand. # Note: 1
k <- function(u,v) { # Note: 1
  acc <- u[1]*v[1] + u[2]*v[2]
  acc <- acc + u[1]*u[1]*v[1]*v[1]
  acc <- acc + u[2]*u[2]*v[2]*v[2]
  acc + u[1]*u[2]*v[1]*v[2]
}
# Feature map certifying k(): the original entries, their squares, and
# all pairwise products of entries. # Note: 2
phi <- function(x) { # Note: 2
  x <- as.numeric(x)
  squares <- x*x
  cross.terms <- combn(x,2,FUN=prod)
  c(x, squares, cross.terms)
}
# Confirm the kernel identity: k(u,v) equals the inner product of the
# mapped vectors phi(u) and phi(v). # Notes: 3-4
print(k(u,v)) # Note: 3
## [1] 108
print(phi(u))
## [1] 1 2 1 4 2
print(phi(v))
## [1] 3 4 9 16 12
print(as.numeric(phi(u) %*% phi(v))) # Note: 4
## [1] 108
# Note 1:
# Define a function of two vector variables
# (both two dimensional) as the sum of various products of terms.
# Note 2:
# Define a function of a single vector variable
# that returns a vector containing the original entries plus all products of
# entries.
# Note 3:
# Example evaluation of k(,).
# Note 4:
# Confirm phi() agrees with k(,). phi() is the certificate that shows k(,) is in fact a
# kernel.
# example 9.17 of section 9.3.2
# (example 9.17 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Applying stepwise linear regression to PUMS data
# PUMS income data: split train/test, then fit a stepwise-selected
# linear model for log income. # Notes: 1-3
dtrain <- subset(psub,ORIGRANDGROUP >= 500)
dtest <- subset(psub,ORIGRANDGROUP < 500) # Note: 1
m1 <- step( # Note: 2
   lm(log(PINCP,base=10) ~ AGEP + SEX + COW + SCHL,
      data=dtrain), # Note: 3
   direction='both')
# Note: 4
# Root mean squared error between observed values y and predictions f.
rmse <- function(y, f) {
  sq.err <- (y - f)^2
  sqrt(mean(sq.err))
}
# Held-out RMSE of the stepwise linear model. # Note: 5
print(rmse(log(dtest$PINCP,base=10),
           predict(m1,newdata=dtest))) # Note: 5
# [1] 0.2752171
# Note 1:
# Split data into test and training.
# Note 2:
# Ask that the linear regression model we’re building be
# stepwise improved, which is a powerful automated procedure for
# removing variables that don’t seem to have significant impacts
# (can improve generalization performance).
# Note 3:
# Build the basic linear regression model.
# Note 4:
# Define the RMSE function.
# Note 5:
# Calculate the RMSE between the prediction and the
# actuals.
# example 9.18 of section 9.3.2
# (example 9.18 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Applying an example explicit kernel transform
# Primal kernel feature map: original entries, their squares, and all
# pairwise products of entries. # Note: 1
phi <- function(x) { # Note: 1
  x <- as.numeric(x)
  squares <- x*x
  cross.terms <- combn(x,2,FUN=prod)
  c(x, squares, cross.terms)
}
# Variable names matching phi()'s layout: the originals, the self-pairs
# ("a:a"), then all cross pairs ("a:b"). # Note: 2
phiNames <- function(n) { # Note: 2
  self.pairs <- paste(n,n,sep=':')
  cross.pairs <- combn(n,2,FUN=function(x) {paste(x,collapse=':')})
  c(n, self.pairs, cross.pairs)
}
# Build the explicit-kernel design matrix: expand categoricals to
# indicators, apply phi() row-wise, name the compound columns, and
# re-attach outcome and split columns. # Notes: 3-7
modelMatrix <- model.matrix(~ 0 + AGEP + SEX + COW + SCHL,psub) # Note: 3
colnames(modelMatrix) <- gsub('[^a-zA-Z0-9]+','_',
                              colnames(modelMatrix)) # Note: 4
pM <- t(apply(modelMatrix,1,phi)) # Note: 5
vars <- phiNames(colnames(modelMatrix))
vars <- gsub('[^a-zA-Z0-9]+','_',vars)
colnames(pM) <- vars # Note: 6
pM <- as.data.frame(pM)
pM$PINCP <- psub$PINCP
pM$ORIGRANDGROUP <- psub$ORIGRANDGROUP
pMtrain <- subset(pM,ORIGRANDGROUP >= 500)
pMtest <- subset(pM,ORIGRANDGROUP < 500) # Note: 7
# Note 1:
# Define our primal kernel function: map a
# vector to a copy of itself plus all square terms and cross-multiplied
# terms.
# Note 2:
# Define a function similar to our primal
# kernel, but working on variable names instead of values.
# Note 3:
# Convert data to a matrix where all
# categorical variables are encoded as multiple numeric indicators.
# Note 4:
# Remove problematic characters from matrix
# column names.
# Note 5:
# Apply the primal kernel function to every
# row of the matrix and transpose results so they’re written as rows (not as a
# list as returned by apply()).
# Note 6:
# Extend names from original matrix to
# names for compound variables in new matrix.
# Note 7:
# Add in outcomes, test/train split
# columns, and prepare new data for modeling.
# example 9.19 of section 9.3.2
# (example 9.19 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Modeling using the explicit kernel transform
# Fit on all kernel-expanded variables, keep the significant ones (plus
# the originals), then stepwise-regress on that subset. # Notes: 1-3
formulaStr2 <- paste('log(PINCP,base=10)',
                     paste(vars,collapse=' + '),
                     sep=' ~ ')
m2 <- lm(as.formula(formulaStr2),data=pMtrain)
coef2 <- summary(m2)$coefficients
interestingVars <- setdiff(rownames(coef2)[coef2[,'Pr(>|t|)']<0.01],
                           '(Intercept)')
interestingVars <- union(colnames(modelMatrix),interestingVars) # Note: 1
formulaStr3 <- paste('log(PINCP,base=10)',
                     paste(interestingVars,collapse=' + '),
                     sep=' ~ ')
m3 <- step(lm(as.formula(formulaStr3),data=pMtrain),direction='both') # Note: 2
print(rmse(log(pMtest$PINCP,base=10),predict(m3,newdata=pMtest))) # Note: 3
# [1] 0.2735955
# Note 1:
# Select a set of interesting variables by building an initial model using all of the new
# variables and retaining an interesting subset. This is an ad hoc
# move to speed up the stepwise regression by trying to quickly
# dispose of many useless derived variables. By introducing many new
# variables, the primal kernel method also introduces many new degrees
# of freedom, which can invite overfitting.
# Note 2:
# Stepwise regress on subset of variables to
# get new model.
# Note 3:
# Calculate the RMSE between the prediction and the actuals.
# example 9.20 of section 9.3.2
# (example 9.20 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Inspecting the results of the explicit kernel model
# Show which kernel-derived variables survived stepwise selection.
print(summary(m3))
## Call:
## lm(formula = log(PINCP, base = 10) ~ AGEP + SEXM +
## COWPrivate_not_for_profit_employee +
## SCHLAssociate_s_degree + SCHLBachelor_s_degree +
## SCHLDoctorate_degree +
## SCHLGED_or_alternative_credential + SCHLMaster_s_degree +
## SCHLProfessional_degree + SCHLRegular_high_school_diploma +
## SCHLsome_college_credit_no_degree + AGEP_AGEP, data = pMtrain)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.29264 -0.14925 0.01343 0.17021 0.61968
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.9400460 0.2219310 13.248 < 2e-16 ***
## AGEP 0.0663537 0.0124905 5.312 1.54e-07 ***
## SEXM 0.0934876 0.0224236 4.169 3.52e-05 ***
## COWPrivate_not_for_profit_em -0.1187914 0.0379944 -3.127 0.00186 **
## SCHLAssociate_s_degree 0.2317211 0.0509509 4.548 6.60e-06 ***
## SCHLBachelor_s_degree 0.3844459 0.0417445 9.210 < 2e-16 ***
## SCHLDoctorate_degree 0.3190572 0.1569356 2.033 0.04250 *
## SCHLGED_or_alternative_creden 0.1405157 0.0766743 1.833 0.06737 .
## SCHLMaster_s_degree 0.4553550 0.0485609 9.377 < 2e-16 ***
## SCHLProfessional_degree 0.6525921 0.0845052 7.723 5.01e-14 ***
## SCHLRegular_high_school_diplo 0.1016590 0.0415834 2.445 0.01479 *
## SCHLsome_college_credit_no_de 0.1655906 0.0416345 3.977 7.85e-05 ***
## AGEP_AGEP -0.0007547 0.0001704 -4.428 1.14e-05 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 0.2649 on 582 degrees of freedom
## Multiple R-squared: 0.3541, Adjusted R-squared: 0.3408
## F-statistic: 26.59 on 12 and 582 DF, p-value: < 2.2e-16
# example 9.21 of section 9.4.2
# (example 9.21 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: Setting up the spirals data as an example classification problem
# Load the kernlab "spirals" example, label the two spirals via spectral
# clustering, and plot the labeled points. # Notes: 1-4
library('kernlab')
data('spirals') # Note: 1
sc <- specc(spirals, centers = 2) # Note: 2
s <- data.frame(x=spirals[,1],y=spirals[,2],
                class=as.factor(sc)) # Note: 3
library('ggplot2')
ggplot(data=s) +
   geom_text(aes(x=x,y=y,
                 label=class,color=class)) +
   coord_fixed() +
   theme_bw() + theme(legend.position='none') # Note: 4
# Note 1:
# Load the kernlab kernel and support vector
# machine package and then ask that the included example "spirals" be made
# available.
# Note 2:
# Use kernlab’s spectral clustering routine
# to identify the two different spirals in the example dataset.
# Note 3:
# Combine the spiral coordinates and the
# spiral label into a data frame.
# Note 4:
# Plot the spirals with class labels.
# example 9.22 of section 9.4.2
# (example 9.22 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: SVM with a poor choice of kernel
# Train/test split, then fit an SVM with a (deliberately poor) linear
# kernel and plot its held-out predictions over the true labels.
set.seed(2335246L)
# TRUE rather than the reassignable alias T.
s$group <- sample.int(100,size=dim(s)[[1]],replace=TRUE)
sTrain <- subset(s,group>10)
sTest <- subset(s,group<=10) # Note: 1
# mSVMV <- ksvm(class~x+y,data=sTrain,kernel='vanilladot')
# had been using ksvm, but it seems to keep bad state in some cases
library('e1071')
mSVMV <- svm(class~x+y,data=sTrain,kernel='linear',type='nu-classification') # Note: 2
sTest$predSVMV <- predict(mSVMV,newdata=sTest,type='response') # Note: 3
ggplot() +
   geom_text(data=sTest,aes(x=x,y=y,
                            label=predSVMV),size=12) +
   geom_text(data=s,aes(x=x,y=y,
                        label=class,color=class),alpha=0.7) +
   coord_fixed() +
   theme_bw() + theme(legend.position='none') # Note: 4
# Note 1:
# Prepare to try to learn spiral class label
# from coordinates using a support vector machine.
# Note 2:
# Build the support vector model using a
# vanilladot kernel (not a very good kernel).
# Note 3:
# Use the model to predict class on held-out
# data.
# Note 4:
# Plot the predictions on top of a grey copy
# of all the data so we can see if predictions agree with the original
# markings.
# example 9.23 of section 9.4.2
# (example 9.23 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: SVM with a good choice of kernel
# mSVMG <- ksvm(class~x+y,data=sTrain,kernel='rbfdot')
# had been using ksvm, but it seems to be keeping bad state in some cases
mSVMG <- svm(class~x+y,data=sTrain,kernel='radial',type='nu-classification') # Note: 1
sTest$predSVMG <- predict(mSVMG,newdata=sTest,type='response')
ggplot() +
geom_text(data=sTest,aes(x=x,y=y,
label=predSVMG),size=12) +
geom_text(data=s,aes(x=x,y=y,
label=class,color=class),alpha=0.7) +
coord_fixed() +
theme_bw() + theme(legend.position='none')
# Note 1:
# This time use the "radial" or
# Gaussian kernel, which is a nice geometric similarity measure.
# example 9.24 of section 9.4.3
# (example 9.24 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Revisiting the Spambase example with GLM
spamD <- read.table('spamD.tsv',header=T,sep='\t')
spamTrain <- subset(spamD,spamD$rgroup>=10)
spamTest <- subset(spamD,spamD$rgroup<10)
spamVars <- setdiff(colnames(spamD),list('rgroup','spam'))
spamFormula <- as.formula(paste('spam=="spam"',
paste(spamVars,collapse=' + '),sep=' ~ '))
spamModel <- glm(spamFormula,family=binomial(link='logit'),
data=spamTrain)
spamTest$pred <- predict(spamModel,newdata=spamTest,
type='response')
print(with(spamTest,table(y=spam,glPred=pred>=0.5)))
## glPred
## y FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 9.25 of section 9.4.3
# (example 9.25 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Applying an SVM to the Spambase example
library('kernlab')
spamFormulaV <- as.formula(paste('spam',
paste(spamVars,collapse=' + '),sep=' ~ '))
# may want to switch to library('e1071') svm() as had some state holding problems in some examles
svmM <- ksvm(spamFormulaV,data=spamTrain, # Note: 1
kernel='rbfdot', # Note: 2
C=10, # Note: 3
prob.model=T,cross=5, # Note: 4
class.weights=c('spam'=1,'non-spam'=10) # Note: 5
)
spamTest$svmPred <- predict(svmM,newdata=spamTest,type='response')
print(with(spamTest,table(y=spam,svmPred=svmPred)))
## svmPred
## y non-spam spam
## non-spam 269 9
## spam 27 153
# Note 1:
# Build a support vector model for the Spambase
# problem.
# Note 2:
# Ask for the radial dot or Gaussian kernel (in
# fact the default kernel).
# Note 3:
# Set the “soft margin penalty” high; prefer not moving training examples over getting a wider
# margin. Prefer a complex model that applies weakly to all the data
# over a simpler model that applies strongly on a subset of the
# data.
# Note 4:
# Ask that, in addition to a predictive model, an estimate of a model estimating class
# probabilities also be built. Not all SVM libraries support this
# operation, and the probabilities are essentially built after the
# model (through a cross-validation procedure) and may not be as high-quality
# as the model itself.
# Note 5:
# Explicitly control the trade-off between
# false positive and false negative errors. In this case, we say non-spam
# classified as spam (a false positive) should be considered an expensive
# mistake.
# example 9.26 of section 9.4.3
# (example 9.26 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Printing the SVM results summary
print(svmM)
## Support Vector Machine object of class "ksvm"
##
## SV type: C-svc (classification)
## parameter : cost C = 10
##
## Gaussian Radial Basis kernel function.
## Hyperparameter : sigma = 0.0299836801848002
##
## Number of Support Vectors : 1118
##
## Objective Function Value : -4642.236
## Training error : 0.028482
## Cross validation error : 0.076998
## Probability model included.
# example 9.27 of section 9.4.3
# (example 9.27 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Shifting decision point to perform an apples-to-apples comparison
sameCut <- sort(spamTest$pred)[length(spamTest$pred)-162] # Note: 1
print(with(spamTest,table(y=spam,glPred=pred>sameCut))) # Note: 2
## glPred
## y FALSE TRUE
## non-spam 267 11
## spam 29 151
# Note 1:
# Find out what GLM score threshold has 162
# examples above it.
# Note 2:
# Ask the GLM model for its predictions that
# are above the threshold. We’re essentially asking the model for its 162 best
# candidate spam prediction results.
# informalexample 10.1 of section 10.2.1
# (informalexample 10.1 of section 10.2.1) : Documentation and deployment : Using knitr to produce milestone documentation : What is knitr?
library(knitr)
knit('simple.Rmd')
# informalexample 10.2 of section 10.2.1
# (informalexample 10.2 of section 10.2.1) : Documentation and deployment : Using knitr to produce milestone documentation : What is knitr?
echo "library(knitr); knit('add.Rnw')" | R --vanilla # Note: 1
pdflatex add.tex # Note: 2
# Note 1:
# Use R in batch mode to create add.tex from
# add.Rnw.
# Note 2:
# Use LaTeX to create add.pdf from
# add.tex.
# example 10.7 of section 10.3.1
# (example 10.7 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Example code comment
# Return the pseudo logarithm of x, which is close to
# sign(x)*log10(abs(x)) for x such that abs(x) is large
# and doesn't "blow up" near zero. Useful
# for transforming wide-range variables that may be negative
# (like profit/loss).
# See: http://www.win-vector.com/blog
# /2012/03/modeling-trick-the-signed-pseudo-logarithm/
# NB: This transform has the undesirable property of making most
# signed distributions appear bimodal around the origin, no matter
# what the underlying distribution really looks like.
# The argument x is assumed be numeric and can be a vector.
# Signed pseudo-logarithm: behaves like sign(x)*log10(abs(x)) for large
# abs(x), but stays smooth and finite through zero. Vectorized over x.
pseudoLog10 <- function(x) {
  asinh(x / 2) / log(10)
}
# example 10.8 of section 10.3.1
# (example 10.8 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Useless comment
#######################################
# Function: addone
# Author: John Mount
# Version: 1.3.11
# Location: RSource/helperFns/addone.R
# Date: 10/31/13
# Arguments: x
# Purpose: Adds one
#######################################
addone <- function(x) x + 1 # increment each element of x by one
# example 10.9 of section 10.3.1
# (example 10.9 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Worse than useless comment
# adds one
addtwo <- function(x) x + 2
# example 10.16 of section 10.4.1
# (example 10.16 of section 10.4.1) : Documentation and deployment : Deploying models : Deploying models as R HTTP services
# Title: Buzz model as an R-based HTTP service
# NOTE(review): buzztrain, varslist, and fmodel are free variables here;
# they are supplied by the workspace file 'thRS500.Rdata' loaded below.
library(Rook) # Note: 1
load('thRS500.Rdata') # Note: 2
library(randomForest) # Note: 3
numericPositions <- sapply(buzztrain[,varslist],is.numeric) # Note: 4
modelFn <- function(env) { # Note: 5
errors <- c()
warnings <- c()
val <- c()
row <- c()
tryCatch(
{
arg <- Multipart$parse(env) # Note: 6
row <- as.list(arg[varslist])
names(row) <- varslist
row[numericPositions] <- as.numeric(row[numericPositions])
frame <- data.frame(row)
val <- predict(fmodel,newdata=frame)
},
# The handlers assign with <<- so they update modelFn's errors/warnings
# vectors rather than creating handler-local copies.
warning = function(w) { message(w)
warnings <<- c(warnings,as.character(w)) },
error = function(e) { message(e)
errors <<- c(errors,as.character(e)) }
)
body <- paste( # Note: 7
'val=',val,'\n',
'nerrors=',length(errors),'\n',
'nwarnings=',length(warnings),'\n',
'query=',env$QUERY_STRING,'\n',
'errors=',paste(errors,collapse=' '),'\n',
'warnings=',paste(warnings,collapse=' '),'\n',
'data row','\n',
paste(capture.output(print(row)),collapse='\n'),'\n',
sep='')
list(
# HTTP 200 on success, 400 if any error was recorded during scoring.
status=ifelse(length(errors)<=0,200L,400L),
headers=list('Content-Type' = 'text/text'),
body=body )
}
s <- Rhttpd$new() # Note: 8
s$add(name="modelFn",app=modelFn) # Note: 9
s$start() # Note: 10
print(s)
## Server started on 127.0.0.1:20714
## [1] modelFn http://127.0.0.1:20714/custom/modelFn # Note: 11
##
## Call browse() with an index number or name to run an application.
# Note 1:
# Load the rook HTTP server library.
# Note 2:
# Load the saved buzz workspace (includes the
# random forest model).
# Note 3:
# Load the random forest library (loading the
# workspace doesn’t load the library).
# Note 4:
# Determine which variables are numeric (in the
# rook server, everything defaults to
# character).
# Note 5:
# Declare the modeling service.
# Note 6:
# This block does the actual work: parse data
# and apply the model.
# Note 7:
# Format results, place in a list, and
# return.
# Note 8:
# Start a new rook HTTP service.
# Note 9:
# Register our model function as an HTTP
# service.
# Note 10:
# Start the HTTP server.
# Note 11:
# This is the URL where the service is
# running.
# example 10.17 of section 10.4.1
# (example 10.17 of section 10.4.1) : Documentation and deployment : Deploying models : Deploying models as R HTTP services
# Title: Calling the buzz HTTP service
# Render one data row as a standalone HTML page containing a POST form
# whose text fields carry the row's variable values to the scoring service
# (submitting the form sends them as multipart/form-data).
# NOTE(review): reads `varslist` from the enclosing environment (it is
# loaded from thRS500.Rdata elsewhere in this script).
rowAsForm <- function(url,row) {
  fieldFor <- function(a,b) {
    paste('<p> ',a,' <input type="text" name="',a,
       '" value="',b,'"/> </p>',sep='') }
  fields <- mapply(fieldFor,varslist,as.list(row)[varslist])
  paste(
    paste('<HTML><HEAD></HEAD><BODY><FORM action="',url,
       '" enctype="multipart/form-data" method="POST">\n',sep=''),
    '<input type="submit" value="Send"/>',
    paste(fields,collapse='\n'),
    '</FORM></BODY></HTML>',
    sep='\n')
}
# NOTE(review): buzztest comes from the thRS500.Rdata workspace loaded
# earlier; the port in this URL changes on every server start (see Note 2).
url <- 'http://127.0.0.1:20714/custom/modelFn' # Note: 2
cat(rowAsForm(url,buzztest[7,]),file='buzztest7.html') # Note: 3
# Note 1:
# Function to convert a row of dataset into a
# huge HTML form that transmits all of the variable
# values to HTTP server on submit (when the Send
# button is clicked).
# Note 2:
# The URL we started the rook HTTP server on;
# you’ll have to copy the URL address and port from
# what’s printed when you started the Rook
# service.
# Note 3:
# Write the form representing the variables for
# the seventh test example to a file.
# example 10.18 of section 10.4.2
# (example 10.18 of section 10.4.2) : Documentation and deployment : Deploying models : Deploying models by export
# Title: Exporting the random forest model
load('thRS500.Rdata') # Note: 1
library(randomForest) # Note: 2
# Flatten a random forest into one data frame: one row per tree node,
# tagged with its node id and the index of the tree it came from
# (uses randomForest::getTree on each of the model's ntree trees).
extractTrees <- function(rfModel) {
  oneTree <- function(k) {
    tree <- getTree(rfModel,k=k,labelVar=T)
    tree$nodeid <- seq_len(nrow(tree))
    tree$treeid <- k
    tree
  }
  do.call('rbind',lapply(seq_len(rfModel$ntree),oneTree))
}
# Export the flattened forest (fmodel comes from thRS500.Rdata) so that
# non-R software can score with it.
write.table(extractTrees(fmodel), # Note: 4
file='rfmodel.tsv',row.names=F,sep='\t',quote=F)
# Note 1:
# Load the saved buzz workspace (includes the
# random forest model).
# Note 2:
# Load the random forest library (loading the
# workspace doesn’t load the library).
# Note 3:
# Define a function that joins the tree tables
# from the random forest getTree() method into one
# large table of trees.
# Note 4:
# Write the table of trees as a tab-separated
# values table (easy for other software to
# read).
# informalexample A.1 of section A.1.5
# (informalexample A.1 of section A.1.5) : Working with R and other tools : Installing the tools : R resources
install.packages('ctv',repos='https://cran.r-project.org')
library('ctv')
# install.views('TimeSeries') # can take a LONG time
# example A.1 of section A.2
# (example A.1 of section A.2) : Working with R and other tools : Starting with R
# Title: Trying a few R commands
1
## [1] 1
1/2
## [1] 0.5
'Joe'
## [1] "Joe"
"Joe"
## [1] "Joe"
"Joe"=='Joe'
## [1] TRUE
c()
## NULL
is.null(c())
## [1] TRUE
is.null(5)
## [1] FALSE
c(1)
## [1] 1
c(1,2)
## [1] 1 2
c("Apple",'Orange')
## [1] "Apple" "Orange"
length(c(1,2))
## [1] 2
vec <- c(1,2)
vec
## [1] 1 2
# informalexample A.2 of section A.2.1
# (informalexample A.2 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
x <- 2
# The stray space splits the assignment arrow: this parses as x < (-3),
# a comparison returning FALSE, so x keeps its value of 2.
x < - 3
## [1] FALSE
print(x)
## [1] 2
# Divide numerator by denominator (vectorized, elementwise).
divide <- function(numerator, denominator) {
  numerator / denominator
}
divide(1,2)
## [1] 0.5
divide(2,1)
## [1] 2
divide(denominator=2,numerator=1)
## [1] 0.5
# <- inside a call is assignment, not argument naming, so the values are
# passed positionally: this is divide(2,1).
divide(denominator<-2,numerator<-1) # yields 2, a wrong answer
## [1] 2
# example A.3 of section A.2.1
# (example A.3 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: Demonstrating side effects
# good() assigns to a function-local x; bad() uses <<- and overwrites the
# global x (compare the two print(x) results below).
x<-1
good <- function() { x <- 5}
good()
print(x)
## [1] 1
bad <- function() { x <<- 5}
bad()
print(x)
## [1] 5
# example A.4 of section A.2.1
# (example A.4 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: R truth tables for Boolean operators
c(T,T,F,F) == c(T,F,T,F)
## [1] TRUE FALSE FALSE TRUE
c(T,T,F,F) & c(T,F,T,F)
## [1] TRUE FALSE FALSE FALSE
c(T,T,F,F) | c(T,F,T,F)
## [1] TRUE TRUE TRUE FALSE
# informalexample A.3 of section A.2.1
# (informalexample A.3 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
add <- function(a,b) { a + b}
add(1,2)
## [1] 3
# informalexample A.4 of section A.2.1
# (informalexample A.4 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
add(1,'fred')
## Error in a + b : non-numeric argument to binary operator
# example A.5 of section A.2.1
# (example A.5 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: Call-by-value effect
vec <- c(1,2)
fun <- function(v) { v[[2]]<-5; print(v)}
fun(vec)
## [1] 1 5
print(vec)
## [1] 1 2
# informalexample A.5 of section A.2.2
# (informalexample A.5 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
vec <- c(2,3)
vec[[2]] <- 5
print(vec)
## [1] 2 5
# example A.6 of section A.2.2
# (example A.6 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: Examples of R indexing operators
x <- list('a'=6,b='fred')
names(x)
## [1] "a" "b"
x$a
## [1] 6
x$b
## [1] "fred"
x[['a']]
# NOTE(review): the transcript below looks like the output of x['a'];
# [[ drops the container, so x[['a']] actually prints just: [1] 6
## $a
## [1] 6
x[c('a','a','b','b')]
## $a
## [1] 6
##
## $a
## [1] 6
##
## $b
## [1] "fred"
##
## $b
## [1] "fred"
# example A.7 of section A.2.2
# (example A.7 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: R’s treatment of unexpected factor levels
factor('red',levels=c('red','orange'))
## [1] red
## Levels: red orange
factor('apple',levels=c('red','orange'))
## [1] <NA>
## Levels: red orange
# example A.8 of section A.2.2
# (example A.8 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: Confirm lm() encodes new strings correctly.
d <- data.frame(x=factor(c('a','b','c')),
y=c(1,2,3))
m <- lm(y~0+x,data=d) # Note: 1
print(predict(m,
newdata=data.frame(x='b'))[[1]]) # Note: 2
# [1] 2
print(predict(m,
newdata=data.frame(x=factor('b',levels=c('b'))))[[1]]) # Note: 3
# [1] 2
# Note 1:
# Build a data frame and linear model mapping
# a,b,c to 1,2,3.
# Note 2:
# Show that model gets correct prediction for
# b as a string.
# Note 3:
# Show that model gets correct prediction for
# b as a factor, encoded with a different number of
# levels. This shows that lm() is correctly treating
# factors as strings.
# example A.9 of section A.2.3
# (example A.9 of section A.2.3) : Working with R and other tools : Starting with R : Loading data from HTTPS sources
# Title: Loading UCI car data directly from GitHub using HTTPS
require(RCurl) # Note: 1
urlBase <-
'https://raw.githubusercontent.com/WinVector/zmPDSwR/master/' # Note: 2
mkCon <- function(nm) { # Note: 3
textConnection(getURL(paste(urlBase,nm,sep='/')))
}
cars <- read.table(mkCon('car.data.csv'), # Note: 4
sep=',',header=T,comment.char='')
# Note 1:
# Bring in the RCurl library for more connection
# methods.
# Note 2:
# Form a valid HTTPS base URL for raw access to
# the GitHub repository.
# Note 3:
# Define a function that wraps a URL path
# fragment into a usable HTTPS connection.
# Note 4:
# Load the car data from GitHub over
# HTTPS.
# example A.10 of section A.3.2
# (example A.10 of section A.3.2) : Working with R and other tools : Using databases with R : Starting with SQuirreL SQL
# Title: Reading database data into R
install.packages('RJDBC',repos='https://cran.r-project.org') # Note: 1
library('RJDBC') # Note: 2
drv <- JDBC("org.h2.Driver","h2-1.3.170.jar",identifier.quote="'") # Note: 3
conn <- dbConnect(drv,"jdbc:h2:h2demodb_h2","u","u") # Note: 4
d <- dbGetQuery(conn,"SELECT * FROM example_table") # Note: 5
print(d) # Note: 6
## STATUSID NAME
## 1 1 Joe
## 2 2 Fred # Note: 7
# Note 1:
# Install the RJDBC package from the CRAN
# package repository.
# Note 2:
# Load the RJDBC library.
# Note 3:
# Use the RJDBC library to build a database
# driver.
# Note 4:
# Use the database driver to build a database
# connection. In our SQuirreL SQL example, we used
# the path /Users/johnmount/Downloads/h2demodb_h2.
# So the path fragment given here (h2demodb_h2)
# works only if R is working in the directory
# /Users/johnmount/Downloads. You would alter all of
# these paths and URLs to work for your own
# directories.
# Note 5:
# Run a SQL select query using the database
# connection to populate a data frame.
# Note 6:
# Print the result data frame.
# Note 7:
# The database table as an R data frame.
# example A.11 of section A.3.4
# (example A.11 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Loading an Excel spreadsheet
library(gdata)
bookings <- read.xls('Workbook1.xlsx',sheet=1,pattern='date',
stringsAsFactors=F,as.is=T)
prices <- read.xls('Workbook1.xlsx',sheet=2,pattern='date',
stringsAsFactors=F,as.is=T)
# example A.12 of section A.3.4
# (example A.12 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: The hotel reservation and price data
print(bookings)
## date day.of.stay X1.before X2.before X3.before
## 1 2013-07-01 105 98 95 96
## 2 2013-07-02 103 100 98 95
## 3 2013-07-03 105 95 90 80
## 4 2013-07-04 105 105 107 98
print(prices)
## date day.of.stay X1.before X2.before X3.before
## 1 2013-07-01 $250.00 $200.00 $280.00 $300.00
## 2 2013-07-02 $200.00 $250.00 $290.00 $250.00
## 3 2013-07-03 $200.00 $200.00 $250.00 $275.00
## 4 2013-07-04 $250.00 $300.00 $300.00 $200.00
# example A.13 of section A.3.4
# (example A.13 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Using melt to restructure data
library('reshape2')
bthin <- melt(bookings,id.vars=c('date'),
variable.name='daysBefore',value.name='bookings') # Note: 1
pthin <- melt(prices,id.vars=c('date'),
variable.name='daysBefore',value.name='price') # Note: 2
daysCodes <- c('day.of.stay', 'X1.before', 'X2.before', 'X3.before')
bthin$nDaysBefore <- match(bthin$daysBefore,daysCodes)-1 # Note: 3
pthin$nDaysBefore <- match(pthin$daysBefore,daysCodes)-1 # Note: 4
pthin$price <- as.numeric(gsub('\\$','',pthin$price)) # Note: 5
print(head(pthin))
## date daysBefore price nDaysBefore
## 1 2013-07-01 day.of.stay 250 0
## 2 2013-07-02 day.of.stay 200 0
## 3 2013-07-03 day.of.stay 200 0
## 4 2013-07-04 day.of.stay 250 0
## 5 2013-07-01 X1.before 200 1
## 6 2013-07-02 X1.before 250 1
# Note 1:
# Use melt to change columns that are not date
# (day.of.stay, Xn.before) to values stored in a new
# column called daysBefore. Each booking count
# becomes a new row (instead of having many
# different bookings in the same row).
# Note 2:
# Each price entry becomes a new row (instead
# of having many different prices in the same
# row).
# Note 3:
# Use match and dayCodes list to convert key
# strings to numeric nDaysBefore in our bookings
# data.
# Note 4:
# Use match and dayCodes list to convert key
# strings to numeric nDaysBefore in our price
# data.
# Note 5:
# Remove dollar sign and convert prices to
# numeric type.
# example A.14 of section A.3.4
# (example A.14 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Assembling many rows using SQL
options(gsubfn.engine = "R") # Note: 1
library('sqldf')
# NOTE(review): the '# Note:' markers below are INSIDE the quoted SQL
# string ('#' is not an SQL comment), so this query will likely fail as
# written; strip the markers before running.
joined <- sqldf(' # Note: 2
select # Note: 3
bCurrent.date as StayDate, # Note: 4
bCurrent.daysBefore as daysBefore,
bCurrent.nDaysBefore as nDaysBefore,
p.price as price,
bCurrent.bookings as bookingsCurrent,
bPrevious.bookings as bookingsPrevious,
bCurrent.bookings - bPrevious.bookings as pickup
from
bthin bCurrent # Note: 5
join
bthin bPrevious # Note: 6
on
bCurrent.date=bPrevious.date
and bCurrent.nDaysBefore+1=bPrevious.nDaysBefore # Note: 7
join
pthin p # Note: 8
on
bCurrent.date=p.date
and bCurrent.nDaysBefore=p.nDaysBefore # Note: 9
')
print(joined)
# Note 1:
# Prevent library(sqldf) from triggering a
# tcl/tk dependency which causes R to exit on OS X
# if X11 isn’t installed. See
# https://code.google.com/p/sqldf/ for
# troubleshooting details.
# Note 2:
# Create a new data frame of rows built out of
# triples of rows from pthin and bthin.
# Note 3:
# SQL statements typically start with the word
# “select.”
# Note 4:
# List of derived columns (and their new
# names) for our new data frame.
# Note 5:
# First data frame we’re pulling data from:
# bthin.
# Note 6:
# Second pull from bthin.
# Note 7:
# Conditions to match b1 rows to b2
# rows.
# Note 8:
# Third data frame we are pulling data from:
# pthin.
# Note 9:
# Conditions to match p to b2 (and implicitly
# b1).
# example A.15 of section A.3.4
# (example A.15 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Showing our hotel model results
library('ggplot2')
ggplot(data=joined,aes(x=price,y=pickup)) +
geom_point() + geom_jitter() + geom_smooth(method='lm')
print(summary(lm(pickup~price,data=joined)))
#
#Call:
#lm(formula = pickup ~ price, data = joined)
#
#Residuals:
# Min 1Q Median 3Q Max
#-4.614 -2.812 -1.213 3.387 6.386
#
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 11.00765 7.98736 1.378 0.198
#price -0.02798 0.03190 -0.877 0.401
#
#Residual standard error: 4.21 on 10 degrees of freedom
#Multiple R-squared: 0.07144, Adjusted R-squared: -0.02142
#F-statistic: 0.7693 on 1 and 10 DF, p-value: 0.401
# example B.1 of section B.1.1
# (example B.1 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting the theoretical normal density
library(ggplot2)
x <- seq(from=-5, to=5, length.out=100) # the interval [-5 5]
f <- dnorm(x) # normal with mean 0 and sd 1
ggplot(data.frame(x=x,y=f), aes(x=x,y=y)) + geom_line()
# example B.2 of section B.1.1
# (example B.2 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting an empirical normal density
library(ggplot2)
# draw 1000 points from a normal with mean 0, sd 1
u <- rnorm(1000)
# plot the distribution of points,
# compared to normal curve as computed by dnorm() (dashed line)
# (reuses the x and f vectors computed in example B.1 above)
ggplot(data.frame(x=u), aes(x=x)) + geom_density() +
geom_line(data=data.frame(x=x,y=f), aes(x=x,y=y), linetype=2)
# example B.3 of section B.1.1
# (example B.3 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Working with the normal cdf
# --- estimate probabilities (areas) under the curve ---
# 50% of the observations will be less than the mean
pnorm(0)
# [1] 0.5
# about 2.3% of all observations are more than 2 standard
# deviations below the mean
pnorm(-2)
# [1] 0.02275013
# about 95.4% of all observations are within 2 standard deviations
# from the mean
pnorm(2) - pnorm(-2)
# [1] 0.9544997
# example B.4 of section B.1.1
# (example B.4 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting x < qnorm(0.75)
# --- return the quantiles corresponding to specific probabilities ---
# the median (50th percentile) of a normal is also the mean
qnorm(0.5)
# [1] 0
# calculate the 75th percentile
qnorm(0.75)
# [1] 0.6744898
pnorm(0.6744898)
# [1] 0.75
# --- Illustrate the 75th percentile ---
# create a graph of the normal distribution with mean 0, sd 1
x <- seq(from=-5, to=5, length.out=100)
f <- dnorm(x)
nframe <- data.frame(x=x,y=f)
# calculate the 75th percentile
line <- qnorm(0.75)
xstr <- sprintf("qnorm(0.75) = %1.3f", line)
# the part of the normal distribution to the left
# of the 75th percentile
nframe75 <- subset(nframe, nframe$x < line)
# Plot it.
# The shaded area is 75% of the area under the normal curve
ggplot(nframe, aes(x=x,y=y)) + geom_line() +
geom_area(data=nframe75, aes(x=x,y=y), fill="gray") +
geom_vline(aes(xintercept=line), linetype=2) +
geom_text(x=line, y=0, label=xstr, vjust=1)
# example B.5 of section B.1.3
# (example B.5 of section B.1.3) : Important statistical concepts : Distributions : Lognormal distribution
# Title: Demonstrating some properties of the lognormal distribution
# draw 1001 samples from a lognormal with meanlog 0, sdlog 1
u <- rlnorm(1001)
# the mean of u is higher than the median
mean(u)
# [1] 1.638628
median(u)
# [1] 1.001051
# the mean of log(u) is approx meanlog=0
mean(log(u))
# [1] -0.002942916
# the sd of log(u) is approx sdlog=1
sd(log(u))
# [1] 0.9820357
# generate the lognormal with meanlog=0, sdlog=1
x <- seq(from=0, to=25, length.out=500)
f <- dlnorm(x)
# generate a normal with mean=0, sd=1
x2 <- seq(from=-5,to=5, length.out=500)
f2 <- dnorm(x2)
# make data frames
lnormframe <- data.frame(x=x,y=f)
normframe <- data.frame(x=x2, y=f2)
dframe <- data.frame(u=u)
# plot densityplots with theoretical curves superimposed
p1 <- ggplot(dframe, aes(x=u)) + geom_density() +
geom_line(data=lnormframe, aes(x=x,y=y), linetype=2)
p2 <- ggplot(dframe, aes(x=log(u))) + geom_density() +
geom_line(data=normframe, aes(x=x,y=y), linetype=2)
# functions to plot multiple plots on one page
library(grid)
# Stack the plots in plist vertically on one page using grid viewports:
# a fresh page is laid out as an n-row, 1-column grid and each plot is
# printed into its own row.
nplot <- function(plist) {
  grid.newpage()
  rows <- length(plist)
  pushViewport(viewport(layout=grid.layout(rows,1)))
  for (i in seq_len(rows)) {
    print(plist[[i]],
          vp=viewport(layout.pos.row=i, layout.pos.col=1))
  }
}
# this is the plot that leads this section.
nplot(list(p1, p2))
# example B.6 of section B.1.3
# (example B.6 of section B.1.3) : Important statistical concepts : Distributions : Lognormal distribution
# Title: Plotting the lognormal distribution
# the 50th percentile (or median) of the lognormal with
# meanlog=0 and sdlog=1 (the qlnorm defaults)
qlnorm(0.5)
# [1] 1
# the probability of seeing a value x less than 1
plnorm(1)
# [1] 0.5
# the probability of observing a value x less than 10:
plnorm(10)
# [1] 0.9893489
# -- show the 75th percentile of the lognormal
# use lnormframe from previous example: the
# theoretical lognormal curve
line <- qlnorm(0.75)
xstr <- sprintf("qlnorm(0.75) = %1.3f", line)
lnormframe75 <- subset(lnormframe, lnormframe$x < line)
# Plot it
# The shaded area is 75% of the area under the lognormal curve
ggplot(lnormframe, aes(x=x,y=y)) + geom_line() +
geom_area(data=lnormframe75, aes(x=x,y=y), fill="gray") +
geom_vline(aes(xintercept=line), linetype=2) +
geom_text(x=line, y=0, label=xstr, hjust= 0, vjust=1)
# example B.7 of section B.1.4
# (example B.7 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Plotting the binomial distribution
library(ggplot2)
#
# use dbinom to produce the theoretical curves
#
numflips <- 50
# x is the number of heads that we see
x <- 0:numflips
# probability of heads for several different coins
p <- c(0.05, 0.15, 0.5, 0.75)
plabels <- paste("p =", p)
# calculate the probability of seeing x heads in numflips flips
# for all the coins. This probably isn't the most elegant
# way to do this, but at least it's easy to read
flips <- NULL
for(i in 1:length(p)) {
coin <- p[i]
label <- plabels[i]
tmp <- data.frame(number.of.heads=x,
probability = dbinom(x, numflips, coin),
coin.type = label)
flips <- rbind(flips, tmp)
}
# plot it
# this is the plot that leads this section
ggplot(flips, aes(x=number.of.heads, y=probability)) +
geom_point(aes(color=coin.type, shape=coin.type)) +
geom_line(aes(color=coin.type))
# example B.8 of section B.1.4
# (example B.8 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the theoretical binomial distribution
p = 0.5 # the percentage of females in this student population
class.size <- 20 # size of a classroom
numclasses <- 100 # how many classrooms we observe
# what might a typical outcome look like?
numFemales <- rbinom(numclasses, class.size, p) # Note: 1
# the theoretical counts (not necessarily integral)
probs <- dbinom(0:class.size, class.size, p)
tcount <- numclasses*probs
# the obvious way to plot this is with histogram or geom_bar
# but this might just look better
zero <- function(x) {0} # a dummy function that returns only 0
ggplot(data.frame(number.of.girls=numFemales, dummy=1),
aes(x=number.of.girls, y=dummy)) +
# count the number of times you see x heads
stat_summary(fun.y="sum", geom="point", size=2) + # Note: 2
stat_summary(fun.ymax="sum", fun.ymin="zero", geom="linerange") +
# superimpose the theoretical number of times you see x heads
# NOTE(review): aes(y=tcount) pulls tcount from the global environment;
# this data frame's y=probs column is not actually used.
geom_line(data=data.frame(x=0:class.size, y=probs),
aes(x=x, y=tcount), linetype=2) +
scale_x_continuous(breaks=0:class.size, labels=0:class.size) +
scale_y_continuous("number of classrooms")
# Note 1:
# Because we didn’t call set.seed, we
# expect different results each time we run this line.
# Note 2:
# stat_summary is one of the ways to
# control data aggregation during plotting. In this case, we’re using it to
# place the dot and bar measured from the empirical data in with the
# theoretical density curve.
# example B.9 of section B.1.4
# (example B.9 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Simulating a binomial distribution
# use rbinom to simulate flipping a coin of probability p N times
p75 <- 0.75 # a very unfair coin (mostly heads)
N <- 1000 # flip it several times
flips_v1 <- rbinom(N, 1, p75)
# Another way to generate unfair flips is to use runif:
# the probability that a uniform random number from [0 1)
# is less than p is exactly p. So "less than p" is "heads".
flips_v2 <- as.numeric(runif(N) < p75)
# Tabulate a vector of 0/1 coin flips as counts of "heads" (1) and
# "tails" (0); returns a table whose dimension is named "outcome".
prettyprint_flips <- function(flips) {
  table(outcome = ifelse(flips==1, "heads", "tails"))
}
prettyprint_flips(flips_v1)
# outcome
# heads tails
# 756 244
prettyprint_flips(flips_v2)
# outcome
# heads tails
# 743 257
# example B.10 of section B.1.4
# (example B.10 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the binomial distribution
# pbinom example
# Demonstrate the binomial CDF (pbinom) and quantile function (qbinom)
# for a fair coin flipped nflips times.
nflips <- 100
nheads <- c(25, 45, 50, 60) # number of heads
# what are the probabilities of observing at most that
# number of heads on a fair coin?
left.tail <- pbinom(nheads, nflips, 0.5)
sprintf("%2.2f", left.tail)
# [1] "0.00" "0.18" "0.54" "0.98"
# the probabilities of observing more than that
# number of heads on a fair coin?
# (spell out FALSE rather than F, which is reassignable)
right.tail <- pbinom(nheads, nflips, 0.5, lower.tail=FALSE)
sprintf("%2.2f", right.tail)
# [1] "1.00" "0.82" "0.46" "0.02"
# as expected: P(X <= k) + P(X > k) = 1 for each k
left.tail+right.tail
# [1] 1 1 1 1
# so if you flip a fair coin 100 times,
# you are guaranteed to see more than 10 heads,
# almost guaranteed to see fewer than 60, and
# probably more than 45.
# qbinom example
nflips <- 100
# what's the 95% "central" interval of heads that you
# would expect to observe on 100 flips of a fair coin?
left.edge <- qbinom(0.025, nflips, 0.5)
right.edge <- qbinom(0.025, nflips, 0.5, lower.tail=FALSE)
c(left.edge, right.edge)
# [1] 40 60
# so with 95% probability you should see between 40 and 60 heads
# example B.11 of section B.1.4
# (example B.11 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the binomial cdf
# because this is a discrete probability distribution,
# pbinom and qbinom are not exact inverses of each other
# this direction works
# Show that pbinom/qbinom are only one-sided inverses for a discrete
# distribution: pbinom -> qbinom recovers the count, but qbinom rounds
# the requested quantile up to an achievable count, so qbinom -> pbinom
# overshoots. Uses nflips (= 100) set above.
pbinom(45, nflips, 0.5)
# [1] 0.1841008
qbinom(0.1841008, nflips, 0.5)
# [1] 45
# this direction won't be exact
qbinom(0.75, nflips, 0.5)
# [1] 53
pbinom(53, nflips, 0.5)
# [1] 0.7579408
# example B.12 of section B.2.2
# (example B.12 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Building simulated A/B test data
# Simulate an A/B test: a large control group (A) at a 5% conversion rate
# and a smaller treatment group (B) at 5.5%. Seeded so results reproduce.
set.seed(123515)
d <- rbind( # Note: 1
data.frame(group='A',converted=rbinom(100000,size=1,prob=0.05)), # Note: 2
data.frame(group='B',converted=rbinom(10000,size=1,prob=0.055)) # Note: 3
)
# (rbinom's argument is 'prob'; the original 'p=' only worked via
# partial argument matching, which is best avoided.)
# Note 1:
# Build a data frame to store simulated
# examples.
# Note 2:
# Add 100,000 examples from the A group
# simulating a conversion rate of 5%.
# Note 3:
# Add 10,000 examples from the B group
# simulating a conversion rate of 5.5%.
# example B.13 of section B.2.2
# (example B.13 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Summarizing the A/B test into a contingency table
# Cross-tabulate group (rows) against converted flag (columns):
# a 2x2 contingency table summarizing the simulated A/B test.
tab <- table(d)
print(tab)
## converted
## group 0 1
## A 94979 5021
## B 9398 602
# example B.14 of section B.2.2
# (example B.14 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Calculating the observed A and B rates
# Observed per-group conversion rates, plus the pooled ("common") rate
# over both groups, used later as the null-hypothesis rate.
aConversionRate <- tab['A','1']/sum(tab['A',])
print(aConversionRate)
## [1] 0.05021
bConversionRate <- tab['B','1']/sum(tab['B',])
print(bConversionRate)
## [1] 0.0602
commonRate <- sum(tab[,'1'])/sum(tab)
print(commonRate)
## [1] 0.05111818
# example B.15 of section B.2.2
# (example B.15 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Calculating the significance of the observed difference in rates
fisher.test(tab)
## Fisher's Exact Test for Count Data
##
## data: tab
## p-value = 2.469e-05
## alternative hypothesis: true odds ratio is not equal to 1
## 95 percent confidence interval:
## 1.108716 1.322464
## sample estimates:
## odds ratio
## 1.211706
# example B.16 of section B.2.2
# (example B.16 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Computing frequentist significance
# One-sided binomial tail probability: chance of seeing at least the
# observed number of B conversions if B really converted at commonRate.
print(pbinom( # Note: 1
lower.tail=FALSE, # Note: 2 (FALSE spelled out; F is reassignable)
q=tab['B','1']-1, # Note: 3
size=sum(tab['B',]), # Note: 4
prob=commonRate # Note: 5
))
## [1] 3.153319e-05
# Note 1:
# Use the pbinom() call to calculate how
# likely different observed counts are.
# Note 2:
# Signal we want the probability of being
# greater than a given q.
# Note 3:
# Ask for the probability of seeing at least as many conversions as our observed B groups
# did.
# Note 4:
# Specify the total number of trials as
# equal to what we saw in our B group.
# Note 5:
# Specify the conversion probability at the
# estimated common rate.
# example B.17 of section B.2.2
# (example B.17 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Bayesian estimate of the posterior tail mass
# Posterior tail mass: probability (under a weak Beta prior of roughly one
# pseudo-observation, split commonRate/1-commonRate) that B's true rate is
# no larger than the observed A rate.
print(pbeta( # Note: 1
aConversionRate, # Note: 2
shape1=commonRate+tab['B','1'], # Note: 3
shape2=(1-commonRate)+tab['B','0'])) # Note: 4
## [1] 4.731817e-06
# Note 1:
# pbeta() functionUse pbeta() to estimate how likely
# different observed conversion rates are.
# Note 2:
# Ask for the probability of seeing a
# conversion rate no larger than aConversionRate.
# Note 3:
# Estimate conversion count as prior
# commonRate plus the B observations.
# Note 4:
# Estimate nonconversion count as prior
# 1-commonRate plus the B observations.
# example B.18 of section B.2.2
# (example B.18 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Plotting the posterior distribution of the B group
# Plot the posterior density of B's conversion rate, marking the observed
# B rate (solid line) and A rate (dashed line).
library('ggplot2')
plt <- data.frame(x=seq(from=0.04,to=0.07,length.out=301))
plt$density <- dbeta(plt$x,
shape1=commonRate+tab['B','1'],
shape2=(1-commonRate)+tab['B','0'])
# ggplot's argument is 'data'; the original 'dat=' only worked via
# partial argument matching.
ggplot(data=plt) +
geom_line(aes(x=x,y=density)) +
geom_vline(aes(xintercept=bConversionRate)) +
geom_vline(aes(xintercept=aConversionRate),linetype=2)
# example B.19 of section B.2.3
# (example B.19 of section B.2.3) : Important statistical concepts : Statistical theory : Power of tests
# Title: Sample size estimate
# Heuristic upper-bound estimate of the experiment size needed to detect a
# deviation of 'difference' from 'targetRate' with error probability at
# most 'errorProb'. Returns a whole number of trials (rounded up).
estimate <- function(targetRate, difference, errorProb) {
  sizeBound <- -log(errorProb) * targetRate / difference^2
  ceiling(sizeBound)
}
# Estimated sample size for detecting a 0.4% shift from a 4.5% rate at 5% error.
est <- estimate(0.045,0.004,0.05)
print(est)
## [1] 8426
# example B.20 of section B.2.3
# (example B.20 of section B.2.3) : Important statistical concepts : Statistical theory : Power of tests
# Title: Exact binomial sample size calculation
# Probability of observing a "low" conversion count -- at least
# difference*size below the expectation targetRate*size -- when running an
# experiment of the given size at the true rate targetRate.
errorProb <- function(targetRate,difference,size) { # Note: 1
  lowCount <- ceiling((targetRate - difference) * size)
  pbinom(lowCount, size = size, prob = targetRate)
}
# Failure probability at the estimated size: ~4%, under the 5% target,
# so the heuristic estimate was slightly conservative.
print(errorProb(0.045,0.004,est)) # Note: 2
## [1] 0.04153646
# Binary search for the smallest positive integer n at which
# fEventuallyNegative(n) <= 0, assuming the function is eventually
# negative (and near-monotone decreasing -- small non-monotonicities from
# rounding are tolerated by the bracketing search).
binSearchNonPositive <- function(fEventuallyNegative) { # Note: 3
  low <- 1
  # Fix: the original never tested n == 1, so it returned 2 even when
  # f(1) <= 0. Check the base case explicitly.
  if (fEventuallyNegative(low) <= 0) {
    return(low)
  }
  # Grow high until the bracket invariant f(low) > 0, f(high) <= 0 holds.
  high <- low + 1
  while (fEventuallyNegative(high) > 0) {
    high <- 2 * high
  }
  # Shrink the bracket; %/% keeps m an integer.
  while (high > low + 1) {
    m <- low + (high - low) %/% 2
    if (fEventuallyNegative(m) > 0) {
      low <- m
    } else {
      high <- m
    }
  }
  high
}
# Smallest experiment size whose failure probability (per the global
# errorProb() function) drops to at most the 'errorProb' argument.
# NOTE: the numeric argument 'errorProb' shadows the function of the same
# name; R still finds the function in call position.
actualSize <- function(targetRate,difference,errorProb) {
  excessError <- function(n) {
    errorProb(targetRate, difference, n) - errorProb
  }
  binSearchNonPositive(excessError)
}
# Exact required size (7623) is smaller than the heuristic estimate (8426);
# its failure probability lands just under the 5% target.
size <- actualSize(0.045,0.004,0.05) # Note: 4
print(size)
## [1] 7623
print(errorProb(0.045,0.004,size))
## [1] 0.04983659
# Note 1:
# Define a function that calculates the
# probability of seeing a low number of conversions, assuming the actual
# conversion rate is targetRate and the size of the experiment is size. Low is
# considered be a count that’s at least difference*size below the expected value
# targetRate*size.
# Note 2:
# Calculate probability of a bad experiment using
# estimated experiment size. The failure odds are around 4% (under the 5% we’re
# designing for), which means the estimate size was slightly high.
# Note 3:
# Define a binary search that finds a non-positive
# value of a function that’s guaranteed to be eventually negative. This search
# works around the minor non-monotonicity in errorProb() (due to rounding
# issues).
# Note 4:
# Calculate the required sample size for our B
# experiment.
# example B.21 of section B.2.4
# (example B.21 of section B.2.4) : Important statistical concepts : Statistical theory : Specialized statistical tests
# Title: Building synthetic uncorrelated income example
# Build 100 rows of synthetic, deliberately uncorrelated, lognormal income
# data. Seeded, and the two rlnorm() calls must stay in this order for the
# transcript values below to reproduce.
set.seed(235236) # Note: 1
d <- data.frame(EarnedIncome=100000*rlnorm(100),
CapitalGains=100000*rlnorm(100)) # Note: 2
print(with(d,cor(EarnedIncome,CapitalGains)))
# [1] -0.01066116 # Note: 3
# Note 1:
# Set the pseudo-random seed to a known
# value so the demonstration is repeatable.
# Note 2:
# Generate our synthetic data.
# Note 3:
# The correlation is -0.01, which is very near 0—indicating (as designed) no relation.
# example B.22 of section B.2.4
# (example B.22 of section B.2.4) : Important statistical concepts : Statistical theory : Specialized statistical tests
# Title: Calculating the (non)significance of the observed correlation
# Spearman (rank) correlation and its significance test; the large p-value
# below confirms the near-zero correlation is not significant.
with(d,cor(EarnedIncome,CapitalGains,method='spearman'))
# [1] 0.03083108
with(d,cor.test(EarnedIncome,CapitalGains,method='spearman'))
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 161512, p-value = 0.7604
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#0.03083108
# example B.23 of section B.3.1
# (example B.23 of section B.3.1) : Important statistical concepts : Examples of the statistical view of data : Sampling bias
# Title: Misleading significance result from biased observations
# Selecting only rows with combined income >= $500k induces a strong
# (spurious) negative correlation in otherwise independent variables --
# the sampling-bias effect this example demonstrates.
veryHighIncome <- subset(d, EarnedIncome+CapitalGains>=500000)
print(with(veryHighIncome,cor.test(EarnedIncome,CapitalGains,
method='spearman')))
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 1046, p-value < 2.2e-16
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#-0.8678571
# example B.24 of section B.3.1
# (example B.24 of section B.3.1) : Important statistical concepts : Examples of the statistical view of data : Sampling bias
# Title: Plotting biased view of income and capital gains
# Plot the full data, then the biased (very-high-income) subset with the
# suppressed points shown as red X's and the $500k cut-off as a dashed
# line; finally check the complementary subset's correlation.
library(ggplot2)
ggplot(data=d,aes(x=EarnedIncome,y=CapitalGains)) +
geom_point() + geom_smooth(method='lm') +
coord_cartesian(xlim=c(0,max(d)),ylim=c(0,max(d))) # Note: 1
ggplot(data=veryHighIncome,aes(x=EarnedIncome,y=CapitalGains)) +
geom_point() + geom_smooth(method='lm') +
geom_point(data=subset(d,EarnedIncome+CapitalGains<500000),
aes(x=EarnedIncome,y=CapitalGains),
shape=4,alpha=0.5,color='red') +
geom_segment(x=0,xend=500000,y=500000,yend=0,
linetype=2,alpha=0.5,color='red') +
coord_cartesian(xlim=c(0,max(d)),ylim=c(0,max(d))) # Note: 2
print(with(subset(d,EarnedIncome+CapitalGains<500000),
cor.test(EarnedIncome,CapitalGains,method='spearman'))) # Note: 3
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 107664, p-value = 0.6357
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#-0.05202267
# Note 1:
# Plot all of the income data with linear
# trend line (and uncertainty band).
# Note 2:
# Plot the very high income data and linear
# trend line (also include cut-off and portrayal of suppressed data).
# Note 3:
# Compute correlation of suppressed
# data.
# example B.25 of section B.3.2
# (example B.25 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: Summarizing our synthetic biological data
# Load and inspect the synthetic biological dataset.
# NOTE(review): requires synth.RData in the working directory; example B.26
# below only writes synth.csv, so the .RData file presumably comes with the
# book's supporting materials -- confirm before running.
load('synth.RData')
print(summary(s))
## week Caco2A2BPapp FractionHumanAbsorption
## Min. : 1.00 Min. :6.994e-08 Min. :0.09347
## 1st Qu.: 25.75 1st Qu.:7.312e-07 1st Qu.:0.50343
## Median : 50.50 Median :1.378e-05 Median :0.86937
## Mean : 50.50 Mean :2.006e-05 Mean :0.71492
## 3rd Qu.: 75.25 3rd Qu.:4.238e-05 3rd Qu.:0.93908
## Max. :100.00 Max. :6.062e-05 Max. :0.99170
head(s)
## week Caco2A2BPapp FractionHumanAbsorption
## 1 1 6.061924e-05 0.11568186
## 2 2 6.061924e-05 0.11732401
## 3 3 6.061924e-05 0.09347046
## 4 4 6.061924e-05 0.12893540
## 5 5 5.461941e-05 0.19021858
## 6 6 5.370623e-05 0.14892154
# View(s) # Note: 1
# Note 1:
# Display a date in spreadsheet like
# window. View is one of the commands that has a much better implementation in
# RStudio than in basic R.
# example B.26 of section B.3.2
# (example B.26 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: Building data that improves over time
# Build 100 weeks of synthetic data whose Caco2 values improve over time:
# resample (with replacement) from d$Caco2A2BPapp and sort descending.
# TRUE spelled out rather than T, which is reassignable.
set.seed(2535251)
s <- data.frame(week=1:100)
s$Caco2A2BPapp <- sort(sample(d$Caco2A2BPapp,100,replace=TRUE),
decreasing=TRUE)
sigmoid <- function(x) {1/(1+exp(-x))}
# Synthesize the response (fraction absorbed) from Caco2, a time trend,
# and noise, then write the table out as CSV.
s$FractionHumanAbsorption <- # Note: 1
sigmoid(
7.5 + 0.5*log(s$Caco2A2BPapp) + # Note: 2
s$week/10 - mean(s$week/10) + # Note: 3
rnorm(100)/3 # Note: 4
)
# FALSE spelled out rather than F, which is reassignable.
write.table(s,'synth.csv',sep=',',
quote=FALSE,row.names=FALSE)
# Note 1:
# Build synthetic examples.
# Note 2:
# Add in Caco2 to absorption relation learned from original dataset. Note the relation is
# positive: better Caco2 always drives better absorption in our
# synthetic dataset. We’re log transforming Caco2, as it has over 3
# decades of range.
# Note 3:
# Add in a mean-0 term that depends on time to simulate the effects of improvements as the
# project moves forward.
# Note 4:
# Add in a mean-0 noise term.
# example B.27 of section B.3.2
# (example B.27 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: A bad model (due to omitted variable bias)
# Regress absorption on Caco2 alone -- omitting the week variable gives a
# misleading negative Caco2 coefficient (omitted variable bias). The
# "non-integer #successes" warning below is expected: the response is a
# fraction, not a 0/1 outcome.
print(summary(glm(data=s,
FractionHumanAbsorption~log(Caco2A2BPapp),
family=binomial(link='logit'))))
## Warning: non-integer #successes in a binomial glm!
##
## Call:
## glm(formula = FractionHumanAbsorption ~ log(Caco2A2BPapp),
## family = binomial(link = "logit"),
## data = s)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.609 -0.246 -0.118 0.202 0.557
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -10.003 2.752 -3.64 0.00028 ***
## log(Caco2A2BPapp) -0.969 0.257 -3.77 0.00016 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 43.7821 on 99 degrees of freedom
## Residual deviance: 9.4621 on 98 degrees of freedom
## AIC: 64.7
##
## Number of Fisher Scoring iterations: 6
# example B.28 of section B.3.2
# (example B.28 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: A better model
# Adding week as an explanatory variable removes the bias: Caco2's
# coefficient turns positive (matching how the data was generated) and the
# residual deviance drops sharply versus the model without week.
print(summary(glm(data=s,
FractionHumanAbsorption~week+log(Caco2A2BPapp),
family=binomial(link='logit'))))
## Warning: non-integer #successes in a binomial glm!
##
## Call:
## glm(formula = FractionHumanAbsorption ~ week + log(Caco2A2BPapp),
## family = binomial(link = "logit"), data = s)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.3474 -0.0568 -0.0010 0.0709 0.3038
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.1413 4.6837 0.67 0.5024
## week 0.1033 0.0386 2.68 0.0074 **
## log(Caco2A2BPapp) 0.5689 0.5419 1.05 0.2938
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 43.7821 on 99 degrees of freedom
## Residual deviance: 1.2595 on 97 degrees of freedom
## AIC: 47.82
##
## Number of Fisher Scoring iterations: 6
| /all.r | no_license | msisson/PDSwR | R | false | false | 207,383 | r | # example 1.1 of section 1.2.3
# (example 1.1 of section 1.2.3) : The data science process : Stages of a data science project : Modeling
# Title: Building a decision tree
# Fit a depth-limited classification tree predicting Good.Loan from four
# loan attributes. Requires GCDData.RData (providing data frame d) in the
# working directory.
library('rpart')
load('GCDData.RData')
model <- rpart(Good.Loan ~
Duration.in.month +
Installment.rate.in.percentage.of.disposable.income +
Credit.amount +
Other.installment.plans,
data=d,
control=rpart.control(maxdepth=4),
method="class")
# example 1.2 of section 1.2.4
# (example 1.2 of section 1.2.4) : The data science process : Stages of a data science project : Model evaluation and critique
# Title: Plotting the confusion matrix
# Evaluate the tree on its training data: build the confusion matrix and
# derive accuracy, precision, recall, and false positive rate (the Note
# comments below explain each ratio).
creditdata <- d
resultframe <- data.frame(Good.Loan=creditdata$Good.Loan,
pred=predict(model, type="class"))
rtab <- table(resultframe) # Note: 1
rtab
## pred
## Good.Loan BadLoan GoodLoan
## BadLoan 41 259
## GoodLoan 13 687
sum(diag(rtab))/sum(rtab) # Note: 2
## [1] 0.728
sum(rtab[1,1])/sum(rtab[,1]) # Note: 3
## [1] 0.7592593
sum(rtab[1,1])/sum(rtab[1,]) # Note: 4
## [1] 0.1366667
sum(rtab[2,1])/sum(rtab[2,]) # Note: 5
## [1] 0.01857143
# Note 1:
# Create the confusion matrix. Rows represent
# actual loan status; columns represent predicted
# loan status. The diagonal entries represent
# correct predictions.
# Note 2:
# accuracyconfusion matrixOverall model accuracy: 73% of the predictions
# were correct.
# Note 3:
# precisionconfusion matrixModel precision: 76% of the applicants
# predicted as bad really did default.
# Note 4:
# recallconfusion matrixModel recall: the model found 14% of the
# defaulting loans.
# Note 5:
# false positive rateconfusion matrixFalse positive rate: 2% of the good applicants
# were mistakenly identified as bad.
# example 1.3 of section 1.3.1
# (example 1.3 of section 1.3.1) : The data science process : Setting expectations : Determining lower and upper bounds on model performance
# Title: Plotting the relation between disposable income and loan outcome
# Two hypothetical populations cross-tabulating the loan-to-disposable-
# income rule against loan outcome: the rule is highly accurate in
# population 1 and much weaker in population 2.
tab1 <- as.table(matrix(
  c(50, 6, 0, 44), nrow = 2, ncol = 2,
  dimnames = list(
    'loan.as.pct.disposable.income' = c('LT.15pct', 'GT.15pct'),
    'loan.quality.pop1' = c('goodloan', 'badloan'))))
tab2 <- as.table(matrix(
  c(34, 18, 16, 32), nrow = 2, ncol = 2,
  dimnames = list(
    'loan.as.pct.disposable.income' = c('LT.15pct', 'GT.15pct'),
    'loan.quality.pop2' = c('goodloan', 'badloan'))))
tab1
## loan.quality.pop1 # Note: 1
## loan.as.pct.disposable.income goodloan badloan
## LT.15pct 50 0
## GT.15pct 6 44
sum(diag(tab1))/sum(tab1) # Note: 2
## [1] 0.94
tab2
## loan.quality.pop2 # Note: 3
## loan.as.pct.disposable.income goodloan badloan
## LT.15pct 34 16
## GT.15pct 18 32
sum(diag(tab2))/sum(tab2)
## [1] 0.66 # Note: 4
# Note 1:
# The count of correct predictions is on the
# diagonal of tab1. In this first population, all
# the loans that were less than 15% of disposable
# income were good loans, and all but six of the
# loans that were greater than 15% of disposable
# income defaulted. So you know that
# loan.as.pct.disposable.income models loan quality
# well in this population. Or as statisticians might
# say, loan.as.pct.disposable.income “explains” the
# output (loan quality).
# Note 2:
# In fact, it’s 94% accurate.
# Note 3:
# In the second population, about a third of
# the loans that were less than 15% of disposable
# income defaulted, and over half of the loans that
# were greater than 15% of disposable income were
# good. So you know that
# loan.as.pct.disposable.income doesn’t model loan
# quality well in this population.
# Note 4:
# The rule of thumb is only 66%
# accurate.
# example 2.1 of section 2.1.1
# (example 2.1 of section 2.1.1) : Loading data into R : Working with data from files : Working with well-structured data from files or URLs
# Title: Reading the UCI car data
# Download the UCI car dataset (comma-separated, with a header row) into a
# data frame. TRUE spelled out rather than T, which is reassignable.
uciCar <- read.table( # Note: 1
'http://www.win-vector.com/dfiles/car.data.csv', # Note: 2
sep=',', # Note: 3
header=TRUE # Note: 4
)
# Note 1:
# Command to read from a file or URL and store the result in a new data frame object
# called
# uciCar.
# Note 2:
# Filename or URL to get the data from.
# Note 3:
# Specify the column or field separator as a
# comma.
# Note 4:
# Tell R to expect a header line that defines
# the data column names.
# example 2.2 of section 2.1.1
# (example 2.2 of section 2.1.1) : Loading data into R : Working with data from files : Working with well-structured data from files or URLs
# Title: Exploring the car data
# Confirm the parse: object type, per-column level counts, and dimensions
# (1728 rows = file lines minus the header).
class(uciCar)
## [1] "data.frame" # Note: 1
summary(uciCar)
## buying maint doors
## high :432 high :432 2 :432
## low :432 low :432 3 :432
## med :432 med :432 4 :432
## vhigh:432 vhigh:432 5more:432
##
## persons lug_boot safety
## 2 :576 big :576 high:576
## 4 :576 med :576 low :576
## more:576 small:576 med :576
##
## rating
## acc : 384
## good : 69
## unacc:1210
## vgood: 65
dim(uciCar)
## [1] 1728 7 # Note: 2
# Note 1:
# The loaded object uciCar is of type data.frame.
# Note 2:
# The [1] is just an output sequence
# marker. The actual information is this: uciCar has
# 1728 rows and 7 columns. Always try to confirm you
# got a good parse by at least checking that the
# number of rows is exactly one fewer than the
# number of lines of text in the original file. The
# difference of one is because the column header
# counts as a line, but not as a data row.
# example 2.3 of section 2.1.2
# (example 2.3 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Loading the credit dataset
# Download the UCI German credit dataset: whitespace-separated, no header,
# and keep the coded columns as character (not factor) for remapping below.
# FALSE spelled out rather than F; paste0 replaces paste(..., sep='').
d <- read.table(paste0('http://archive.ics.uci.edu/ml/',
'machine-learning-databases/statlog/german/german.data'),
stringsAsFactors=FALSE,header=FALSE)
print(d[1:3,])
# example 2.4 of section 2.1.2
# (example 2.4 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Setting column names
# Attach the documented column names (the raw file has none), then recode
# the numeric outcome 1/2 into the readable factor GoodLoan/BadLoan.
colnames(d) <- c('Status.of.existing.checking.account',
'Duration.in.month', 'Credit.history', 'Purpose',
'Credit.amount', 'Savings account/bonds',
'Present.employment.since',
'Installment.rate.in.percentage.of.disposable.income',
'Personal.status.and.sex', 'Other.debtors/guarantors',
'Present.residence.since', 'Property', 'Age.in.years',
'Other.installment.plans', 'Housing',
'Number.of.existing.credits.at.this.bank', 'Job',
'Number.of.people.being.liable.to.provide.maintenance.for',
'Telephone', 'foreign.worker', 'Good.Loan')
d$Good.Loan <- as.factor(ifelse(d$Good.Loan==1,'GoodLoan','BadLoan'))
print(d[1:3,])
# example 2.5 of section 2.1.2
# (example 2.5 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Building a map to interpret loan use codes
# Named list translating the dataset's A-codes into readable labels.
# NOTE(review): the bare '...' is the book's elision, NOT valid R -- this
# snippet will not run until the remaining code/label pairs are filled in.
mapping <- list(
'A40'='car (new)',
'A41'='car (used)',
'A42'='furniture/equipment',
'A43'='radio/television',
'A44'='domestic appliances',
...
)
# example 2.6 of section 2.1.2
# (example 2.6 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Transforming the car data
# Remap every character column of d through the mapping list, converting
# the readable labels to factors. seq_len(ncol(d)) replaces 1:(dim(d))[2]
# (safe for zero columns); is.character() replaces the class() comparison.
for(i in seq_len(ncol(d))) { # Note: 1
  if(is.character(d[,i])) {
    d[,i] <- as.factor(as.character(mapping[d[,i]])) # Note: 2
  }
}
# Note 1:
# (dim(d))[2] is the number of columns
# in the data frame d.
# Note 2:
# Note that the indexing operator [] is vectorized. Each step in the for loop remaps an
# entire column of data through our list.
# example 2.7 of section 2.1.2
# (example 2.7 of section 2.1.2) : Loading data into R : Working with data from files : Using R on less-structured data
# Title: Summary of Good.Loan and Purpose
table(d$Purpose,d$Good.Loan)
## BadLoan GoodLoan
## business 34 63
## car (new) 89 145
## car (used) 17 86
## domestic appliances 4 8
## education 22 28
## furniture/equipment 58 123
## others 5 7
## radio/television 62 218
## repairs 8 14
## retraining 1 8
# example 2.11 of section 2.2.2
# (example 2.11 of section 2.2.2) : Loading data into R : Working with relational databases : Loading data from a database into R
# Title: Loading data into R from a relational database
# Pull a 2/1000 sample of the Census households (hus) and the matching
# person records (pus) out of an H2 database via JDBC, then save both
# data frames to phsample.RData.
options( java.parameters = "-Xmx2g" ) # Note: 1
library(RJDBC)
drv <- JDBC("org.h2.Driver", # Note: 2
"h2-1.3.176.jar", # Note: 3
identifier.quote="'") # Note: 4
# Connection-URL flags; renamed from 'options', which shadowed the
# base::options() function called above.
h2Options <- ";LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0"
conn <- dbConnect(drv,paste("jdbc:h2:./H2DB",h2Options,sep=''),"u","u")
dhus <- dbGetQuery(conn,"SELECT * FROM hus WHERE ORIGRANDGROUP<=1") # Note: 5
dpus <- dbGetQuery(conn,"SELECT pus.* FROM pus WHERE pus.SERIALNO IN \
(SELECT DISTINCT hus.SERIALNO FROM hus \
WHERE hus.ORIGRANDGROUP<=1)") # Note: 6
dbDisconnect(conn) # Note: 7
save(dhus,dpus,file='phsample.RData') # Note: 8
# Note 1:
# Set Java option for extra memory before DB
# drivers are loaded.
# Note 2:
# Specify the name of the database driver, same
# as in our XML database configuration.
# Note 3:
# Specify where to find the implementation of
# the database driver.
# Note 4:
# SQL column names with mixed-case
# capitalization, special characters, or that
# collide with reserved words must be quoted. We
# specify single-quote as the quote we’ll use when
# quoting column names, which may different than the
# quote we use for SQL literals.
# Note 5:
# Create a data frame called dhus from *
# (everything) from the database table hus, taking
# only rows where ORGINRANGGROUP <= 1. The
# ORGINRANDGROUP column is a random integer from 0
# through 999 that SQL Screwdriver adds to the rows
# during data load to facilitate sampling. In this
# case, we’re taking 2/1000 of the data rows to get
# a small sample.
# Note 6:
# Create a data frame called dpus from the
# database table pus, taking only records that have
# a household ID in the set of household IDs we
# selected from households table hus.
# Note 7:
# Disconnect for the database.
# Note 8:
# Save the two data frames into a file named
# phsample.RData, which can be read in with load().
# Try help("save") or help("load") for more
# details.
# example 2.12 of section 2.2.3
# (example 2.12 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Selecting a subset of the Census data
# Load the sampled Census data and keep only full-time employed wage
# earners aged 20-50 with plausible incomes (see Note 1 below).
# '<-' replaces the top-level '=' assignment, per R convention.
load('phsample.RData')
psub <- subset(dpus,with(dpus,(PINCP>1000)&(ESR==1)&
(PINCP<=250000)&(PERNP>1000)&(PERNP<=250000)&
(WKHP>=40)&(AGEP>=20)&(AGEP<=50)&
(PWGTP1>0)&(COW %in% (1:7))&(SCHL %in% (1:24)))) # Note: 1
# Note 1:
# Subset of data rows matching detailed
# employment conditions
# example 2.13 of section 2.2.3
# (example 2.13 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Recoding variables
# Recode the numeric Census codes into readable factors (sex, class of
# worker, education), set reference levels for modeling, and split into
# train/test by the precomputed random group.
# '<-' replaces the '=' assignments throughout, per R convention.
psub$SEX <- as.factor(ifelse(psub$SEX==1,'M','F')) # Note: 1
psub$SEX <- relevel(psub$SEX,'M') # Note: 2
cowmap <- c("Employee of a private for-profit",
"Private not-for-profit employee",
"Local government employee",
"State government employee",
"Federal government employee",
"Self-employed not incorporated",
"Self-employed incorporated")
psub$COW <- as.factor(cowmap[psub$COW]) # Note: 3
psub$COW <- relevel(psub$COW,cowmap[1])
schlmap <- c( # Note: 4
rep("no high school diploma",15),
"Regular high school diploma",
"GED or alternative credential",
"some college credit, no degree",
"some college credit, no degree",
"Associate's degree",
"Bachelor's degree",
"Master's degree",
"Professional degree",
"Doctorate degree")
psub$SCHL <- as.factor(schlmap[psub$SCHL])
psub$SCHL <- relevel(psub$SCHL,schlmap[1])
dtrain <- subset(psub,ORIGRANDGROUP >= 500) # Note: 5
dtest <- subset(psub,ORIGRANDGROUP < 500) # Note: 6
# Note 1:
# Reencode sex from 1/2 to M/F.
# Note 2:
# Make the reference sex M, so F encodes a
# difference from M in models.
# Note 3:
# Reencode class of worker info into a more
# readable form.
# Note 4:
# Reencode education info into a more readable
# form and fewer levels (merge all levels below high
# school into same encoding).
# Note 5:
# Subset of data rows used for model
# training.
# Note 6:
# Subset of data rows used for model
# testing.
# example 2.14 of section 2.2.3
# (example 2.14 of section 2.2.3) : Loading data into R : Working with relational databases : Working with the PUMS data
# Title: Summarizing the classifications of work
summary(dtrain$COW)
## Employee of a private for-profit Federal government employee
## 423 21
## Local government employee Private not-for-profit employee
## 39 55
## Self-employed incorporated Self-employed not incorporated
## 17 16
## State government employee
## 24
# example 3.1 of section 3.1
# (example 3.1 of section 3.1) : Exploring data : Using summary statistics to spot problems
# Title: The summary() command
# Load the tab-separated customer dataset (expects custdata.tsv in the
# working directory) and summarize every column to spot data problems.
custdata <- read.table('custdata.tsv',
header=TRUE,sep='\t')
summary(custdata)
## custid sex
## Min. : 2068 F:440
## 1st Qu.: 345667 M:560
## Median : 693403
## Mean : 698500
## 3rd Qu.:1044606
## Max. :1414286
##
## is.employed income # Note: 1
## Mode :logical Min. : -8700
## FALSE:73 1st Qu.: 14600
## TRUE :599 Median : 35000
## NA's :328 Mean : 53505
## 3rd Qu.: 67000
## Max. :615000
##
## marital.stat
## Divorced/Separated:155
## Married :516
## Never Married :233
## Widowed : 96
##
## health.ins # Note: 2
## Mode :logical
## FALSE:159
## TRUE :841
## NA's :0
##
## housing.type # Note: 3
## Homeowner free and clear :157
## Homeowner with mortgage/loan:412
## Occupied with no rent : 11
## Rented :364
## NA's : 56
##
## recent.move num.vehicles
## Mode :logical Min. :0.000
## FALSE:820 1st Qu.:1.000
## TRUE :124 Median :2.000
## NA's :56 Mean :1.916
## 3rd Qu.:2.000
## Max. :6.000
## NA's :56
##
## age state.of.res # Note: 4
## Min. : 0.0 California :100
## 1st Qu.: 38.0 New York : 71
## Median : 50.0 Pennsylvania: 70
## Mean : 51.7 Texas : 56
## 3rd Qu.: 64.0 Michigan : 52
## Max. :146.7 Ohio : 51
## (Other) :600
# Note 1:
# The variable is.employed is missing for
# about a third of the data. The variable income has negative values, which are
# potentially invalid.
# Note 2:
# About 84% of the customers have health
# insurance.
# Note 3:
# The variables housing.type, recent.move, and
# num.vehicles are each missing 56 values.
# Note 4:
# The average value of the variable age seems
# plausible, but the minimum and maximum values seem unlikely. The variable
# state.of.res is a categorical variable; summary() reports how many customers are in
# each state (for the first few states).
# example 3.3 of section 3.1.1
# (example 3.3 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Examples of invalid values and outliers
# Single-column summaries exposing suspicious values: negative incomes and
# ages of 0 or ~147 (see Notes 1 and 2 below).
summary(custdata$income)
## Min. 1st Qu. Median Mean 3rd Qu.
## -8700 14600 35000 53500 67000 # Note: 1
## Max.
## 615000
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu.
## 0.0 38.0 50.0 51.7 64.0 # Note: 2
## Max.
## 146.7
# Note 1:
# Negative values for income could indicate
# bad data. They might also have a special meaning, like “amount of
# debt.” Either way, you should check how prevalent the issue is,
# and decide what to do: Do you drop the data with negative income? Do you
# convert negative values to zero?
# Note 2:
# Customers of age zero, or customers of an
# age greater than about 110 are outliers. They fall out of the range of
# expected customer values. Outliers could be data input errors.
# They could be special sentinel values: zero might mean “age unknown” or
# “refuse to state.” And some of your customers might be especially
# long-lived.
# example 3.4 of section 3.1.1
# (example 3.4 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Looking at the data range of a variable
# Income spans zero to over half a million dollars -- a very wide range.
summary(custdata$income)
## Min. 1st Qu. Median Mean 3rd Qu.
## -8700 14600 35000 53500 67000 # Note: 1
## Max.
## 615000
# Note 1:
# Income ranges from zero to over half a million
# dollars; a very wide range.
# example 3.5 of section 3.1.1
# (example 3.5 of section 3.1.1) : Exploring data : Using summary statistics to spot problems : Typical problems revealed by data summaries
# Title: Checking units sounds silly, but mistakes can lead to spectacular errors if not caught
# Rescale income to units of $1000 to illustrate the units-ambiguity
# pitfall described in Note 1 below. '<-' replaces '=' per R convention.
Income <- custdata$income/1000
summary(Income) # Note: 1
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -8.7 14.6 35.0 53.5 67.0 615.0
# Note 1:
# The variable Income is defined as Income = custdata$income/1000. But suppose you didn’t know
# that. Looking only at the summary, the values could plausibly be
# interpreted to mean either “hourly wage” or “yearly income in units
# of $1000.”
# example 3.6 of section 3.2.1
# (example 3.6 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Plotting a histogram
# Histogram of customer age in five-year bins.
library(ggplot2) # Note: 1
ggplot(custdata) +
geom_histogram(aes(x=age),
binwidth=5, fill="gray") # Note: 2
# Note 1:
# Load the ggplot2 library, if you haven’t
# already done so.
# Note 2:
# binwidth parameterThe binwidth parameter tells the
# geom_histogram call how to make bins of five-year intervals (default is
# datarange/30). The fill parameter specifies the color of the histogram
# bars (default: black).
# example 3.7 of section 3.2.1
# (example 3.7 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a density plot
# Density plot of income with dollar-formatted x-axis labels.
library(scales) # Note: 1
ggplot(custdata) + geom_density(aes(x=income)) +
scale_x_continuous(labels=dollar) # Note: 2
# Note 1:
# The scales package brings in the dollar
# scale notation.
# Note 2:
# Set the x-axis labels to
# dollars.
# example 3.8 of section 3.2.1
# (example 3.8 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Creating a log-scaled density plot
# Same density on a log10 x-axis, which spreads out the wide income range.
ggplot(custdata) + geom_density(aes(x=income)) +
scale_x_log10(breaks=c(100,1000,10000,100000), labels=dollar) + # Note: 1
annotation_logticks(sides="bt") # Note: 2
# Note 1:
# Set the x-axis to be in log10 scale, with
# manually set tick points and labels as dollars.
# Note 2:
# Add log-scaled tick marks to the top and
# bottom of the graph.
# informalexample 3.2 of section 3.2.1
# (informalexample 3.2 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
ggplot(custdata) + geom_bar(aes(x=marital.stat), fill="gray")
# example 3.9 of section 3.2.1
# (example 3.9 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a horizontal bar chart
ggplot(custdata) +
geom_bar(aes(x=state.of.res), fill="gray") + # Note: 1
coord_flip() + # Note: 2
theme(axis.text.y=element_text(size=rel(0.8))) # Note: 3
# Note 1:
# Plot bar chart as before: state.of.res
# is on x axis, count is on y-axis.
# Note 2:
# Flip the x and y axes: state.of.res is
# now on the y-axis.
# Note 3:
# Reduce the size of the y-axis tick
# labels to 80% of default size for legibility.
# example 3.10 of section 3.2.1
# (example 3.10 of section 3.2.1) : Exploring data : Spotting problems using graphics and visualization : Visually checking distributions for a single variable
# Title: Producing a bar chart with sorted categories
statesums <- table(custdata$state.of.res) # Note: 1
statef <- as.data.frame(statesums) # Note: 2
colnames(statef)<-c("state.of.res", "count") # Note: 3
summary(statef) # Note: 4
## state.of.res count
## Alabama : 1 Min. : 1.00
## Alaska : 1 1st Qu.: 5.00
## Arizona : 1 Median : 12.00
## Arkansas : 1 Mean : 20.00
## California: 1 3rd Qu.: 26.25
## Colorado : 1 Max. :100.00
## (Other) :44
statef <- transform(statef,
state.of.res=reorder(state.of.res, count)) # Note: 5
summary(statef) # Note: 6
## state.of.res count
## Delaware : 1 Min. : 1.00
## North Dakota: 1 1st Qu.: 5.00
## Wyoming : 1 Median : 12.00
## Rhode Island: 1 Mean : 20.00
## Alaska : 1 3rd Qu.: 26.25
## Montana : 1 Max. :100.00
## (Other) :44
ggplot(statef)+ geom_bar(aes(x=state.of.res,y=count),
stat="identity", # Note: 7
fill="gray") +
coord_flip() + # Note: 8
theme(axis.text.y=element_text(size=rel(0.8)))
# Note 1:
# The table() command aggregates the data by state of residence—exactly the information the bar
# chart plots.
# Note 2:
# Convert the table to a data frame. The default column names are Var1 and Freq.
# Note 3:
# Rename the columns for readability.
# Note 4:
# Notice that the default ordering for the
# state.of.res variable is alphabetical.
# Note 5:
# Use the reorder() function to set the
# state.of.res variable to be count ordered. Use the transform() function
# to apply the transformation to the state.of.res data frame.
# Note 6:
# The state.of.res variable is now count
# ordered.
# Note 7:
# Since the data is being passed to
# geom_bar pre-aggregated, specify both the x and
# y variables, and use stat="identity" to plot the
# data exactly as given.
# Note 8:
# Flip the axes and reduce the size of the
# label text as before.
# example 3.11 of section 3.2.2
# (example 3.11 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Producing a line plot
x <- runif(100) # Note: 1
y <- x^2 + 0.2*x # Note: 2
ggplot(data.frame(x=x,y=y), aes(x=x,y=y)) + geom_line() # Note: 3
# Note 1:
# First, generate the data for this example.
# The x variable is uniformly randomly distributed
# between 0 and 1.
# Note 2:
# The y variable is a
# quadratic function of x.
# Note 3:
# Plot the line plot.
# example 3.12 of section 3.2.2
# (example 3.12 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Examining the correlation between age and income
custdata2 <- subset(custdata,
(custdata$age > 0 & custdata$age < 100
& custdata$income > 0)) # Note: 1
cor(custdata2$age, custdata2$income) # Note: 2
## [1] -0.02240845 # Note: 3
# Note 1:
# Only consider a subset of data with
# reasonable age and income values.
# Note 2:
# Get correlation of age and income.
# Note 3:
# Resulting correlation.
# informalexample 3.3 of section 3.2.2
# (informalexample 3.3 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
ggplot(custdata2, aes(x=age, y=income)) +
geom_point() + ylim(0, 200000)
# informalexample 3.4 of section 3.2.2
# (informalexample 3.4 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
ggplot(custdata2, aes(x=age, y=income)) + geom_point() +
stat_smooth(method="lm") +
ylim(0, 200000)
# informalexample 3.5 of section 3.2.2
# (informalexample 3.5 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
ggplot(custdata2, aes(x=age, y=income)) +
geom_point() + geom_smooth() +
ylim(0, 200000)
# example 3.13 of section 3.2.2
# (example 3.13 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting the distribution of health.ins as a function of age
ggplot(custdata2, aes(x=age, y=as.numeric(health.ins))) + # Note: 1
geom_point(position=position_jitter(w=0.05, h=0.05)) + # Note: 2
geom_smooth() # Note: 3
# Note 1:
# The Boolean variable health.ins must be
# converted to a 0/1 variable using as.numeric.
# Note 2:
# Since y values can
# only be 0 or 1, add a small jitter to get a sense of data
# density.
# Note 3:
# Add smoothing curve.
# example 3.14 of section 3.2.2
# (example 3.14 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Producing a hexbin plot
library(hexbin) # Note: 1
ggplot(custdata2, aes(x=age, y=income)) +
geom_hex(binwidth=c(5, 10000)) + # Note: 2
geom_smooth(color="white", se=F) + # Note: 3
ylim(0,200000)
# Note 1:
# Load hexbin library.
# Note 2:
# Create hexbin with age binned into 5-year
# increments, income in increments of $10,000.
# Note 3:
# Add smoothing curve in white; suppress
# standard error ribbon (se=F).
# example 3.15 of section 3.2.2
# (example 3.15 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Specifying different styles of bar chart
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins)) # Note: 1
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins),
position="dodge") # Note: 2
ggplot(custdata) + geom_bar(aes(x=marital.stat,
fill=health.ins),
position="fill") # Note: 3
# Note 1:
# Stacked bar chart, the
# default
# Note 2:
# Side-by-side bar chart
# Note 3:
# Filled bar chart
# example 3.16 of section 3.2.2
# (example 3.16 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting data with a rug
ggplot(custdata, aes(x=marital.stat)) +
geom_bar(aes(fill=health.ins), position="fill") +
geom_point(aes(y=-0.05), size=0.75, alpha=0.3, # Note: 1
position=position_jitter(h=0.01)) # Note: 2
# Note 1:
# Set the points just under the y-axis,
# three-quarters of default size, and make them slightly transparent with
# the alpha parameter.
# Note 2:
# Jitter the points slightly for
# legibility.
# example 3.17 of section 3.2.2
# (example 3.17 of section 3.2.2) : Exploring data : Spotting problems using graphics and visualization : Visually checking relationships between two variables
# Title: Plotting a bar chart with and without facets
ggplot(custdata2) + # Note: 1
geom_bar(aes(x=housing.type, fill=marital.stat ),
position="dodge") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) # Note: 2
ggplot(custdata2) + # Note: 3
geom_bar(aes(x=marital.stat), position="dodge",
fill="darkgray") +
facet_wrap(~housing.type, scales="free_y") + # Note: 4
theme(axis.text.x = element_text(angle = 45, hjust = 1)) # Note: 5
# Note 1:
# Side-by-side bar chart.
# Note 2:
# coord_flip commandTilt the x-axis labels so they
# don’t overlap. You can also use coord_flip() to rotate the graph, as we
# saw previously. Some prefer coord_flip() because the theme() layer is
# complicated to use.
# Note 3:
# The faceted bar chart.
# Note 4:
# Facet the graph by housing.type. The scales="free_y" argument specifies that each facet has
# an independently scaled y-axis (the default is that all facets have
# the same scales on both axes). The argument free_x would free the
# x-axis scaling, and the argument free frees both axes.
# Note 5:
# As of this writing,
# facet_wrap is incompatible with coord_flip, so we have to tilt the
# x-axis labels.
# example 4.1 of section 4.1.1
# (example 4.1 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Checking locations of missing data
custdata <- read.table('custdata.tsv',
header=TRUE,sep='\t')
summary(custdata[is.na(custdata$housing.type), # Note: 1
c("recent.move","num.vehicles")]) # Note: 2
## recent.move num.vehicles # Note: 3
## Mode:logical Min. : NA
## NA's:56 1st Qu.: NA
## Median : NA
## Mean :NaN
## 3rd Qu.: NA
## Max. : NA
## NA's :56
# Note 1:
# Restrict to the rows where housing.type is
# NA.
# Note 2:
# Look only at the columns recent.move and
# num.vehicles.
# Note 3:
# The output: all NAs. All the missing data
# comes from the same rows.
# example 4.2 of section 4.1.1
# (example 4.2 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Remapping NA to a level
custdata$is.employed.fix <- ifelse(is.na(custdata$is.employed), # Note: 1
"missing", # Note: 2
ifelse(custdata$is.employed==T, # Note: 3
"employed",
"not employed")) # Note: 4
summary(as.factor(custdata$is.employed.fix)) # Note: 5
## employed missing not employed
## 599 328 73
# Note 1:
# If is.employed value is missing...
# Note 2:
# ...assign the value "missing".
# Otherwise...
# Note 3:
# ...if is.employed==TRUE, assign the value
# "employed"...
# Note 4:
# ...or the value "not employed".
# Note 5:
# The transformation has turned the variable
# type from factor to string. You can change it back
# with the as.factor() function.
# informalexample 4.1 of section 4.1.1
# (informalexample 4.1 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
custdata$is.employed.fix <- ifelse(is.na(custdata$is.employed),
"not in active workforce",
ifelse(custdata$is.employed==T,
"employed",
"not employed"))
# informalexample 4.2 of section 4.1.1
# (informalexample 4.2 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
summary(custdata$Income)
## Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
## 0 25000 45000 66200 82000 615000 328
# informalexample 4.3 of section 4.1.1
# (informalexample 4.3 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
meanIncome <- mean(custdata$Income, na.rm=T) # Note: 1
Income.fix <- ifelse(is.na(custdata$Income),
meanIncome,
custdata$Income)
summary(Income.fix)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 35000 66200 66200 66200 615000
# Note 1:
# Don’t forget the argument "na.rm=T"!
# Otherwise, the mean() function will include the
# NAs by default, and meanIncome will be NA.
# example 4.3 of section 4.1.1
# (example 4.3 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Converting missing numeric data to a level
breaks <-c(0, 10000, 50000, 100000, 250000, 1000000) # Note: 1
Income.groups <- cut(custdata$income,
breaks=breaks, include.lowest=T) # Note: 2
summary(Income.groups) # Note: 3
## [0,1e+04] (1e+04,5e+04] (5e+04,1e+05] (1e+05,2.5e+05] (2.5e+05,1e+06]
## 63 312 178 98 21
## NA's
## 328
Income.groups <- as.character(Income.groups) # Note: 4
Income.groups <- ifelse(is.na(Income.groups), # Note: 5
"no income", Income.groups)
summary(as.factor(Income.groups))
## (1e+04,5e+04] (1e+05,2.5e+05] (2.5e+05,1e+06] (5e+04,1e+05] [0,1e+04]
## 312 98 21 178 63
## no income
## 328
# Note 1:
# Select some income ranges of interest. To
# use the cut() function, the upper and lower bounds
# should encompass the full income range of the
# data.
# Note 2:
# Cut the data into income ranges. The
# include.lowest=T argument makes sure that zero
# income data is included in the lowest income range
# category. By default it would be excluded.
# Note 3:
# The cut() function produces factor
# variables. Note the NAs are preserved.
# Note 4:
# To preserve the category names before adding
# a new category, convert the variables to strings.
# Note 5:
# Add the "no income" category to replace the
# NAs.
# example 4.4 of section 4.1.1
# (example 4.4 of section 4.1.1) : Managing data : Cleaning data : Treating missing values (NAs)
# Title: Tracking original NAs with an extra categorical variable
missingIncome <- is.na(custdata$Income) # Note: 1
Income.fix <- ifelse(is.na(custdata$Income), 0, custdata$Income) # Note: 2
# Note 1:
# The missingIncome variable lets you
# differentiate the two kinds of zeros in the data:
# the ones that you are about to add, and the ones
# that were already there.
# Note 2:
# Replace the NAs with zeros.
# example 4.5 of section 4.1.2
# (example 4.5 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Normalizing income by state
medianincome <- aggregate(income~state.of.res,custdata,FUN=median)
colnames(medianincome) <- c('State','Median.Income')
summary(medianincome) # Note: 1
## State Median.Income
## : 1 Min. :37427
## Alabama : 1 1st Qu.:47483
## Alaska : 1 Median :52274
## Arizona : 1 Mean :52655
## Arkansas : 1 3rd Qu.:57195
## California: 1 Max. :68187
## (Other) :46
custdata <- merge(custdata, medianincome,
by.x="state.of.res", by.y="State") # Note: 2
summary(custdata[,c("state.of.res", "income", "Median.Income")]) # Note: 3
## state.of.res income Median.Income
## California :100 Min. : -8700 Min. :37427
## New York : 71 1st Qu.: 14600 1st Qu.:44819
## Pennsylvania: 70 Median : 35000 Median :50977
## Texas : 56 Mean : 53505 Mean :51161
## Michigan : 52 3rd Qu.: 67000 3rd Qu.:55559
## Ohio : 51 Max. :615000 Max. :68187
## (Other) :600
custdata$income.norm <- with(custdata, income/Median.Income) # Note: 4
summary(custdata$income.norm)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.1791 0.2729 0.6992 1.0820 1.3120 11.6600
# Note 1:
# medianincome is a data frame of median
# income by state.
# Note 2:
# Merge median income information into the
# custdata data frame by matching the column
# custdata$state.of.res to the column
# medianincome$State.
# Note 3:
# Median.Income is now part of custdata.
# Note 4:
# Normalize income by Median.Income.
# informalexample 4.4 of section 4.1.2
# (informalexample 4.4 of section 4.1.2) : Managing data : Cleaning data : Data transformations
custdata$income.lt.20K <- custdata$income < 20000
summary(custdata$income.lt.20K)
## Mode FALSE TRUE NA's
## logical 678 322 0
# example 4.6 of section 4.1.2
# (example 4.6 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Converting age into ranges
brks <- c(0, 25, 65, Inf) # Note: 1
custdata$age.range <- cut(custdata$age,
breaks=brks, include.lowest=T) # Note: 2
summary(custdata$age.range) # Note: 3
## [0,25] (25,65] (65,Inf]
## 56 732 212
# Note 1:
# Select the age ranges of interest. The upper
# and lower bounds should encompass the full range
# of the data.
# Note 2:
# Cut the data into age ranges. The
# include.lowest=T argument makes sure that zero age
# data is included in the lowest age range category.
# By default it would be excluded.
# Note 3:
# The output of cut() is a factor variable.
# example 4.7 of section 4.1.2
# (example 4.7 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Centering on mean age
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 38.0 50.0 51.7 64.0 146.7
meanage <- mean(custdata$age)
custdata$age.normalized <- custdata$age/meanage
summary(custdata$age.normalized)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.7350 0.9671 1.0000 1.2380 2.8370
# example 4.8 of section 4.1.2
# (example 4.8 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Title: Summarizing age
summary(custdata$age)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 38.0 50.0 51.7 64.0 146.7
meanage <- mean(custdata$age) # Note: 1
stdage <- sd(custdata$age) # Note: 2
meanage
## [1] 51.69981
stdage
## [1] 18.86343
custdata$age.normalized <- (custdata$age-meanage)/stdage # Note: 3
summary(custdata$age.normalized)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -2.74100 -0.72630 -0.09011 0.00000 0.65210 5.03500
# Note 1:
# Take the mean.
# Note 2:
# Take the standard deviation.
# Note 3:
# Use the mean value as the origin (or
# reference point) and rescale the distance from the
# mean by the standard deviation.
# informalexample 4.5 of section 4.1.2
# (informalexample 4.5 of section 4.1.2) : Managing data : Cleaning data : Data transformations
# Signed log10 transform: maps x to sign(x) * log10(|x|) for |x| > 1,
# and to 0 for |x| <= 1, so that both large positive and large negative
# values are compressed while values near zero stay at zero.
# NA inputs propagate as NA, matching ifelse() semantics.
signedlog10 <- function(x) {
  magnitude <- abs(x)
  transformed <- sign(x) * log10(magnitude)
  nearZero <- !is.na(x) & magnitude <= 1   # guard: NA subscripts are not allowed in assignment
  transformed[nearZero] <- 0
  transformed
}
# example 4.9 of section 4.2.2
# (example 4.9 of section 4.2.2) : Managing data : Sampling for modeling and validation : Creating a sample group column
# Title: Splitting into test and training using a random group mark
custdata$gp <- runif(dim(custdata)[1]) # Note: 1
testSet <- subset(custdata, custdata$gp <= 0.1) # Note: 2
trainingSet <- subset(custdata, custdata$gp > 0.1) # Note: 3
dim(testSet)[1]
## [1] 93
dim(trainingSet)[1]
## [1] 907
# Note 1:
# dim(custdata) returns the number of rows and
# columns of the data frame as a vector, so
# dim(custdata)[1] returns the number of rows.
# Note 2:
# Here we generate a test set of about 10% of
# the data (93 customers—a little over 9%, actually)
# and train on the remaining 90%.
# Note 3:
# Here we generate a training using the
# remaining data.
# example 4.10 of section 4.2.3
# (example 4.10 of section 4.2.3) : Managing data : Sampling for modeling and validation : Record grouping
# Title: Ensuring test/train split doesn’t split inside a household
hh <- unique(hhdata$household_id) # Note: 1
households <- data.frame(household_id = hh, gp = runif(length(hh))) # Note: 2
hhdata <- merge(hhdata, households, by="household_id") # Note: 3
# Note 1:
# Get all unique household IDs from your data
# frame.
# Note 2:
# Create a temporary data frame of household IDs
# and a uniformly random number from 0 to 1.
# Note 3:
# Merge new random sample group column back into
# original data frame.
# example 5.1 of section 5.2.1
# (example 5.1 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Building and applying a logistic regression spam model
spamD <- read.table('spamD.tsv',header=T,sep='\t')
spamTrain <- subset(spamD,spamD$rgroup>=10)
spamTest <- subset(spamD,spamD$rgroup<10)
spamVars <- setdiff(colnames(spamD),list('rgroup','spam'))
spamFormula <- as.formula(paste('spam=="spam"',
paste(spamVars,collapse=' + '),sep=' ~ '))
spamModel <- glm(spamFormula,family=binomial(link='logit'),
data=spamTrain)
spamTrain$pred <- predict(spamModel,newdata=spamTrain,
type='response')
spamTest$pred <- predict(spamModel,newdata=spamTest,
type='response')
print(with(spamTest,table(y=spam,glmPred=pred>0.5)))
## glmPred
## y FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 5.2 of section 5.2.1
# (example 5.2 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Spam classifications
# Inspect a few hand-picked test rows to compare true labels with the
# model's predicted spam probabilities. Renamed from `sample` to
# `exampleRows`: binding `sample` masks base::sample(), a common source
# of confusing downstream errors in interactive sessions.
exampleRows <- spamTest[c(7,35,224,327),c('spam','pred')]
print(exampleRows)
## spam pred
## 115 spam 0.9903246227
## 361 spam 0.4800498077
## 2300 non-spam 0.0006846551
## 3428 non-spam 0.0001434345
# example 5.3 of section 5.2.1
# (example 5.3 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Spam confusion matrix
cM <- table(truth=spamTest$spam,prediction=spamTest$pred>0.5)
print(cM)
## prediction
## truth FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 5.4 of section 5.2.1
# (example 5.4 of section 5.2.1) : Choosing and evaluating models : Evaluating models : Evaluating classification models
# Title: Entering data by hand
# Enter a confusion matrix by hand (counts taken from a published result).
# Renamed from `t` to `confMat`: binding `t` masks base::t() (matrix
# transpose), which silently breaks any later transpose call.
confMat <- as.table(matrix(data=c(288-1,17,1,13882-17),nrow=2,ncol=2))
rownames(confMat) <- rownames(cM)  # reuse truth labels from the earlier spam confusion matrix cM
colnames(confMat) <- colnames(cM)  # reuse prediction labels (FALSE/TRUE) from cM
print(confMat)
## FALSE TRUE
## non-spam 287 1
## spam 17 13865
# example 5.5 of section 5.2.2
# (example 5.5 of section 5.2.2) : Choosing and evaluating models : Evaluating models : Evaluating scoring models
# Title: Plotting residuals
d <- data.frame(y=(1:10)^2,x=1:10)
model <- lm(y~x,data=d)
d$prediction <- predict(model,newdata=d)
library('ggplot2')
ggplot(data=d) + geom_point(aes(x=x,y=y)) +
geom_line(aes(x=x,y=prediction),color='blue') +
geom_segment(aes(x=x,y=prediction,yend=y,xend=x)) +
scale_y_continuous('')
# example 5.6 of section 5.2.3
# (example 5.6 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Making a double density plot
ggplot(data=spamTest) +
geom_density(aes(x=pred,color=spam,linetype=spam))
# example 5.7 of section 5.2.3
# (example 5.7 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Plotting the receiver operating characteristic curve
library('ROCR')
eval <- prediction(spamTest$pred,spamTest$spam)
plot(performance(eval,"tpr","fpr"))
print(attributes(performance(eval,'auc'))$y.values[[1]])
## [1] 0.9660072
# example 5.8 of section 5.2.3
# (example 5.8 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Calculating log likelihood
sum(ifelse(spamTest$spam=='spam',
log(spamTest$pred),
log(1-spamTest$pred)))
## [1] -134.9478
sum(ifelse(spamTest$spam=='spam',
log(spamTest$pred),
log(1-spamTest$pred)))/dim(spamTest)[[1]]
## [1] -0.2946458
# example 5.9 of section 5.2.3
# (example 5.9 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Computing the null model’s log likelihood
pNull <- sum(ifelse(spamTest$spam=='spam',1,0))/dim(spamTest)[[1]]
sum(ifelse(spamTest$spam=='spam',1,0))*log(pNull) +
sum(ifelse(spamTest$spam=='spam',0,1))*log(1-pNull)
## [1] -306.8952
# example 5.10 of section 5.2.3
# (example 5.10 of section 5.2.3) : Choosing and evaluating models : Evaluating models : Evaluating probability models
# Title: Calculating entropy and conditional entropy
# Shannon entropy (in bits) of a discrete distribution supplied as a
# vector of non-negative counts or proportions (e.g. the output of
# table()). Zero counts contribute nothing and are dropped.
#
# Robustness fixes over the original:
#  - NA counts are dropped instead of poisoning the sum (x[x > 0]
#    retains NA elements, which made the original return NA);
#  - an all-zero/empty input returns 0 explicitly.
entropy <- function(x) { # Note: 1
  xpos <- x[!is.na(x) & x > 0]
  if (length(xpos) == 0) {
    return(0)                      # degenerate distribution: no entropy
  }
  scaled <- xpos/sum(xpos)         # normalize counts to probabilities
  sum(-scaled*log(scaled,2))       # H = -sum p * log2(p)
}
print(entropy(table(spamTest$spam))) # Note: 2
## [1] 0.9667165
# Conditional (remaining) entropy of the outcome (table rows) given the
# prediction (table columns): sum over columns of P(column) * H(rows | column).
# Generalized from the original hard-coded two-column form to tables with
# any number of prediction columns; results are identical for 2-column
# input. Relies on the entropy() helper defined above.
conditionalEntropy <- function(t) { # Note: 3
  colTotals <- colSums(t)                  # mass in each prediction column
  colEntropies <- apply(t, 2, entropy)     # outcome entropy within each column
  sum(colTotals * colEntropies) / sum(t)   # column-mass-weighted average
}
print(conditionalEntropy(cM)) # Note: 4
## [1] 0.3971897
# Note 1:
# Define function that computes the entropy
# from list of outcome counts
# Note 2:
# Calculate entropy of spam/non-spam
# distribution
# Note 3:
# Function to calculate conditional or
# remaining entropy of spam distribution (rows)
# given prediction (columns)
# Note 4:
# Calculate conditional or remaining entropy
# of spam distribution given prediction
# example 5.11 of section 5.2.5
# (example 5.11 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Clustering random data in the plane
set.seed(32297)
d <- data.frame(x=runif(100),y=runif(100))
clus <- kmeans(d,centers=5)
d$cluster <- clus$cluster
# example 5.12 of section 5.2.5
# (example 5.12 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Plotting our clusters
library('ggplot2'); library('grDevices')
h <- do.call(rbind,
lapply(unique(clus$cluster),
function(c) { f <- subset(d,cluster==c); f[chull(f),]}))
ggplot() +
geom_text(data=d,aes(label=cluster,x=x,y=y,
color=cluster),size=3) +
geom_polygon(data=h,aes(x=x,y=y,group=cluster,fill=as.factor(cluster)),
alpha=0.4,linetype=0) +
theme(legend.position = "none")
# example 5.13 of section 5.2.5
# (example 5.13 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Calculating the size of each cluster
table(d$cluster)
## 1 2 3 4 5
## 10 27 18 17 28
# example 5.14 of section 5.2.5
# (example 5.14 of section 5.2.5) : Choosing and evaluating models : Evaluating models : Evaluating clustering models
# Title: Calculating the typical distance between items in every pair of clusters
library('reshape2')
n <- dim(d)[[1]]
pairs <- data.frame(
ca = as.vector(outer(1:n,1:n,function(a,b) d[a,'cluster'])),
cb = as.vector(outer(1:n,1:n,function(a,b) d[b,'cluster'])),
dist = as.vector(outer(1:n,1:n,function(a,b)
sqrt((d[a,'x']-d[b,'x'])^2 + (d[a,'y']-d[b,'y'])^2)))
)
dcast(pairs,ca~cb,value.var='dist',mean)
## ca 1 2 3 4 5
## 1 1 0.1478480 0.6524103 0.3780785 0.4404508 0.7544134
## 2 2 0.6524103 0.2794181 0.5551967 0.4990632 0.5165320
## 3 3 0.3780785 0.5551967 0.2031272 0.6122986 0.4656730
## 4 4 0.4404508 0.4990632 0.6122986 0.2048268 0.8365336
## 5 5 0.7544134 0.5165320 0.4656730 0.8365336 0.2221314
# example 6.1 of section 6.1.1
# (example 6.1 of section 6.1.1) : Memorization methods : KDD and KDD Cup 2009 : Getting started with KDD Cup 2009 data
# Title: Preparing the KDD data for analysis
d <- read.table('orange_small_train.data.gz', # Note: 1
header=T,
sep='\t',
na.strings=c('NA','')) # Note: 2
churn <- read.table('orange_small_train_churn.labels.txt',
header=F,sep='\t') # Note: 3
d$churn <- churn$V1 # Note: 4
appetency <- read.table('orange_small_train_appetency.labels.txt',
header=F,sep='\t')
d$appetency <- appetency$V1 # Note: 5
upselling <- read.table('orange_small_train_upselling.labels.txt',
header=F,sep='\t')
d$upselling <- upselling$V1 # Note: 6
set.seed(729375) # Note: 7
d$rgroup <- runif(dim(d)[[1]])
dTrainAll <- subset(d,rgroup<=0.9)
dTest <- subset(d,rgroup>0.9) # Note: 8
outcomes=c('churn','appetency','upselling')
vars <- setdiff(colnames(dTrainAll),
c(outcomes,'rgroup'))
catVars <- vars[sapply(dTrainAll[,vars],class) %in%
c('factor','character')] # Note: 9
numericVars <- vars[sapply(dTrainAll[,vars],class) %in%
c('numeric','integer')] # Note: 10
rm(list=c('d','churn','appetency','upselling')) # Note: 11
outcome <- 'churn' # Note: 12
pos <- '1' # Note: 13
useForCal <- rbinom(n=dim(dTrainAll)[[1]],size=1,prob=0.1)>0 # Note: 14
dCal <- subset(dTrainAll,useForCal)
dTrain <- subset(dTrainAll,!useForCal)
# Note 1:
# Read the file of independent variables. All
# data from
# https://github.com/WinVector/zmPDSwR/tree/master/KDD2009.
# Note 2:
# Treat both NA and the empty string as missing
# data.
# Note 3:
# Read churn dependent variable.
# Note 4:
# Add churn as a new column.
# Note 5:
# Add appetency as a new column.
# Note 6:
# Add upselling as a new column.
# Note 7:
# By setting the seed to the pseudo-random
# number generator, we make our work reproducible:
# someone redoing it will see the exact same
# results.
# Note 8:
# Split data into train and test subsets.
# Note 9:
# Identify which features are categorical
# variables.
# Note 10:
# Identify which features are numeric
# variables.
# Note 11:
# Remove unneeded objects from workspace.
# Note 12:
# Choose which outcome to model (churn).
# Note 13:
# Choose which outcome is considered
# positive.
# Note 14:
# Further split training data into training and
# calibration.
# example 6.2 of section 6.2.1
# (example 6.2 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Plotting churn grouped by variable 218 levels
table218 <- table(
Var218=dTrain[,'Var218'], # Note: 1
churn=dTrain[,outcome], # Note: 2
useNA='ifany') # Note: 3
print(table218)
## churn
## Var218 -1 1
## cJvF 19245 1220
## UYBR 17860 1618
## <NA> 423 152
# Note this listing was updated: 10-14-2014 as some of results in the book were
# accidentally from older code. Will update later listings as we go forward.
# Note 1:
# Tabulate levels of Var218.
# Note 2:
# Tabulate levels of churn outcome.
# Note 3:
# Include NA values in tabulation.
# example 6.3 of section 6.2.1
# (example 6.3 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Churn rates grouped by variable 218 codes
print(table218[,2]/(table218[,1]+table218[,2]))
## cJvF UYBR <NA>
## 0.05994389 0.08223821 0.26523297
# example 6.4 of section 6.2.1
# (example 6.4 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Function to build single-variable models for categorical variables
# Build and apply a single-variable model for a categorical variable.
# Given training outcomes (outCol), the matching categorical training
# column (varCol), and a column to score (appCol), estimate
# P(outcome == pos | level) per level with light Laplace-style smoothing,
# then look those probabilities up for each entry of appCol.
# NOTE(review): reads the global `pos` (the positive outcome level) —
# defined elsewhere in this script; confirm it is set before calling.
# Returns a numeric vector of predicted positive-class probabilities,
# the same length as appCol.
mkPredC <- function(outCol,varCol,appCol) { # Note: 1
  pPos <- sum(outCol==pos)/length(outCol) # Note: 2  -- overall positive rate, used as smoothing target and fallback
  naTab <- table(as.factor(outCol[is.na(varCol)]))
  pPosWna <- (naTab/sum(naTab))[pos] # Note: 3  -- positive rate among rows where the variable was NA in training
  vTab <- table(as.factor(outCol),varCol)
  # Smoothed conditional rate per level; the 1.0e-3 terms keep levels
  # with tiny counts from producing extreme 0/1 estimates.
  pPosWv <- (vTab[pos,]+1.0e-3*pPos)/(colSums(vTab)+1.0e-3) # Note: 4
  pred <- pPosWv[appCol] # Note: 5  -- vectorized lookup by level name
  # Order matters below: NA inputs get the training NA-rate first, then
  # any remaining NAs (levels unseen in training) fall back to pPos.
  pred[is.na(appCol)] <- pPosWna # Note: 6
  pred[is.na(pred)] <- pPos # Note: 7
  pred # Note: 8
}
# Note 1:
# Given a vector of training outcomes (outCol),
# a categorical training variable (varCol), and a
# prediction variable (appCol), use outCol and
# varCol to build a single-variable model and then
# apply the model to appCol to get new
# predictions.
# Note 2:
# Get stats on how often outcome is positive
# during training.
# Note 3:
# Get stats on how often outcome is positive for
# NA values of variable during training.
# Note 4:
# Get stats on how often outcome is positive,
# conditioned on levels of training variable.
# Note 5:
# Make predictions by looking up levels of
# appCol.
# Note 6:
# Add in predictions for NA levels of
# appCol.
# Note 7:
# Add in predictions for levels of appCol that
# weren’t known during training.
# Note 8:
# Return vector of predictions.
# example 6.5 of section 6.2.1
# (example 6.5 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Applying single-categorical variable models to all of our datasets
for(v in catVars) {
pi <- paste('pred',v,sep='')
dTrain[,pi] <- mkPredC(dTrain[,outcome],dTrain[,v],dTrain[,v])
dCal[,pi] <- mkPredC(dTrain[,outcome],dTrain[,v],dCal[,v])
dTest[,pi] <- mkPredC(dTrain[,outcome],dTrain[,v],dTest[,v])
}
# example 6.6 of section 6.2.1
# (example 6.6 of section 6.2.1) : Memorization methods : Building single-variable models : Using categorical features
# Title: Scoring categorical variables by AUC
library('ROCR')
# Area under the ROC curve for a numeric score column (predcol) scored
# against true outcomes (outcol), treating the global `pos` level as the
# positive class. Uses the ROCR package (prediction/performance).
calcAUC <- function(predcol,outcol) {
  rocrPred <- prediction(predcol, outcol == pos)
  aucPerf <- performance(rocrPred, 'auc')
  as.numeric(aucPerf@y.values)   # unwrap ROCR's list slot into a plain number
}
for(v in catVars) {
pi <- paste('pred',v,sep='')
aucTrain <- calcAUC(dTrain[,pi],dTrain[,outcome])
if(aucTrain>=0.8) {
aucCal <- calcAUC(dCal[,pi],dCal[,outcome])
print(sprintf("%s, trainAUC: %4.3f calibrationAUC: %4.3f",
pi,aucTrain,aucCal))
}
}
## [1] "predVar200, trainAUC: 0.828 calibrationAUC: 0.527"
## [1] "predVar202, trainAUC: 0.829 calibrationAUC: 0.522"
## [1] "predVar214, trainAUC: 0.828 calibrationAUC: 0.527"
## [1] "predVar217, trainAUC: 0.898 calibrationAUC: 0.553"
# example 6.7 of section 6.2.2
# (example 6.7 of section 6.2.2) : Memorization methods : Building single-variable models : Using numeric features
# Title: Scoring numeric variables by AUC
# Build and apply a single-variable model for a numeric variable by
# discretizing it into (up to) decile bins learned from the training
# column, then delegating to the categorical model mkPredC(). Bins are
# deduplicated so heavily tied quantiles don't produce zero-width cuts.
mkPredN <- function(outCol,varCol,appCol) {
  binBoundaries <- unique(as.numeric(quantile(varCol,
                                              probs=seq(0, 1, 0.1),na.rm=TRUE)))
  trainBinned <- cut(varCol, binBoundaries)   # same boundaries applied to
  applyBinned <- cut(appCol, binBoundaries)   # both train and apply columns
  mkPredC(outCol, trainBinned, applyBinned)
}
for(v in numericVars) {
pi <- paste('pred',v,sep='')
dTrain[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dTrain[,v])
dTest[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dTest[,v])
dCal[,pi] <- mkPredN(dTrain[,outcome],dTrain[,v],dCal[,v])
aucTrain <- calcAUC(dTrain[,pi],dTrain[,outcome])
if(aucTrain>=0.55) {
aucCal <- calcAUC(dCal[,pi],dCal[,outcome])
print(sprintf("%s, trainAUC: %4.3f calibrationAUC: %4.3f",
pi,aucTrain,aucCal))
}
}
## [1] "predVar6, trainAUC: 0.557 calibrationAUC: 0.554"
## [1] "predVar7, trainAUC: 0.555 calibrationAUC: 0.565"
## [1] "predVar13, trainAUC: 0.568 calibrationAUC: 0.553"
## [1] "predVar73, trainAUC: 0.608 calibrationAUC: 0.616"
## [1] "predVar74, trainAUC: 0.574 calibrationAUC: 0.566"
## [1] "predVar81, trainAUC: 0.558 calibrationAUC: 0.542"
## [1] "predVar113, trainAUC: 0.557 calibrationAUC: 0.567"
## [1] "predVar126, trainAUC: 0.635 calibrationAUC: 0.629"
## [1] "predVar140, trainAUC: 0.561 calibrationAUC: 0.560"
## [1] "predVar189, trainAUC: 0.574 calibrationAUC: 0.599"
# example 6.8 of section 6.2.2
# (example 6.8 of section 6.2.2) : Memorization methods : Building single-variable models : Using numeric features
# Title: Plotting variable performance
library('ggplot2')
ggplot(data=dCal) +
geom_density(aes(x=predVar126,color=as.factor(churn)))
# example 6.9 of section 6.2.3
# (example 6.9 of section 6.2.3) : Memorization methods : Building single-variable models : Using cross-validation to estimate effects of overfitting
# Title: Running a repeated cross-validation experiment
# Repeated hold-out evaluation of the single variable Var217: 100 random
# ~90/10 train/hold-out splits, collecting the hold-out AUC each time.
var <- 'Var217'
aucs <- rep(0,100)
for(rep in 1:length(aucs)) { # Note: 1
  useForCalRep <- rbinom(n=dim(dTrainAll)[[1]],size=1,prob=0.1)>0 # Note: 2
  predRep <- mkPredC(dTrainAll[!useForCalRep,outcome], # Note: 3
    dTrainAll[!useForCalRep,var],
    dTrainAll[useForCalRep,var])
  aucs[rep] <- calcAUC(predRep,dTrainAll[useForCalRep,outcome]) # Note: 4
}
# Mean and spread of the hold-out AUC across the 100 repetitions.
mean(aucs)
## [1] 0.5556656
sd(aucs)
## [1] 0.01569345
# Note 1:
# For 100 iterations...
# Note 2:
# ...select a random subset of about 10% of the training data as hold-out set,...
# Note 3:
# ...use the random 90% of training data to train model and evaluate that model on hold-out
# set,...
# Note 4:
# ...calculate resulting model’s AUC using hold-out set; store that value and repeat.
# example 6.10 of section 6.2.3
# (example 6.10 of section 6.2.3) : Memorization methods : Building single-variable models : Using cross-validation to estimate effects of overfitting
# Title: Empirically cross-validating performance
fCross <- function() {
  # One cross-validation repetition: hold out roughly 10% of dTrainAll at
  # random, fit the single-variable model for `var` on the remainder, and
  # return the model's AUC on the held-out rows.
  holdOut <- rbinom(n = dim(dTrainAll)[[1]], size = 1, prob = 0.1) > 0
  heldOutPred <- mkPredC(dTrainAll[!holdOut, outcome],
                         dTrainAll[!holdOut, var],
                         dTrainAll[holdOut, var])
  calcAUC(heldOutPred, dTrainAll[holdOut, outcome])
}
# Estimate the AUC distribution with 100 independent repetitions.
aucs <- replicate(100, fCross())
# example 6.11 of section 6.3.1
# (example 6.11 of section 6.3.1) : Memorization methods : Building models using many variables : Variable selection
# Title: Basic variable selection
# Each variable we use represents a chance of explaining
# more of the outcome variation (a chance of building a better
# model) but also represents a possible source of noise and
# overfitting. To control this effect, we often preselect
# which subset of variables we’ll use to fit. Variable
# selection can be an important defensive modeling step even
# for types of models that “don’t need it” (as seen with
# decision trees in section 6.3.2). Listing 6.11 shows a
# hand-rolled variable selection loop where each variable is
# scored according to a deviance inspired score, where a
# variable is scored with a bonus proportional to the change
# in in scaled log likelihood of the training data. We could
# also try an AIC (Akaike information criterion) by
# subtracting a penalty proportional to the complexity of the
# variable (which in this case is 2^entropy for categorical
# variables and a stand-in of 1 for numeric variables). The
# score is a bit ad hoc, but tends to work well in selecting
# variables. Notice we’re using performance on the calibration
# set (not the training set) to pick variables. Note that we
# don’t use the test set for calibration; to do so lessens the
# reliability of the test set for model quality confirmation.
logLikelyhood <- function(outCol,predCol,yTarget=pos) { # Note: 1
  # Log likelihood of outcomes outCol under predicted probabilities
  # predCol. A scalar predCol is recycled (used for the base-rate/null
  # model). yTarget is the label treated as the positive class; it
  # defaults lazily to the global `pos` so existing two-argument callers
  # behave exactly as before, while new callers can pass it explicitly
  # instead of relying on global state.
  sum(ifelse(outCol==yTarget,log(predCol),log(1-predCol)))
}
# Forward-select single-variable models whose calibration-set deviance
# improvement over the base-rate model exceeds minStep; selected score
# column names accumulate in selVars.
selVars <- c()
minStep <- 5
baseRateCheck <- logLikelyhood(dCal[,outcome],
   sum(dCal[,outcome]==pos)/length(dCal[,outcome]))
for(v in catVars) { # Note: 2
  pi <- paste('pred',v,sep='')
  liCheck <- 2*((logLikelyhood(dCal[,outcome],dCal[,pi]) -
    baseRateCheck))
  if(liCheck>minStep) {
    print(sprintf("%s, calibrationScore: %g",
      pi,liCheck))
    selVars <- c(selVars,pi)
  }
}
# NOTE(review): the categorical loop uses > minStep while the numeric loop
# below uses >= minStep -- presumably an unintentional asymmetry; confirm.
for(v in numericVars) { # Note: 3
  pi <- paste('pred',v,sep='')
  liCheck <- 2*((logLikelyhood(dCal[,outcome],dCal[,pi]) -
    baseRateCheck))
  if(liCheck>=minStep) {
    print(sprintf("%s, calibrationScore: %g",
      pi,liCheck))
    selVars <- c(selVars,pi)
  }
}
# Note 1:
# Define a convenience function to compute log
# likelihood.
# Note 2:
# Run through categorical variables and pick
# based on a deviance improvement (related to
# difference in log likelihoods; see chapter
# 3).
# Note 3:
# Run through numeric variables and pick
# based on a deviance improvement.
# example 6.13 of section 6.3.2
# (example 6.13 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building a bad decision tree
library('rpart')
# Fit a decision tree on all raw variables. The large gap between the
# training AUC and the test/calibration AUCs shows severe overfit.
fV <- paste(outcome,'>0 ~ ',
  paste(c(catVars,numericVars),collapse=' + '),sep='')
tmodel <- rpart(fV,data=dTrain)
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.9241265
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5266172
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.5126917
# example 6.14 of section 6.3.2
# (example 6.14 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building another bad decision tree
# Fit a tree on the single-variable model scores instead of the raw
# variables; still overfits (train AUC far above holdout AUCs).
tVars <- paste('pred',c(catVars,numericVars),sep='')
fV2 <- paste(outcome,'>0 ~ ',paste(tVars,collapse=' + '),sep='')
tmodel <- rpart(fV2,data=dTrain)
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.928669
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5390648
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.5384152
# example 6.15 of section 6.3.2
# (example 6.15 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building yet another bad decision tree
# Same formula, but constrain tree growth (complexity penalty cp, minimum
# node sizes, depth cap); helps a little but still overfits.
tmodel <- rpart(fV2,data=dTrain,
  control=rpart.control(cp=0.001,minsplit=1000,
     minbucket=1000,maxdepth=5)
 )
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.9421195
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.5794633
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.547967
# example 6.16 of section 6.3.2
# (example 6.16 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Building a better decision tree
# Restrict the tree to the preselected variables (selVars) with the same
# growth constraints; train and holdout AUCs are now comparable.
f <- paste(outcome,'>0 ~ ',paste(selVars,collapse=' + '),sep='')
tmodel <- rpart(f,data=dTrain,
  control=rpart.control(cp=0.001,minsplit=1000,
     minbucket=1000,maxdepth=5)
 )
print(calcAUC(predict(tmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.6906852
print(calcAUC(predict(tmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.6843595
print(calcAUC(predict(tmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.6669301
# example 6.17 of section 6.3.2
# (example 6.17 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Printing the decision tree
# Text dump of the fitted tree (node id, split rule, n, deviance, mean
# outcome; '*' marks leaves), followed by a dendrogram plot with labels.
print(tmodel)
## n= 40518
##
## node), split, n, deviance, yval
## * denotes terminal node
##
## 1) root 40518 2769.3550 0.07379436
## 2) predVar126< 0.07366888 18188 726.4097 0.04167583
## 4) predVar126< 0.04391312 8804 189.7251 0.02203544 *
## 5) predVar126>=0.04391312 9384 530.1023 0.06010230
## 10) predVar189< 0.08449448 8317 410.4571 0.05206204 *
## 11) predVar189>=0.08449448 1067 114.9166 0.12277410 *
## 3) predVar126>=0.07366888 22330 2008.9000 0.09995522
## 6) predVar212< 0.07944508 8386 484.2499 0.06153112
## 12) predVar73< 0.06813291 4084 167.5012 0.04285015 *
## 13) predVar73>=0.06813291 4302 313.9705 0.07926546 *
## 7) predVar212>=0.07944508 13944 1504.8230 0.12306370
## 14) predVar218< 0.07134103 6728 580.7390 0.09542212
## 28) predVar126< 0.1015407 3901 271.8426 0.07536529 *
## 29) predVar126>=0.1015407 2827 305.1617 0.12309870
## 58) predVar73< 0.07804522 1452 110.0826 0.08264463 *
## 59) predVar73>=0.07804522 1375 190.1935 0.16581820 *
## 15) predVar218>=0.07134103 7216 914.1502 0.14883590
## 30) predVar74< 0.0797246 2579 239.3579 0.10352850 *
## 31) predVar74>=0.0797246 4637 666.5538 0.17403490
## 62) predVar189< 0.06775545 1031 102.9486 0.11251210 *
## 63) predVar189>=0.06775545 3606 558.5871 0.19162510 *
# example 6.18 of section 6.3.2
# (example 6.18 of section 6.3.2) : Memorization methods : Building models using many variables : Using decision trees
# Title: Plotting the decision tree
# Shrink label text so the node labels fit, then draw the dendrogram.
par(cex=0.7)
plot(tmodel)
text(tmodel)
# example 6.19 of section 6.3.3
# (example 6.19 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Running k-nearest neighbors
library('class')
# 200-nearest-neighbor classification over the selected score variables.
nK <- 200
knnTrain <- dTrain[,selVars] # Note: 1
knnCl <- dTrain[,outcome]==pos # Note: 2
# knnPred closes over knnTrain/knnCl/nK from the script environment.
knnPred <- function(df) { # Note: 3
  knnDecision <- knn(knnTrain,df,knnCl,k=nK,prob=T)
  ifelse(knnDecision==TRUE, # Note: 4
     attributes(knnDecision)$prob,
     1-(attributes(knnDecision)$prob))
}
print(calcAUC(knnPred(dTrain[,selVars]),dTrain[,outcome]))
## [1] 0.7443927
print(calcAUC(knnPred(dCal[,selVars]),dCal[,outcome]))
## [1] 0.7119394
print(calcAUC(knnPred(dTest[,selVars]),dTest[,outcome]))
## [1] 0.718256
# Note 1:
# Build a data frame with only the variables we
# wish to use for classification.
# Note 2:
# Build a vector with the known training
# outcomes.
# Note 3:
# Bind the knn() training function with our data
# in a new function.
# Note 4:
# Convert knn’s unfortunate convention of
# calculating probability as “proportion of the
# votes for the winning class” into the more useful
# “calculated probability of being a positive
# example.”
# example 6.20 of section 6.3.3
# (example 6.20 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Platting 200-nearest neighbor performance
# Score the calibration set with the kNN model and compare the score
# distributions for churners vs. non-churners.
dCal$kpred <- knnPred(dCal[,selVars])
ggplot(data=dCal) +
  geom_density(aes(x=kpred,
     color=as.factor(churn),linetype=as.factor(churn)))
# example 6.21 of section 6.3.3
# (example 6.21 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Plotting the receiver operating characteristic curve
plotROC <- function(predcol, outcol) {
  # Draw the ROC curve (via ROCR) for predictions predcol against the
  # true outcomes outcol, plus the y = x diagonal as the random-guessing
  # baseline. Uses the global `pos` to binarize the outcome.
  rocPerf <- performance(prediction(predcol, outcol == pos), 'tpr', 'fpr')
  rocFrame <- data.frame(
    FalsePositiveRate = rocPerf@x.values[[1]],
    TruePositiveRate = rocPerf@y.values[[1]])
  ggplot() +
    geom_line(data = rocFrame,
              aes(x = FalsePositiveRate, y = TruePositiveRate)) +
    geom_line(aes(x = c(0, 1), y = c(0, 1)))
}
# ROC curve of the 200-NN model on the test set.
print(plotROC(knnPred(dTest[,selVars]),dTest[,outcome]))
# example 6.22 of section 6.3.3
# (example 6.22 of section 6.3.3) : Memorization methods : Building models using many variables : Using nearest neighbor methods
# Title: Plotting the performance of a logistic regression model
# Logistic regression over the selected variables (formula f built in the
# variable-selection step). Link-scale predictions are monotone in
# probability, so AUC can be computed on them directly.
gmodel <- glm(as.formula(f),data=dTrain,family=binomial(link='logit'))
print(calcAUC(predict(gmodel,newdata=dTrain),dTrain[,outcome]))
## [1] 0.7309537
print(calcAUC(predict(gmodel,newdata=dTest),dTest[,outcome]))
## [1] 0.7234645
print(calcAUC(predict(gmodel,newdata=dCal),dCal[,outcome]))
## [1] 0.7170824
# example 6.23 of section 6.3.4
# (example 6.23 of section 6.3.4) : Memorization methods : Building models using many variables : Using Naive Bayes
# Title: Building, applying, and evaluating a Naive Bayes model
# Overall positive (churn) rate in the training set; the Naive Bayes prior.
pPos <- sum(dTrain[,outcome]==pos)/length(dTrain[,outcome])
nBayes <- function(pPos, pf) { # Note: 1
  # Hand-rolled Naive Bayes score: combine a frame of single-variable
  # probability predictions pf with the overall positive rate pPos.
  # Works in log space with a small smoothing term, then normalizes with
  # the log-sum-exp trick to avoid floating point underflow.
  pNeg <- 1 - pPos
  smoothingEpsilon <- 1.0e-5
  # Per-row log-evidence for the positive class (up to a shared constant).
  scorePos <- log(pPos + smoothingEpsilon) +
    rowSums(log(pf / pPos + smoothingEpsilon)) # Note: 2
  # Per-row log-evidence for the negative class.
  scoreNeg <- log(pNeg + smoothingEpsilon) +
    rowSums(log((1 - pf) / (1 - pPos) + smoothingEpsilon)) # Note: 3
  # Subtract the rowwise max before exponentiating (log-sum-exp trick).
  biggest <- pmax(scorePos, scoreNeg)
  posWeight <- exp(scorePos - biggest)
  negWeight <- exp(scoreNeg - biggest) # Note: 4
  # Normalize so predicted positive + negative probabilities sum to 1.
  posWeight / (posWeight + negWeight) # Note: 5
}
# Score train/calibration/test with the hand-rolled Naive Bayes over all
# single-variable prediction columns; note the large train-vs-holdout
# AUC gap (overfit).
pVars <- paste('pred',c(numericVars,catVars),sep='')
dTrain$nbpredl <- nBayes(pPos,dTrain[,pVars])
dCal$nbpredl <- nBayes(pPos,dCal[,pVars])
dTest$nbpredl <- nBayes(pPos,dTest[,pVars]) # Note: 6
print(calcAUC(dTrain$nbpredl,dTrain[,outcome]))
## [1] 0.9757348
print(calcAUC(dCal$nbpredl,dCal[,outcome]))
## [1] 0.5995206
print(calcAUC(dTest$nbpredl,dTest[,outcome]))
## [1] 0.5956515 # Note: 7
# Note 1:
# Define a function that performs the Naive
# Bayes prediction.
# Note 2:
# For each row, compute (with a smoothing term)
# the sum of log(P[positive &
# evidence_i]/P[positive]) across all columns. This
# is equivalent to the log of the product of
# P[evidence_i | positive] up to terms that don’t
# depend on the positive/negative outcome.
# Note 3:
# For each row, compute (with a smoothing term)
# the sum of log(P[negative &
# evidence_i]/P[negative]) across all columns. This
# is equivalent to the log of the product of
# P[evidence_i | negative] up to terms that don’t
# depend on the positive/negative outcome.
# Note 4:
# Exponentiate to turn sums back into products,
# but make sure we don’t cause a floating point
# overflow in doing so.
# Note 5:
# Use the fact that the predicted positive
# probability plus the predicted negative
# probability should sum to 1.0 to find and
# eliminate Z. Return the correctly scaled predicted
# odds of being positive as our forecast.
# Note 6:
# Apply the function to make the predictions.
# Note 7:
# Calculate the AUCs. Notice the
# overfit—fantastic performance on the training
# set that isn’t repeated on the calibration or test
# sets.
# example 6.24 of section 6.3.4
# (example 6.24 of section 6.3.4) : Memorization methods : Building models using many variables : Using Naive Bayes
# Title: Using a Naive Bayes package
library('e1071')
# Same task with the packaged naiveBayes() on the raw variables;
# type='raw' returns class probabilities, column 'TRUE' is the positive
# class.
lVars <- c(catVars,numericVars)
ff <- paste('as.factor(',outcome,'>0) ~ ',
   paste(lVars,collapse=' + '),sep='')
nbmodel <- naiveBayes(as.formula(ff),data=dTrain)
dTrain$nbpred <- predict(nbmodel,newdata=dTrain,type='raw')[,'TRUE']
dCal$nbpred <- predict(nbmodel,newdata=dCal,type='raw')[,'TRUE']
dTest$nbpred <- predict(nbmodel,newdata=dTest,type='raw')[,'TRUE']
calcAUC(dTrain$nbpred,dTrain[,outcome])
## [1] 0.4643591
calcAUC(dCal$nbpred,dCal[,outcome])
## [1] 0.5544484
calcAUC(dTest$nbpred,dTest[,outcome])
## [1] 0.5679519
# example 7.1 of section 7.1.1
# (example 7.1 of section 7.1.1) : Linear and logistic regression : Using linear regression : Understanding linear regression
# Title: Loading the PUMS data
# Load the PUMS sample, split on the precomputed random group, and fit a
# linear model of log10 income on age, sex, class of worker, and
# schooling; score both splits.
load("psub.RData")
dtrain <- subset(psub,ORIGRANDGROUP >= 500)
dtest <- subset(psub,ORIGRANDGROUP < 500)
model <- lm(log(PINCP,base=10) ~ AGEP + SEX + COW + SCHL,data=dtrain)
dtest$predLogPINCP <- predict(model,newdata=dtest)
dtrain$predLogPINCP <- predict(model,newdata=dtrain)
# example 7.2 of section 7.1.3
# (example 7.2 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Plotting log income as a function of predicted log income
library('ggplot2')
# Actual vs. predicted log income on test: points, a smoothed trend, and
# the ideal y = x line (dashed blue) for reference.
ggplot(data=dtest,aes(x=predLogPINCP,y=log(PINCP,base=10))) +
  geom_point(alpha=0.2,color="black") +
  geom_smooth(aes(x=predLogPINCP,
     y=log(PINCP,base=10)),color="black") +
  geom_line(aes(x=log(PINCP,base=10),
     y=log(PINCP,base=10)),color="blue",linetype=2) +
  scale_x_continuous(limits=c(4,5)) +
  scale_y_continuous(limits=c(3.5,5.5))
# example 7.3 of section 7.1.3
# (example 7.3 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Plotting residuals income as a function of predicted log income
# Residuals (predicted minus actual log income) against the prediction,
# with a smoothed trend to reveal systematic bias.
ggplot(data=dtest,aes(x=predLogPINCP,
                     y=predLogPINCP-log(PINCP,base=10))) +
  geom_point(alpha=0.2,color="black") +
  geom_smooth(aes(x=predLogPINCP,
                  y=predLogPINCP-log(PINCP,base=10)),
              color="black")
# example 7.4 of section 7.1.3
# (example 7.4 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Computing R-squared
# R-squared: fraction of the variance of observed y explained by
# predictions f (1 = perfect, 0 = no better than predicting the mean).
rsq <- function(y, f) {
  residualSS <- sum((y - f)^2)
  totalSS <- sum((y - mean(y))^2)
  1 - residualSS / totalSS
}
# R-squared of the income model on train and test (in log10 space).
rsq(log(dtrain$PINCP,base=10),predict(model,newdata=dtrain))
rsq(log(dtest$PINCP,base=10),predict(model,newdata=dtest))
# example 7.5 of section 7.1.3
# (example 7.5 of section 7.1.3) : Linear and logistic regression : Using linear regression : Making predictions
# Title: Calculating root mean square error
# Root mean squared error of predictions f against observed y.
rmse <- function(y, f) {
  squaredErrors <- (y - f)^2
  sqrt(mean(squaredErrors))
}
# RMSE of the income model on train and test (in log10 space).
rmse(log(dtrain$PINCP,base=10),predict(model,newdata=dtrain))
rmse(log(dtest$PINCP,base=10),predict(model,newdata=dtest))
# example 7.6 of section 7.1.5
# (example 7.6 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Title: Summarizing residuals
# Five-number summaries of the residuals on train and test; training
# residuals have mean exactly zero by construction of lm().
summary(log(dtrain$PINCP,base=10) - predict(model,newdata=dtrain))
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.29200 -0.14150 0.02458 0.00000 0.17630 0.62530
summary(log(dtest$PINCP,base=10) - predict(model,newdata=dtest))
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -1.494000 -0.165300 0.018920 -0.004637 0.175500 0.868100
# informalexample 7.9 of section 7.1.5
# (informalexample 7.9 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Residual degrees of freedom: rows minus number of fitted coefficients.
df <- dim(dtrain)[1] - dim(summary(model)$coefficients)[1]
# informalexample 7.10 of section 7.1.5
# (informalexample 7.10 of section 7.1.5) : Linear and logistic regression : Using linear regression : Reading the model summary and characterizing coefficient quality
# Residual standard error: sqrt of residual sum of squares over df.
modelResidualError <- sqrt(sum(residuals(model)^2)/df)
# example 7.7 of section 7.2.1
# (example 7.7 of section 7.2.1) : Linear and logistic regression : Using logistic regression : Understanding logistic regression
# Title: Loading the CDC data
# Load the CDC natal risk data and split train/test on the precomputed
# random group column.
load("NatalRiskData.rData")
train <- sdata[sdata$ORIGRANDGROUP<=5,]
test <- sdata[sdata$ORIGRANDGROUP>5,]
# example 7.8 of section 7.2.2
# (example 7.8 of section 7.2.2) : Linear and logistic regression : Using logistic regression : Building a logistic regression model
# Title: Building the model formula
# Assemble the model formula string: outcome atRisk against maternal
# measurements, birth complications, and risk factors.
complications <- c("ULD_MECO","ULD_PRECIP","ULD_BREECH")
riskfactors <- c("URF_DIAB", "URF_CHYPER", "URF_PHYPER",
                "URF_ECLAM")
y <- "atRisk"
x <- c("PWGT",
      "UPREVIS",
      "CIG_REC",
      "GESTREC3",
      "DPLURAL",
      complications,
      riskfactors)
fmla <- paste(y, paste(x, collapse="+"), sep="~")
# example 7.9 of section 7.2.2
# (example 7.9 of section 7.2.2) : Linear and logistic regression : Using logistic regression : Building a logistic regression model
# Title: Fitting the logistic regression model
# Show the assembled formula, then fit the logistic regression.
print(fmla)
## [1] "atRisk ~ PWGT+UPREVIS+CIG_REC+GESTREC3+DPLURAL+ULD_MECO+ULD_PRECIP+
## ULD_BREECH+URF_DIAB+URF_CHYPER+URF_PHYPER+URF_ECLAM"
model <- glm(fmla, data=train, family=binomial(link="logit"))
# example 7.10 of section 7.2.3
# (example 7.10 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Applying the logistic regression model
# Attach predicted probabilities (type="response" gives probabilities,
# not link-scale scores) to both splits.
train$pred <- predict(model, newdata=train, type="response")
test$pred <- predict(model, newdata=test, type="response")
# example 7.11 of section 7.2.3
# (example 7.11 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Plotting distribution of prediction score grouped by known outcome
library('ggplot2')
# Distribution of the prediction score, grouped by the known outcome.
ggplot(train, aes(x=pred, color=atRisk, linetype=atRisk)) +
  geom_density()
# example 7.12 of section 7.2.3
# (example 7.12 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Exploring modeling trade-offs
# Compute precision and recall as functions of the score threshold using
# ROCR, and collect them into one data frame for plotting.
library(ROCR) # Note: 1
library(grid) # Note: 2
predObj <- prediction(train$pred, train$atRisk) # Note: 3
precObj <- performance(predObj, measure="prec") # Note: 4
recObj <- performance(predObj, measure="rec") # Note: 5
precision <- (precObj@y.values)[[1]] # Note: 6
prec.x <- (precObj@x.values)[[1]] # Note: 7
recall <- (recObj@y.values)[[1]]
rocFrame <- data.frame(threshold=prec.x, precision=precision,
                      recall=recall) # Note: 8
nplot <- function(plist) { # Note: 9
  # Stack a list of ggplot objects vertically on a single page using grid
  # viewports (one layout row per plot).
  nPlots <- length(plist)
  grid.newpage()
  pushViewport(viewport(layout = grid.layout(nPlots, 1)))
  cellAt <- function(row, col) {
    viewport(layout.pos.row = row, layout.pos.col = col)
  }
  for (idx in 1:nPlots) {
    print(plist[[idx]], vp = cellAt(idx, 1))
  }
}
# Plot enrichment (precision over the base rate) and recall as functions
# of threshold, stacked on one page.
pnull <- mean(as.numeric(train$atRisk)) # Note: 10
p1 <- ggplot(rocFrame, aes(x=threshold)) + # Note: 11
  geom_line(aes(y=precision/pnull)) +
  coord_cartesian(xlim = c(0,0.05), ylim=c(0,10) )
p2 <- ggplot(rocFrame, aes(x=threshold)) + # Note: 12
  geom_line(aes(y=recall)) +
  coord_cartesian(xlim = c(0,0.05) )
nplot(list(p1, p2)) # Note: 13
# Note 1:
# Load ROCR library.
# Note 2:
# Load grid library (you’ll need this for the
# nplot function below).
# Note 3:
# Create ROCR prediction object.
# Note 4:
# Create ROCR object to calculate precision as
# a function of threshold.
# Note 5:
# Create ROCR object to calculate recall as a
# function of threshold.
# Note 6:
# at ( @ ) symbol@ (at) symbolROCR objects are what R calls S4 objects;
# the slots (or fields) of an S4 object are stored
# as lists within the object. You extract the slots
# from an S4 object using @ notation.
# Note 7:
# The x values (thresholds) are the same in
# both predObj and recObj, so you only need to
# extract them once.
# Note 8:
# Build data frame with thresholds, precision,
# and recall.
# Note 9:
# Function to plot multiple plots on one page
# (stacked).
# Note 10:
# Calculate rate of at-risk births in the
# training set.
# Note 11:
# Plot enrichment rate as a function of
# threshold.
# Note 12:
# Plot recall as a function of
# threshold.
# Note 13:
# Show both plots simultaneously.
# example 7.13 of section 7.2.3
# (example 7.13 of section 7.2.3) : Linear and logistic regression : Using logistic regression : Making predictions
# Title: Evaluating our chosen model
# Evaluate the model at threshold 0.02: confusion matrix, precision,
# recall, and enrichment over the test-set base rate.
ctab.test <- table(pred=test$pred>0.02, atRisk=test$atRisk) # Note: 1
ctab.test # Note: 2
## atRisk
## pred FALSE TRUE
## FALSE 9487 93
## TRUE 2405 116
precision <- ctab.test[2,2]/sum(ctab.test[2,])
precision
## [1] 0.04601349
recall <- ctab.test[2,2]/sum(ctab.test[,2])
recall
## [1] 0.5550239
enrich <- precision/mean(as.numeric(test$atRisk))
enrich
## [1] 2.664159
# Note 1:
# Build confusion matrix.
# Note 2:
# Rows contain predicted negatives and
# positives; columns contain actual negatives and
# positives.
# example 7.14 of section 7.2.4
# (example 7.14 of section 7.2.4) : Linear and logistic regression : Using logistic regression : Finding relations and extracting advice from logistic models
# Title: The model coefficients
# Inspect the fitted coefficients (log-odds scale).
coefficients(model)
## (Intercept) PWGT
## -4.41218940 0.00376166
## UPREVIS CIG_RECTRUE
## -0.06328943 0.31316930
## GESTREC3< 37 weeks DPLURALtriplet or higher
## 1.54518311 1.39419294
## DPLURALtwin ULD_MECOTRUE
## 0.31231871 0.81842627
## ULD_PRECIPTRUE ULD_BREECHTRUE
## 0.19172008 0.74923672
## URF_DIABTRUE URF_CHYPERTRUE
## -0.34646672 0.56002503
## URF_PHYPERTRUE URF_ECLAMTRUE
## 0.16159872 0.49806435
# example 7.15 of section 7.2.5
# (example 7.15 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: The model summary
# Full model summary: coefficients, standard errors, deviances, AIC.
summary(model)
## Call:
## glm(formula = fmla, family = binomial(link = "logit"), data = train)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.9732 -0.1818 -0.1511 -0.1358 3.2641
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -4.412189 0.289352 -15.249 < 2e-16 ***
## PWGT 0.003762 0.001487 2.530 0.011417 *
## UPREVIS -0.063289 0.015252 -4.150 3.33e-05 ***
## CIG_RECTRUE 0.313169 0.187230 1.673 0.094398 .
## GESTREC3< 37 weeks 1.545183 0.140795 10.975 < 2e-16 ***
## DPLURALtriplet or higher 1.394193 0.498866 2.795 0.005194 **
## DPLURALtwin 0.312319 0.241088 1.295 0.195163
## ULD_MECOTRUE 0.818426 0.235798 3.471 0.000519 ***
## ULD_PRECIPTRUE 0.191720 0.357680 0.536 0.591951
## ULD_BREECHTRUE 0.749237 0.178129 4.206 2.60e-05 ***
## URF_DIABTRUE -0.346467 0.287514 -1.205 0.228187
## URF_CHYPERTRUE 0.560025 0.389678 1.437 0.150676
## URF_PHYPERTRUE 0.161599 0.250003 0.646 0.518029
## URF_ECLAMTRUE 0.498064 0.776948 0.641 0.521489
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 2698.7 on 14211 degrees of freedom
## Residual deviance: 2463.0 on 14198 degrees of freedom
## AIC: 2491
##
## Number of Fisher Scoring iterations: 7
# example 7.16 of section 7.2.5
# (example 7.16 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating deviance residuals
# Predicted probabilities for the training data.
pred <- predict(model, newdata=train, type="response") # Note: 1
llcomponents <- function(y, py) { # Note: 2
  # Per-observation log likelihood contribution. y is the true outcome as
  # a numeric 0/1 variable; py is the predicted probability of y == 1.
  positivePart <- y * log(py)
  negativePart <- (1 - y) * log(1 - py)
  positivePart + negativePart
}
# Deviance residuals: square root of -2 * per-point log likelihood,
# signed by (actual - predicted).
edev <- sign(as.numeric(train$atRisk) - pred) * # Note: 3
  sqrt(-2*llcomponents(as.numeric(train$atRisk), pred))
summary(edev)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## -0.9732 -0.1818 -0.1511 -0.1244 -0.1358 3.2640
# Note 1:
# Create vector of predictions for training
# data.
# Note 2:
# Function to return the log likelihoods for
# each data point. Argument y is the true outcome
# (as a numeric variable, 0/1); argument py is the
# predicted probability.
# Note 3:
# Calculate deviance residuals.
# example 7.17 of section 7.2.5
# (example 7.17 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Computing deviance
loglikelihood <- function(y, py) { # Note: 1
  # Total log likelihood of outcomes y (numeric 0/1) under predicted
  # probabilities py; a scalar py is recycled (e.g. the null model).
  positivePart <- y * log(py)
  negativePart <- (1 - y) * log(1 - py)
  sum(positivePart + negativePart)
}
# Compute null and residual deviances by hand and verify against the
# values glm() stores on the model; then repeat on the test split.
pnull <- mean(as.numeric(train$atRisk)) # Note: 2
null.dev <- -2*loglikelihood(as.numeric(train$atRisk), pnull) # Note: 3
pnull
## [1] 0.01920912
null.dev
## [1] 2698.716
model$null.deviance # Note: 4
## [1] 2698.716
pred <- predict(model, newdata=train, type="response") # Note: 5
resid.dev <- -2*loglikelihood(as.numeric(train$atRisk), pred) # Note: 6
resid.dev
## [1] 2462.992
model$deviance # Note: 7
## [1] 2462.992
testy <- as.numeric(test$atRisk) # Note: 8
testpred <- predict(model, newdata=test,
                    type="response")
pnull.test <- mean(testy)
null.dev.test <- -2*loglikelihood(testy, pnull.test)
resid.dev.test <- -2*loglikelihood(testy, testpred)
pnull.test
## [1] 0.0172713
null.dev.test
## [1] 2110.91
resid.dev.test
## [1] 1947.094
# Note 1:
# Function to calculate the log likelihood of
# a dataset. Variable y is the outcome
# in numeric form (1 for positive examples, 0 for
# negative). Variable py is the
# predicted probability that
# y==1.
# Note 2:
# Calculate rate of positive examples in
# dataset.
# Note 3:
# Calculate null deviance.
# Note 4:
# For training data, the null deviance is
# stored in the slot model$null.deviance.
# Note 5:
# Predict probabilities for training
# data.
# Note 6:
# Calculate deviance of model for training
# data.
# Note 7:
# For training data, model deviance is stored
# in the slot model$deviance.
# Note 8:
# Calculate null deviance and residual
# deviance for test data.
# example 7.18 of section 7.2.5
# (example 7.18 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the significance of the observed fit
# Chi-squared test of the deviance improvement over the null model:
# difference in deviances with difference in degrees of freedom.
df.null <- dim(train)[[1]] - 1 # Note: 1
df.model <- dim(train)[[1]] - length(model$coefficients) # Note: 2
df.null
## [1] 14211
df.model
## [1] 14198
delDev <- null.dev - resid.dev # Note: 3
deldf <- df.null - df.model
p <- pchisq(delDev, deldf, lower.tail=F) # Note: 4
delDev
## [1] 235.724
deldf
## [1] 13
p
## [1] 5.84896e-43
# Note 1:
# Null model has (number of data points - 1)
# degrees of freedom.
# Note 2:
# Fitted model has (number of data points -
# number of coefficients) degrees of freedom.
# Note 3:
# Compute difference in deviances and
# difference in degrees of freedom.
# Note 4:
# Estimate probability of seeing the observed
# difference in deviances under null model (the
# p-value) using chi-squared distribution.
# example 7.19 of section 7.2.5
# (example 7.19 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the pseudo R-squared
# McFadden-style pseudo R-squared: deviance "explained" by the model,
# on train and test.
pr2 <- 1-(resid.dev/null.dev)
print(pr2)
## [1] 0.08734674
pr2.test <- 1-(resid.dev.test/null.dev.test)
print(pr2.test)
## [1] 0.07760427
# example 7.20 of section 7.2.5
# (example 7.20 of section 7.2.5) : Linear and logistic regression : Using logistic regression : Reading the model summary and characterizing coefficients
# Title: Calculating the Akaike information criterion
# Akaike information criterion computed by hand:
# 2 * (number of coefficients - log likelihood).
aic <- 2*(length(model$coefficients) -
         loglikelihood(as.numeric(train$atRisk), pred))
aic
## [1] 2490.992
# example 8.1 of section 8.1.2
# (example 8.1 of section 8.1.2) : Unsupervised methods : Cluster analysis : Preparing the data
# Title: Reading the protein data
# Load the tab-separated European protein-consumption dataset and inspect.
protein <- read.table("protein.txt", sep="\t", header=TRUE)
summary(protein)
## Country RedMeat WhiteMeat Eggs
## Albania : 1 Min. : 4.400 Min. : 1.400 Min. :0.500
## Austria : 1 1st Qu.: 7.800 1st Qu.: 4.900 1st Qu.:2.700
## Belgium : 1 Median : 9.500 Median : 7.800 Median :2.900
## Bulgaria : 1 Mean : 9.828 Mean : 7.896 Mean :2.936
## Czechoslovakia: 1 3rd Qu.:10.600 3rd Qu.:10.800 3rd Qu.:3.700
## Denmark : 1 Max. :18.000 Max. :14.000 Max. :4.700
## (Other) :19
## Milk Fish Cereals Starch
## Min. : 4.90 Min. : 0.200 Min. :18.60 Min. :0.600
## 1st Qu.:11.10 1st Qu.: 2.100 1st Qu.:24.30 1st Qu.:3.100
## Median :17.60 Median : 3.400 Median :28.00 Median :4.700
## Mean :17.11 Mean : 4.284 Mean :32.25 Mean :4.276
## 3rd Qu.:23.30 3rd Qu.: 5.800 3rd Qu.:40.10 3rd Qu.:5.700
## Max. :33.70 Max. :14.200 Max. :56.70 Max. :6.500
##
## Nuts Fr.Veg
## Min. :0.700 Min. :1.400
## 1st Qu.:1.500 1st Qu.:2.900
## Median :2.400 Median :3.800
## Mean :3.072 Mean :4.136
## 3rd Qu.:4.700 3rd Qu.:4.900
## Max. :7.800 Max. :7.900
# example 8.2 of section 8.1.2
# (example 8.2 of section 8.1.2) : Unsupervised methods : Cluster analysis : Preparing the data
# Title: Rescaling the dataset
# Standardize all numeric columns (mean 0, sd 1), keeping the centering
# and scaling vectors so the transform can be undone later.
vars.to.use <- colnames(protein)[-1] # Note: 1
pmatrix <- scale(protein[,vars.to.use]) # Note: 2
pcenter <- attr(pmatrix, "scaled:center") # Note: 3
pscale <- attr(pmatrix, "scaled:scale")
# Note 1:
# Use all the columns except the first
# (Country).
# Note 2:
# The output of scale() is a matrix. For the
# purposes of this chapter, you can think of a
# matrix as a data frame with all numeric columns
# (this isn’t strictly true, but it’s close enough).
# Note 3:
# The scale() function annotates its output
# with two attributes—scaled:center returns the mean
# values of all the columns, and scaled:scale
# returns the standard deviations. You’ll store
# these away so you can “unscale” the data
# later.
# example 8.3 of section 8.1.3
# (example 8.3 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Hierarchical clustering
# Ward hierarchical clustering on Euclidean distances of the scaled data,
# then plot the dendrogram with country labels.
d <- dist(pmatrix, method="euclidean") # Note: 1
pfit <- hclust(d, method="ward.D") # Note: 2
plot(pfit, labels=protein$Country) # Note: 3
# Note 1:
# Create the distance matrix.
# Note 2:
# Do the clustering.
# Note 3:
# Plot the dendrogram.
# informalexample 8.5 of section 8.1.3
# (informalexample 8.5 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Draw rectangles around the 5-cluster cut on the dendrogram.
rect.hclust(pfit, k=5)
# example 8.4 of section 8.1.3
# (example 8.4 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Extracting the clusters found by hclust()
# Assign each row to one of 5 clusters.
groups <- cutree(pfit, k=5)
print_clusters <- function(labels, k) { # Note: 1
  # Report the countries in each of the k clusters along with their red
  # meat, fish, and fruit/vegetable consumption. Hardcoded to the global
  # `protein` data frame.
  displayCols <- c("Country", "RedMeat", "Fish", "Fr.Veg")
  for (clusterId in 1:k) {
    print(paste("cluster", clusterId))
    print(protein[labels == clusterId, displayCols])
  }
}
print_clusters(groups, 5)
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
# Note 1:
# A convenience function for printing out the
# countries in each cluster, along with the values
# for red meat, fish, and fruit/vegetable
# consumption. We’ll use this function throughout
# this section. Note that the function is hardcoded
# for the protein dataset.
# example 8.5 of section 8.1.3
# (example 8.5 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Projecting the clusters on the first two principal components
library(ggplot2)
princ <- prcomp(pmatrix) # Note: 1
nComp <- 2
project <- predict(princ, newdata=pmatrix)[,1:nComp] # Note: 2
project.plus <- cbind(as.data.frame(project), # Note: 3
cluster=as.factor(groups),
country=protein$Country)
ggplot(project.plus, aes(x=PC1, y=PC2)) + # Note: 4
geom_point(aes(shape=cluster)) +
geom_text(aes(label=country),
hjust=0, vjust=1)
# Note 1:
# Calculate the principal components of the
# data.
# Note 2:
# The predict() function will rotate the data
# into the space described by the principal
# components. We only want the projection on the
# first two axes.
# Note 3:
# Create a data frame with the transformed
# data, along with the cluster label and country
# label of each point.
# Note 4:
# Plot it.
# example 8.6 of section 8.1.3
# (example 8.6 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Running clusterboot() on the protein data
library(fpc) # Note: 1
kbest.p<-5 # Note: 2
cboot.hclust <- clusterboot(pmatrix,clustermethod=hclustCBI, # Note: 3
method="ward.D", k=kbest.p)
summary(cboot.hclust$result) # Note: 4
## Length Class Mode
## result 7 hclust list
## noise 1 -none- logical
## nc 1 -none- numeric
## clusterlist 5 -none- list
## partition 25 -none- numeric
## clustermethod 1 -none- character
## nccl 1 -none- numeric
groups<-cboot.hclust$result$partition # Note: 5
print_clusters(groups, kbest.p) # Note: 6
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
cboot.hclust$bootmean # Note: 7
## [1] 0.7905000 0.7990913 0.6173056 0.9312857 0.7560000
cboot.hclust$bootbrd # Note: 8
## [1] 25 11 47 8 35
# Note 1:
# Load the fpc package. You may have to
# install it first. We’ll discuss installing R
# packages in appendix .
# Note 2:
# Set the desired number of clusters.
# Note 3:
# Run clusterboot() with hclust
# ('clustermethod=hclustCBI') using Ward’s method
# ('method="ward.D"') and kbest.p clusters
# ('k=kbest.p'). Return the results in an object
# called cboot.hclust.
# Note 4:
# The results of the clustering are in
# cboot.hclust$result. The output of the hclust()
# function is in cboot.hclust$result$result.
# Note 5:
# cboot.hclust$result$partition returns a
# vector of clusterlabels.
# Note 6:
# The clusters are the same as those produced
# by a direct call to hclust().
# Note 7:
# The vector of cluster stabilities.
# Note 8:
# The count of how many times each cluster was
# dissolved. By default clusterboot() runs 100
# bootstrap iterations.
# example 8.7 of section 8.1.3
# (example 8.7 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Calculating total within sum of squares
sqr_edist <- function(x, y) { # Note: 1
  # Squared Euclidean distance between two numeric vectors.
  delta <- x - y
  sum(delta * delta)
}
wss.cluster <- function(clustermat) { # Note: 2
  # Within-cluster sum of squares for a single cluster, represented as a
  # matrix with one row per point.
  centroid <- colMeans(clustermat) # Note: 3
  point.ss <- apply(clustermat, 1,
                    function(pt) sum((pt - centroid)^2)) # Note: 4
  sum(point.ss)
}
wss.total <- function(dmatrix, labels) { # Note: 5
  # Total within-cluster sum of squares over all clusters.
  # Assumes labels take the values 1..k (as returned by cutree()/kmeans()).
  k <- length(unique(labels))
  per.cluster <- vapply(seq_len(k),
                        function(i) wss.cluster(subset(dmatrix, labels==i)), # Note: 6
                        numeric(1))
  sum(per.cluster)
}
# Note 1:
# Function to calculate squared distance
# between two vectors.
# Note 2:
# Function to calculate the WSS for a single
# cluster, which is represented as a matrix (one row
# for every point).
# Note 3:
# Calculate the centroid of the cluster (the
# mean of all the points).
# Note 4:
# Calculate the squared difference of every
# point in the cluster from the centroid, and sum
# all the distances.
# Note 5:
# Function to compute the total WSS from a set
# of data points and cluster labels.
# Note 6:
# Extract each cluster, calculate the
# cluster’s WSS, and sum all the values.
# example 8.8 of section 8.1.3
# (example 8.8 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: The Calinski-Harabasz index
totss <- function(dmatrix) { # Note: 1
  # Total sum of squares: summed squared distance of every point (row)
  # from the grand mean of the data.
  overall.mean <- colMeans(dmatrix)
  sum(apply(dmatrix, 1, function(pt) sum((pt - overall.mean)^2)))
}
ch_criterion <- function(dmatrix, kmax, method="kmeans") { # Note: 2
  # Compute the Calinski-Harabasz (CH) index for clusterings of sizes
  # 1..kmax, using either kmeans or hierarchical (Ward) clustering.
  #
  # dmatrix: numeric matrix of data points (one row per point).
  # kmax:    largest number of clusters to evaluate (>= 1).
  # method:  one of "kmeans" or "hclust".
  #
  # Returns a list with:
  #   crit:  CH index for k = 1..kmax (crit[1] is NaN by construction,
  #          since BSS/(k-1) is 0/0 at k = 1).
  #   wss:   total within-cluster sum of squares for each k.
  #   totss: total sum of squares of the data (independent of k).
  if(!(method %in% c("kmeans", "hclust"))) {
    stop("method must be one of c('kmeans', 'hclust')")
  }
  npts <- dim(dmatrix)[1] # number of rows.
  tss <- totss(dmatrix) # Note: 3  (local renamed so it no longer shadows the totss() function)
  wss <- numeric(kmax)
  wss[1] <- (npts-1)*sum(apply(dmatrix, 2, var)) # Note: 4
  if(kmax >= 2) { # guard: when kmax == 1, 2:kmax would wrongly yield c(2, 1)
    for(k in 2:kmax) { # Note: 5
      if(method=="kmeans") {
        clustering<-kmeans(dmatrix, k, nstart=10, iter.max=100)
        wss[k] <- clustering$tot.withinss
      }else { # hclust # Note: 6
        d <- dist(dmatrix, method="euclidean")
        pfit <- hclust(d, method="ward.D")
        labels <- cutree(pfit, k=k)
        wss[k] <- wss.total(dmatrix, labels)
      }
    }
  }
  bss <- tss - wss # Note: 7
  crit.num <- bss/(0:(kmax-1)) # Note: 8
  crit.denom <- wss/(npts - 1:kmax) # Note: 9
  list(crit = crit.num/crit.denom, wss = wss, totss = tss) # Note: 10
}
# Note 1:
# Convenience function to calculate the total
# sum of squares.
# Note 2:
# A function to calculate the CH index for a
# number of clusters from 1 to kmax.
# Note 3:
# The total sum of squares is independent of
# the clustering.
# Note 4:
# Calculate WSS for k=1 (which is really just
# total sum of squares).
# Note 5:
# Calculate WSS for k from 2 to kmax. kmeans()
# returns the total WSS as one of its
# outputs.
# Note 6:
# For hclust(), calculate total WSS by
# hand.
# Note 7:
# Calculate BSS for k from 1 to kmax.
# Note 8:
# Normalize BSS by k-1.
# Note 9:
# Normalize WSS by npts - k.
# Note 10:
# Return a vector of CH indices and of WSS for
# k from 1 to kmax. Also return total sum of
# squares.
# example 8.9 of section 8.1.3
# (example 8.9 of section 8.1.3) : Unsupervised methods : Cluster analysis : Hierarchical clustering with hclust
# Title: Evaluating clusterings with different numbers of clusters
library(reshape2) # Note: 1
clustcrit <- ch_criterion(pmatrix, 10, method="hclust") # Note: 2
critframe <- data.frame(k=1:10, ch=scale(clustcrit$crit), # Note: 3
wss=scale(clustcrit$wss))
critframe <- melt(critframe, id.vars=c("k"), # Note: 4
variable.name="measure",
value.name="score")
ggplot(critframe, aes(x=k, y=score, color=measure)) + # Note: 5
geom_point(aes(shape=measure)) + geom_line(aes(linetype=measure)) +
scale_x_continuous(breaks=1:10, labels=1:10)
# Note 1:
# Load the reshape2 package (for the melt()
# function).
# Note 2:
# Calculate both criteria for 1–10
# clusters.
# Note 3:
# Create a data frame with the number of
# clusters, the CH criterion, and the WSS criterion.
# We’ll scale both the CH and WSS criteria to
# similar ranges so that we can plot them both on
# the same graph.
# Note 4:
# Use the melt() function to put the data
# frame in a shape suitable for ggplot
# Note 5:
# Plot it.
# example 8.10 of section 8.1.4
# (example 8.10 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Running k-means with k=5
pclusters <- kmeans(pmatrix, kbest.p, nstart=100, iter.max=100) # Note: 1
summary(pclusters) # Note: 2
## Length Class Mode
## cluster 25 -none- numeric
## centers 45 -none- numeric
## totss 1 -none- numeric
## withinss 5 -none- numeric
## tot.withinss 1 -none- numeric
## betweenss 1 -none- numeric
## size 5 -none- numeric
pclusters$centers # Note: 3
## RedMeat WhiteMeat Eggs Milk Fish
## 1 -0.807569986 -0.8719354 -1.55330561 -1.0783324 -1.0386379
## 2 0.006572897 -0.2290150 0.19147892 1.3458748 1.1582546
## 3 -0.570049402 0.5803879 -0.08589708 -0.4604938 -0.4537795
## 4 1.011180399 0.7421332 0.94084150 0.5700581 -0.2671539
## 5 -0.508801956 -1.1088009 -0.41248496 -0.8320414 0.9819154
## Cereals Starch Nuts Fr.Veg
## 1 1.7200335 -1.4234267 0.9961313 -0.64360439
## 2 -0.8722721 0.1676780 -0.9553392 -1.11480485
## 3 0.3181839 0.7857609 -0.2679180 0.06873983
## 4 -0.6877583 0.2288743 -0.5083895 0.02161979
## 5 0.1300253 -0.1842010 1.3108846 1.62924487
pclusters$size # Note: 4
## [1] 4 4 5 8 4
groups <- pclusters$cluster # Note: 5
print_clusters(groups, kbest.p) # Note: 6
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
# Note 1:
# Run kmeans() with five clusters (kbest.p=5),
# 100 random starts, and 100 maximum iterations per
# run.
# Note 2:
# kmeans() returns all the sum of squares
# measures.
# Note 3:
# pclusters$centers is a matrix whose rows are
# the centroids of the clusters. Note that
# pclusters$centers is in the scaled coordinates,
# not the original protein coordinates.
# Note 4:
# pclusters$size returns the number of points
# in each cluster. Generally (though not always) a
# good clustering will be fairly well balanced: no
# extremely small clusters and no extremely large
# ones.
# Note 5:
# pclusters$cluster is a vector of cluster
# labels.
# Note 6:
# In this case, kmeans() and hclust() returned
# the same clustering. This won’t always be
# true.
# example 8.11 of section 8.1.4
# (example 8.11 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Plotting cluster criteria
clustering.ch <- kmeansruns(pmatrix, krange=1:10, criterion="ch") # Note: 1
clustering.ch$bestk # Note: 2
## [1] 2
clustering.asw <- kmeansruns(pmatrix, krange=1:10, criterion="asw") # Note: 3
clustering.asw$bestk
## [1] 3
clustering.ch$crit # Note: 4
## [1] 0.000000 14.094814 11.417985 10.418801 10.011797 9.964967
## [7] 9.861682 9.412089 9.166676 9.075569
clustcrit$crit # Note: 5
## [1] NaN 12.215107 10.359587 9.690891 10.011797 9.964967
## [7] 9.506978 9.092065 8.822406 8.695065
critframe <- data.frame(k=1:10, ch=scale(clustering.ch$crit), # Note: 6
asw=scale(clustering.asw$crit))
critframe <- melt(critframe, id.vars=c("k"),
variable.name="measure",
value.name="score")
ggplot(critframe, aes(x=k, y=score, color=measure)) +
geom_point(aes(shape=measure)) + geom_line(aes(linetype=measure)) +
scale_x_continuous(breaks=1:10, labels=1:10)
summary(clustering.ch) # Note: 7
## Length Class Mode
## cluster 25 -none- numeric
## centers 18 -none- numeric
## totss 1 -none- numeric
## withinss 2 -none- numeric
## tot.withinss 1 -none- numeric
## betweenss 1 -none- numeric
## size 2 -none- numeric
## crit 10 -none- numeric
## bestk 1 -none- numeric
# Note 1:
# Run kmeansruns() from 1–10 clusters, and the
# CH criterion. By default, kmeansruns() uses 100
# random starts and 100 maximum iterations per
# run.
# Note 2:
# The CH criterion picks two clusters.
# Note 3:
# Run kmeansruns() from 1–10 clusters, and the
# average silhouette width criterion. Average
# silhouette width picks 3 clusters.
# Note 4:
# The vector of criterion values is called
# crit.
# Note 5:
# Compare the CH values for kmeans() and
# hclust(). They’re not quite the same, because the
# two algorithms didn’t pick the same
# clusters.
# Note 6:
# Plot the values for the two criteria.
# Note 7:
# kmeansruns() also returns the output of
# kmeans for k=bestk.
# example 8.12 of section 8.1.4
# (example 8.12 of section 8.1.4) : Unsupervised methods : Cluster analysis : The k-means algorithm
# Title: Running clusterboot() with k-means
kbest.p<-5
cboot<-clusterboot(pmatrix, clustermethod=kmeansCBI,
runs=100,iter.max=100,
krange=kbest.p, seed=15555) # Note: 1
groups <- cboot$result$partition
print_clusters(cboot$result$partition, kbest.p)
## [1] "cluster 1"
## Country RedMeat Fish Fr.Veg
## 1 Albania 10.1 0.2 1.7
## 4 Bulgaria 7.8 1.2 4.2
## 18 Romania 6.2 1.0 2.8
## 25 Yugoslavia 4.4 0.6 3.2
## [1] "cluster 2"
## Country RedMeat Fish Fr.Veg
## 6 Denmark 10.6 9.9 2.4
## 8 Finland 9.5 5.8 1.4
## 15 Norway 9.4 9.7 2.7
## 20 Sweden 9.9 7.5 2.0
## [1] "cluster 3"
## Country RedMeat Fish Fr.Veg
## 5 Czechoslovakia 9.7 2.0 4.0
## 7 E Germany 8.4 5.4 3.6
## 11 Hungary 5.3 0.3 4.2
## 16 Poland 6.9 3.0 6.6
## 23 USSR 9.3 3.0 2.9
## [1] "cluster 4"
## Country RedMeat Fish Fr.Veg
## 2 Austria 8.9 2.1 4.3
## 3 Belgium 13.5 4.5 4.0
## 9 France 18.0 5.7 6.5
## 12 Ireland 13.9 2.2 2.9
## 14 Netherlands 9.5 2.5 3.7
## 21 Switzerland 13.1 2.3 4.9
## 22 UK 17.4 4.3 3.3
## 24 W Germany 11.4 3.4 3.8
## [1] "cluster 5"
## Country RedMeat Fish Fr.Veg
## 10 Greece 10.2 5.9 6.5
## 13 Italy 9.0 3.4 6.7
## 17 Portugal 6.2 14.2 7.9
## 19 Spain 7.1 7.0 7.2
cboot$bootmean
## [1] 0.8670000 0.8420714 0.6147024 0.7647341 0.7508333
cboot$bootbrd
## [1] 15 20 49 17 32
# Note 1:
# We’ve set the seed for the random generator
# so the results are reproducible.
# example 8.13 of section 8.1.5
# (example 8.13 of section 8.1.5) : Unsupervised methods : Cluster analysis : Assigning new points to clusters
# Title: A function to assign points to a cluster
assign_cluster <- function(newpt, centers, xcenter=0, xscale=1) { # Note: 1
  # Assign a new data point to the cluster with the nearest centroid.
  # centers: matrix with one centroid per row. If the training data was
  # scale()d before clustering, pass the "scaled:center" and
  # "scaled:scale" attributes as xcenter and xscale.
  scaled.pt <- (newpt - xcenter)/xscale # Note: 2
  dists <- apply(centers, 1,
                 function(ctr) sum((ctr - scaled.pt)^2)) # Note: 3
  which.min(dists) # Note: 4
}
# Note 1:
# A function to assign a new data point newpt to
# a clustering described by centers, a matrix where
# each row is a cluster centroid. If the data was
# scaled (using scale()) before clustering, then
# xcenter and xscale are the scaled:center and
# scaled:scale attributes, respectively.
# Note 2:
# Center and scale the new data point.
# Note 3:
# Calculate how far the new data point is from
# each of the cluster centers.
# Note 4:
# Return the cluster number of the closest
# centroid.
# example 8.14 of section 8.1.5
# (example 8.14 of section 8.1.5) : Unsupervised methods : Cluster analysis : Assigning new points to clusters
# Title: An example of assigning points to cluster
rnorm.multidim <- function(n, mean, sd, colstr="x") { # Note: 1
  # Draw n points from an axis-aligned multivariate Gaussian.
  # mean, sd: per-dimension mean and standard deviation vectors; the
  # dimension of the distribution is length(mean).
  # Returns an n x ndim matrix with columns named colstr1..colstrK.
  ndim <- length(mean)
  # Preallocate the result instead of growing it with cbind() in the
  # loop; the column-by-column rnorm() call order is unchanged, so the
  # output is identical under the same RNG seed.
  data <- matrix(0, nrow = n, ncol = ndim)
  for(i in seq_len(ndim)) {
    data[, i] <- rnorm(n, mean=mean[[i]], sd=sd[[i]])
  }
  colnames(data) <- paste(colstr, seq_len(ndim), sep='')
  data
}
mean1 <- c(1, 1, 1) # Note: 2
sd1 <- c(1, 2, 1)
mean2 <- c(10, -3, 5)
sd2 <- c(2, 1, 2)
mean3 <- c(-5, -5, -5)
sd3 <- c(1.5, 2, 1)
clust1 <- rnorm.multidim(100, mean1, sd1) # Note: 3
clust2 <- rnorm.multidim(100, mean2, sd2)
clust3 <- rnorm.multidim(100, mean3, sd3)
toydata <- rbind(clust3, rbind(clust1, clust2))
tmatrix <- scale(toydata) # Note: 4
tcenter <- attr(tmatrix, "scaled:center") # Note: 5
tscale<-attr(tmatrix, "scaled:scale")
kbest.t <- 3
tclusters <- kmeans(tmatrix, kbest.t, nstart=100, iter.max=100) # Note: 6
tclusters$size # Note: 7
## [1] 100 101 99
unscale <- function(scaledpt, centervec, scalevec) { # Note: 8
  # Invert scale(): map a scaled point back into original coordinates.
  centervec + scaledpt * scalevec
}
unscale(tclusters$centers[1,], tcenter, tscale) # Note: 9
## x1 x2 x3
## 9.978961 -3.097584 4.864689
mean2
## [1] 10 -3 5
unscale(tclusters$centers[2,], tcenter, tscale) # Note: 10
## x1 x2 x3
## -4.979523 -4.927404 -4.908949
mean3
## [1] -5 -5 -5
unscale(tclusters$centers[3,], tcenter, tscale) # Note: 11
## x1 x2 x3
## 1.0003356 1.3037825 0.9571058
mean1
## [1] 1 1 1
assign_cluster(rnorm.multidim(1, mean1, sd1), # Note: 12
tclusters$centers,
tcenter, tscale)
## 3 # Note: 13
## 3
assign_cluster(rnorm.multidim(1, mean2, sd1), # Note: 14
tclusters$centers,
tcenter, tscale)
## 1 # Note: 15
## 1
assign_cluster(rnorm.multidim(1, mean3, sd1), # Note: 16
tclusters$centers,
tcenter, tscale)
## 2 # Note: 17
## 2
# Note 1:
# A function to generate n points drawn from a
# multidimensional Gaussian distribution with
# centroid mean and standard deviation sd. The
# dimension of the distribution is given by the
# length of the vector mean.
# Note 2:
# The parameters for three Gaussian
# distributions.
# Note 3:
# Create a dataset with 100 points each drawn
# from the above distributions.
# Note 4:
# Scale the dataset.
# Note 5:
# Store the centering and scaling parameters for
# future use.
# Note 6:
# Cluster the dataset, using k-means with three
# clusters.
# Note 7:
# The resulting clusters are about the right
# size.
# Note 8:
# A function to “unscale” data points (put them
# back in the coordinates of the original
# dataset).
# Note 9:
# Unscale the first centroid. It corresponds to
# our original distribution 2.
# Note 10:
# The second centroid corresponds to the
# original distribution 3.
# Note 11:
# The third centroid corresponds to the original
# distribution 1.
# Note 12:
# Generate a random point from the original
# distribution 1 and assign it to one of the
# discovered clusters.
# Note 13:
# It’s assigned to cluster 3, as we would
# expect.
# Note 14:
# Generate a random point from the original
# distribution 2 and assign it.
# Note 15:
# It’s assigned to cluster 1.
# Note 16:
# Generate a random point from the original
# distribution 3 and assign it.
# Note 17:
# It’s assigned to cluster 2.
# example 8.15 of section 8.2.3
# (example 8.15 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Reading in the book data
library(arules) # Note: 1
bookbaskets <- read.transactions("bookdata.tsv.gz", format="single", # Note: 2
                                 sep="\t", # Note: 3
                                 cols=c("userid", "title"), # Note: 4
                                 rm.duplicates=TRUE) # Note: 5  (TRUE, not the reassignable alias T)
# Note 1:
# Load the arules package.
# Note 2:
# Specify the file and the file format.
# Note 3:
# Specify the column separator (a tab).
# Note 4:
# Specify the column of transaction IDs and of
# item IDs, respectively.
# Note 5:
# Tell the function to look for and remove
# duplicate entries (for example, multiple entries
# for “The Hobbit” by the same user).
# example 8.16 of section 8.2.3
# (example 8.16 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Examining the transaction data
class(bookbaskets) # Note: 1
## [1] "transactions"
## attr(,"package")
## [1] "arules"
bookbaskets # Note: 2
## transactions in sparse format with
## 92108 transactions (rows) and
## 220447 items (columns)
dim(bookbaskets) # Note: 3
## [1] 92108 220447
colnames(bookbaskets)[1:5] # Note: 4
## [1] " A Light in the Storm:[...]"
## [2] " Always Have Popsicles"
## [3] " Apple Magic"
## [4] " Ask Lily"
## [5] " Beyond IBM: Leadership Marketing and Finance for the 1990s"
rownames(bookbaskets)[1:5] # Note: 5
## [1] "10" "1000" "100001" "100002" "100004"
# Note 1:
# The object is of class transactions.
# Note 2:
# Printing the object tells you its
# dimensions.
# Note 3:
# You can also use dim() to see the dimensions
# of the matrix.
# Note 4:
# The columns are labeled by book
# title.
# Note 5:
# The rows are labeled by customer.
# informalexample 8.7 of section 8.2.3
# (informalexample 8.7 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
basketSizes <- size(bookbaskets)
summary(basketSizes)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.0 1.0 1.0 11.1 4.0 10250.0
# example 8.17 of section 8.2.3
# (example 8.17 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Examining the size distribution
quantile(basketSizes, probs=seq(0,1,0.1)) # Note: 1
## 0% 10% 20% 30% 40% 50% 60% 70% 80% 90% 100%
## 1 1 1 1 1 1 2 3 5 13 10253
library(ggplot2) # Note: 2
ggplot(data.frame(count=basketSizes)) +
  geom_density(aes(x=count)) + # binwidth is not a geom_density() argument; removed
  scale_x_log10()
# Note 1:
# Look at the basket size distribution, in 10%
# increments.
# Note 2:
# Plot the distribution to get a better
# look.
# informalexample 8.8 of section 8.2.3
# (informalexample 8.8 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
bookFreq <- itemFrequency(bookbaskets)
summary(bookFreq) # was commented out in the transcript; the output below is this call's result
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.086e-05 1.086e-05 1.086e-05 5.035e-05 3.257e-05 2.716e-02
sum(bookFreq)
## [1] 11.09909
# example 8.18 of section 8.2.3
# (example 8.18 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding the ten most frequent books
bookCount <- (bookFreq/sum(bookFreq))*sum(basketSizes) # Note: 1
summary(bookCount)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 1.000 1.000 1.000 4.637 3.000 2502.000
orderedBooks <- sort(bookCount, decreasing=TRUE) # Note: 2  (TRUE, not the reassignable alias T)
orderedBooks[1:10]
## Wild Animus
## 2502
## The Lovely Bones: A Novel
## 1295
## She's Come Undone
## 934
## The Da Vinci Code
## 905
## Harry Potter and the Sorcerer's Stone
## 832
## The Nanny Diaries: A Novel
## 821
## A Painted House
## 819
## Bridget Jones's Diary
## 772
## The Secret Life of Bees
## 762
## Divine Secrets of the Ya-Ya Sisterhood: A Novel
## 737
orderedBooks[1]/dim(bookbaskets)[1] # Note: 3
## Wild Animus
## 0.02716376
# Note 1:
# Get the absolute count of book
# occurrences.
# Note 2:
# Sort the count and list the 10 most popular
# books.
# Note 3:
# The most popular book in the dataset
# occurred in fewer than 3% of the baskets.
# informalexample 8.9 of section 8.2.3
# (informalexample 8.9 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
bookbaskets_use <- bookbaskets[basketSizes > 1]
dim(bookbaskets_use)
## [1] 40822 220447
# example 8.19 of section 8.2.3
# (example 8.19 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding the association rules
rules <- apriori(bookbaskets_use, # Note: 1
parameter =list(support = 0.002, confidence=0.75))
summary(rules)
## set of 191 rules # Note: 2
##
## rule length distribution (lhs + rhs):sizes # Note: 3
## 2 3 4 5
## 11 100 66 14
##
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2.000 3.000 3.000 3.435 4.000 5.000
##
## summary of quality measures: # Note: 4
## support confidence lift
## Min. :0.002009 Min. :0.7500 Min. : 40.89
## 1st Qu.:0.002131 1st Qu.:0.8113 1st Qu.: 86.44
## Median :0.002278 Median :0.8468 Median :131.36
## Mean :0.002593 Mean :0.8569 Mean :129.68
## 3rd Qu.:0.002695 3rd Qu.:0.9065 3rd Qu.:158.77
## Max. :0.005830 Max. :0.9882 Max. :321.89
##
## mining info: # Note: 5
## data ntransactions support confidence
## bookbaskets_use 40822 0.002 0.75
# Note 1:
# Call apriori() with a minimum support of
# 0.002 and a minimum confidence of 0.75.
# Note 2:
# The summary of the apriori() output reports
# the number of rules found;...
# Note 3:
# ...the distribution of rule lengths (in this
# example, most rules contain 3 items—2 on the left
# side, X (lhs), and one on the right side, Y
# (rhs));...
# Note 4:
# ...a summary of rule quality measures,
# including support and confidence;...
# Note 5:
# ...and some information on how apriori() was
# called.
# example 8.20 of section 8.2.3
# (example 8.20 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Scoring rules
measures <- interestMeasure(rules, # Note: 1
measure=c("coverage", "fishersExactTest"), # Note: 2
transactions=bookbaskets_use) # Note: 3
summary(measures)
## coverage fishersExactTest
## Min. :0.002082 Min. : 0.000e+00
## 1st Qu.:0.002511 1st Qu.: 0.000e+00
## Median :0.002719 Median : 0.000e+00
## Mean :0.003039 Mean :5.080e-138
## 3rd Qu.:0.003160 3rd Qu.: 0.000e+00
## Max. :0.006982 Max. :9.702e-136
# Note 1:
# The call to interestMeasure() takes as
# arguments the discovered rules,...
# Note 2:
# ...a list of interest measures to
# apply,...
# Note 3:
# ...and a dataset to evaluate the interest
# measures over. This is usually the same set used
# to mine the rules, but it needn’t be. For
# instance, you can evaluate the rules over the full
# dataset, bookbaskets, to get coverage estimates
# that reflect all the customers, not just the ones
# who showed interest in more than one book.
# informalexample 8.10 of section 8.2.3
# (informalexample 8.10 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
inspect(head((sort(rules, by="confidence")), n=5))
# example 8.21 of section 8.2.3
# (example 8.21 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Finding rules with restrictions
brules <- apriori(bookbaskets_use,
parameter =list(support = 0.001, # Note: 1
confidence=0.6),
appearance=list(rhs=c("The Lovely Bones: A Novel"), # Note: 2
default="lhs")) # Note: 3
summary(brules)
## set of 46 rules
##
## rule length distribution (lhs + rhs):sizes
## 3 4
## 44 2
##
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 3.000 3.000 3.000 3.043 3.000 4.000
##
## summary of quality measures:
## support confidence lift
## Min. :0.001004 Min. :0.6000 Min. :21.81
## 1st Qu.:0.001029 1st Qu.:0.6118 1st Qu.:22.24
## Median :0.001102 Median :0.6258 Median :22.75
## Mean :0.001132 Mean :0.6365 Mean :23.14
## 3rd Qu.:0.001219 3rd Qu.:0.6457 3rd Qu.:23.47
## Max. :0.001396 Max. :0.7455 Max. :27.10
##
## mining info:
## data ntransactions support confidence
## bookbaskets_use 40822 0.001 0.6
# Note 1:
# Relax the minimum support to 0.001 and the
# minimum confidence to 0.6.
# Note 2:
# Only The Lovely Bones
# is allowed to appear on the right side of the
# rules.
# Note 3:
# By default, all the books can go into the
# left side of the rules.
# example 8.22 of section 8.2.3
# (example 8.22 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Inspecting rules
brulesConf <- sort(brules, by="confidence") # Note: 1
inspect(head(lhs(brulesConf), n=5)) # Note: 2
## items
## 1 {Divine Secrets of the Ya-Ya Sisterhood: A Novel,
## Lucky : A Memoir}
## 2 {Lucky : A Memoir,
## The Notebook}
## 3 {Lucky : A Memoir,
## Wild Animus}
## 4 {Midwives: A Novel,
## Wicked: The Life and Times of the Wicked Witch of the West}
## 5 {Lucky : A Memoir,
## Summer Sisters}
# Note 1:
# Sort the rules by confidence.
# Note 2:
# Use the lhs() function to get the left
# itemsets of each rule; then inspect the top
# five.
# example 8.23 of section 8.2.3
# (example 8.23 of section 8.2.3) : Unsupervised methods : Association rules : Mining association rules with the arules package
# Title: Inspecting rules with restrictions
brulesSub <- subset(brules, subset=!(lhs %in% "Lucky : A Memoir")) # Note: 1
brulesConf <- sort(brulesSub, by="confidence")
inspect(head(lhs(brulesConf), n=5))
## items
## 1 {Midwives: A Novel,
## Wicked: The Life and Times of the Wicked Witch of the West}
## 2 {She's Come Undone,
## The Secret Life of Bees,
## Wild Animus}
## 3 {A Walk to Remember,
## The Nanny Diaries: A Novel}
## 4 {Beloved,
## The Red Tent}
## 5 {The Da Vinci Code,
## The Reader}
# Note 1:
# Restrict to the subset of rules where
# Lucky is not in the left
# side.
# example 9.1 of section 9.1.1
# (example 9.1 of section 9.1.1) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using bagging to improve prediction
# Title: Preparing Spambase data and evaluating the performance of decision trees
spamD <- read.table('spamD.tsv', header=TRUE, sep='\t') # Note: 1  (TRUE, not the reassignable alias T)
spamTrain <- subset(spamD, spamD$rgroup >= 10)
spamTest <- subset(spamD, spamD$rgroup < 10)
spamVars <- setdiff(colnames(spamD), list('rgroup', 'spam'))
spamFormula <- as.formula(paste('spam=="spam"', # Note: 2
                                paste(spamVars, collapse=' + '), sep=' ~ '))
loglikelihood <- function(y, py) { # Note: 3
  # Log likelihood of 0/1 outcomes y under predicted probabilities py.
  # Probabilities of exactly 0 or exactly 1 are nudged inward so that
  # log() stays finite.
  pysmooth <- ifelse(py==0, 1e-12, py)
  pysmooth <- ifelse(pysmooth==1, 1-1e-12, pysmooth)
  sum(y * log(pysmooth) + (1-y)*log(1 - pysmooth))
}
accuracyMeasures <- function(pred, truth, name="model") { # Note: 4
  # Report model quality as a one-row data frame: accuracy, F1, and
  # deviance normalized by the number of points (so train/test deviances
  # are comparable).
  # pred:  predicted class probabilities; truth: logical (or 0/1) labels.
  norm.dev <- -2*loglikelihood(as.numeric(truth), pred)/length(pred) # Note: 5
  cmat <- table(truth=truth,
                pred=(pred>0.5)) # Note: 6
  acc <- sum(diag(cmat))/sum(cmat)
  prec <- cmat[2,2]/sum(cmat[,2])
  rec <- cmat[2,2]/sum(cmat[2,])
  data.frame(model=name,
             accuracy=acc,
             f1=2*prec*rec/(prec+rec),
             dev.norm=norm.dev)
}
library(rpart) # Note: 7
treemodel <- rpart(spamFormula, spamTrain)
accuracyMeasures(predict(treemodel, newdata=spamTrain), # Note: 8
spamTrain$spam=="spam",
name="tree, training")
accuracyMeasures(predict(treemodel, newdata=spamTest),
spamTest$spam=="spam",
name="tree, test")
# Note 1:
# Load the data and split into training (90% of data)
# and test (10% of data) sets.
# Note 2:
# Use all the features and do binary classification,
# where TRUE corresponds to spam documents.
# Note 3:
# A function to calculate log likelihood (for
# calculating deviance).
# Note 4:
# A function to calculate and return various measures
# on the model: normalized deviance, prediction accuracy, and f1, which is the
# harmonic mean of precision and recall.
# Note 5:
# Normalize the deviance by the number of data points
# so that we can compare the deviance across training and test
# sets.
# Note 6:
# Convert the class probability estimator into a
# classifier by labeling documents that score greater than 0.5 as
# spam.
# Note 7:
# Load the rpart library and fit a decision tree
# model.
# Note 8:
# Evaluate the decision tree model against the
# training and test sets.
# example 9.2 of section 9.1.1
# (example 9.2 of section 9.1.1) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using bagging to improve prediction
# Title: Bagging decision trees
ntrain <- dim(spamTrain)[1]
n <- ntrain # Note: 1
ntree <- 100
samples <- sapply(1:ntree, # Note: 2
                  FUN = function(iter) {
                    sample(1:ntrain, size=n, replace=TRUE) # TRUE, not the reassignable alias T
                  })
treelist <- lapply(1:ntree, # Note: 3
                   FUN = function(iter) {
                     samp <- samples[, iter]
                     rpart(spamFormula, spamTrain[samp, ])
                   })
predict.bag <- function(treelist, newdata) { # Note: 4
  # Average the probability predictions of an ensemble of models over
  # newdata. Assumes each model's predict() returns decision
  # probabilities, not hard class decisions.
  preds <- sapply(seq_along(treelist),  # seq_along() is safe for empty lists; 1:length() is not
                  FUN=function(iter) {
                    predict(treelist[[iter]], newdata=newdata)})
  # rowMeans() replaces the hand-rolled rowSums()/length() average.
  rowMeans(preds)
}
accuracyMeasures(predict.bag(treelist, newdata=spamTrain), # Note: 5
spamTrain$spam=="spam",
name="bagging, training")
accuracyMeasures(predict.bag(treelist, newdata=spamTest),
spamTest$spam=="spam",
name="bagging, test")
# Note 1:
# Use bootstrap samples the same size as the training
# set, with 100 trees.
# Note 2:
# Build the bootstrap samples by sampling the row indices of spamTrain with replacement. Each
# column of the matrix samples represents the row indices into spamTrain
# that comprise the bootstrap sample.
# Note 3:
# Train the individual decision trees and return them
# in a list. Note: this step can take a few minutes.
# Note 4:
# predict.bag assumes the underlying classifier returns decision probabilities, not
# decisions.
# Note 5:
# Evaluate the bagged decision trees against the
# training and test sets.
# example 9.3 of section 9.1.2
# (example 9.3 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: Using random forests
# Fit a random forest on the spam training data and evaluate it on the
# training set. Fix: importance=TRUE instead of T (T is reassignable).
library(randomForest) # Note: 1
set.seed(5123512) # Note: 2
fmodel <- randomForest(x=spamTrain[,spamVars], # Note: 3
                       y=spamTrain$spam,
                       ntree=100, # Note: 4
                       nodesize=7, # Note: 5
                       importance=TRUE) # Note: 6
accuracyMeasures(predict(fmodel, # Note: 7
                         newdata=spamTrain[,spamVars],type='prob')[,'spam'],
                 spamTrain$spam=="spam",name="random forest, train")
## model accuracy f1 dev.norm
## 1 random forest, train 0.9884142 0.9706611 0.1428786
# Repeat the evaluation on the held-out test set.
accuracyMeasures(predict(fmodel,
                         newdata=spamTest[,spamVars],type='prob')[,'spam'],
                 spamTest$spam=="spam",name="random forest, test")
## model accuracy f1 dev.norm
## 1 random forest, test 0.9541485 0.8845029 0.3972416
# Note 1:
# Load the randomForest package.
# Note 2:
# Set the pseudo-random seed to a known value to try
# and make the random forest run repeatable.
# Note 3:
# Call the randomForest() function to build the model
# with explanatory variables as x and the category to be predicted as
# y.
# Note 4:
# Use 100 trees to be compatible with our bagging
# example. The default is 500 trees.
# Note 5:
# Specify that each node of a tree must have a minimum
# of 7 elements, to be compatible with the default minimum node size that rpart()
# uses on this training set.
# Note 6:
# Tell the algorithm to save information to be used for
# calculating variable importance (we’ll see this later).
# Note 7:
# Report the model quality.
# example 9.4 of section 9.1.2
# (example 9.4 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: randomForest variable importances
# Extract per-variable importance scores from the fitted random forest
# (available because the model was fit with importance enabled).
varImp <- importance(fmodel) # Note: 1
varImp[1:10, ] # Note: 2
## non-spam spam MeanDecreaseAccuracy
## word.freq.make 2.096811 3.7304353 4.334207
## word.freq.address 3.603167 3.9967031 4.977452
## word.freq.all 2.799456 4.9527834 4.924958
## word.freq.3d 3.000273 0.4125932 2.917972
## word.freq.our 9.037946 7.9421391 10.731509
## word.freq.over 5.879377 4.2402613 5.751371
## word.freq.remove 16.637390 13.9331691 17.753122
## word.freq.internet 7.301055 4.4458342 7.947515
## word.freq.order 3.937897 4.3587883 4.866540
## word.freq.mail 5.022432 3.4701224 6.103929
# type=1 plots mean decrease in accuracy (rather than node impurity).
varImpPlot(fmodel, type=1) # Note: 3
# Note 1:
# Call importance() on the spam
# model.
# Note 2:
# The importance() function returns a matrix of
# importance measures (larger values = more important).
# Note 3:
# Plot the variable importance as measured by
# accuracy change.
# example 9.5 of section 9.1.2
# (example 9.5 of section 9.1.2) : Exploring advanced methods : Using bagging and random forests to reduce training variance : Using random forests to further improve prediction
# Title: Fitting with fewer variables
# Refit the forest using only the 25 most important variables and compare
# accuracy with the full model. Fix: TRUE instead of T (T is reassignable).
# NOTE(review): varImp[,1] is the first (class-specific) importance column,
# not MeanDecreaseAccuracy -- confirm this matches the intended ranking.
selVars <- names(sort(varImp[,1], decreasing=TRUE))[1:25] # Note: 1
fsel <- randomForest(x=spamTrain[,selVars],y=spamTrain$spam, # Note: 2
                     ntree=100,
                     nodesize=7,
                     importance=TRUE)
accuracyMeasures(predict(fsel,
                         newdata=spamTrain[,selVars],type='prob')[,'spam'],
                 spamTrain$spam=="spam",name="RF small, train")
## model accuracy f1 dev.norm
## 1 RF small, train 0.9876901 0.9688546 0.1506817
accuracyMeasures(predict(fsel,
                         newdata=spamTest[,selVars],type='prob')[,'spam'],
                 spamTest$spam=="spam",name="RF small, test")
## model accuracy f1 dev.norm
## 1 RF small, test 0.9497817 0.8738142 0.400825
# Note 1:
# Sort the variables by their importance, as
# measured by accuracy change.
# Note 2:
# Build a random forest model using only the 25
# most important variables.
# example 9.6 of section 9.2.2
# (example 9.6 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Preparing an artificial problem
# Build a noisy 1-D nonlinear regression problem and split it roughly
# 90/10 into train/test via a uniform random draw.
# The three random draws happen in the same order (rnorm, rnorm, runif)
# as before, so the seeded results are unchanged.
set.seed(602957)
n.pts <- 1000
x <- rnorm(n.pts)
noise <- rnorm(n.pts, sd=1.5)
# True signal: a bumpy, non-monotone function of x.
y <- 3*sin(2*x) + cos(0.75*x) - 1.5*(x^2 ) + noise
select <- runif(n.pts)
frame <- data.frame(y=y, x = x)
train <- frame[select > 0.1,]
test <-frame[select <= 0.1,]
# example 9.7 of section 9.2.2
# (example 9.7 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Linear regression applied to our artificial example
# Baseline: ordinary linear regression of y on x (will underfit the
# nonlinear signal generated above).
lin.model <- lm(y ~ x, data=train)
summary(lin.model)
## Call:
## lm(formula = y ~ x, data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -17.698 -1.774 0.193 2.499 7.529
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.8330 0.1161 -7.175 1.51e-12 ***
## x 0.7395 0.1197 6.180 9.74e-10 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 3.485 on 899 degrees of freedom
## Multiple R-squared: 0.04075, Adjusted R-squared: 0.03968
## F-statistic: 38.19 on 1 and 899 DF, p-value: 9.737e-10
#
# calculate the root mean squared error (rmse)
#
# Root mean squared error of the linear model on the training data.
resid.lin <- train$y-predict(lin.model)
sqrt(mean(resid.lin^2))
## [1] 3.481091
# example 9.8 of section 9.2.2
# (example 9.8 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: GAM applied to our artificial example
library(mgcv) # Note: 1
# s(x) asks gam() to fit a smooth (spline) term in x instead of a line.
glin.model <- gam(y~s(x), data=train) # Note: 2
# Only trust the fit if the algorithm reports convergence.
glin.model$converged # Note: 3
## [1] TRUE
summary(glin.model)
## Family: gaussian # Note: 4
## Link function: identity
##
## Formula:
## y ~ s(x)
##
## Parametric coefficients: # Note: 5
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -0.83467 0.04852 -17.2 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms: # Note: 6
## edf Ref.df F p-value
## s(x) 8.685 8.972 497.8 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.832 Deviance explained = 83.4% # Note: 7
## GCV score = 2.144 Scale est. = 2.121 n = 901
#
# calculate the root mean squared error (rmse)
#
# Training RMSE of the GAM (compare with the linear model's RMSE above).
resid.glin <- train$y-predict(glin.model)
sqrt(mean(resid.glin^2))
## [1] 1.448514
# Note 1:
# Load the mgcv package.
# Note 2:
# Build the model, specifying that x should be
# treated as a nonlinear variable.
# Note 3:
# The converged parameter tells you if the algorithm
# converged. You should only trust the output if this is TRUE.
# Note 4:
# Setting family=gaussian and link=identity tells you that the model was treated with the same
# distributions assumptions as a standard linear regression.
# Note 5:
# The parametric coefficients are the linear terms (in this example, only the constant term).
# This section of the summary tells you which linear terms were
# significantly different from 0.
# Note 6:
# The smooth terms are the nonlinear terms. This section of the summary tells you which
# nonlinear terms were significantly different from 0. It also tells you
# the effective degrees of freedom (edf) used up to build each smooth
# term. An edf near 1 indicates that the variable has an approximately
# linear relationship to the output.
# Note 7:
# “R-sq (adj)” is the adjusted R-squared. “Deviance
# explained” is the raw R-squared (0.834).
# example 9.9 of section 9.2.2
# (example 9.9 of section 9.2.2) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : A one-dimensional regression example
# Title: Comparing linear regression and GAM performance
# Compare the linear model and the GAM on the held-out test set.
actual <- test$y
pred.lin <- predict(lin.model, newdata=test) # Note: 1
pred.glin <- predict(glin.model, newdata=test)
resid.lin <- actual-pred.lin
resid.glin <- actual-pred.glin
# Test RMSE for each model (lower is better).
sqrt(mean(resid.lin^2)) # Note: 2
## [1] 2.792653
sqrt(mean(resid.glin^2))
## [1] 1.401399
# Squared correlation of actual vs. predicted = R-squared on test data.
cor(actual, pred.lin)^2 # Note: 3
## [1] 0.1543172
cor(actual, pred.glin)^2
## [1] 0.7828869
# Note 1:
# Call both models on the test
# data.
# Note 2:
# Compare the RMSE of the linear model and the GAM
# on the test data.
# Note 3:
# Compare the R-squared of the linear model and the
# GAM on test data.
# example 9.10 of section 9.2.3
# (example 9.10 of section 9.2.3) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Extracting the nonlinear relationships
# Title: Extracting a learned spline from a GAM
# type="terms" returns the per-term smooth contribution s(x) for each
# training point (centered to mean zero, as the summary below shows).
sx <- predict(glin.model, type="terms")
summary(sx)
## s(x)
## Min. :-17.527035
## 1st Qu.: -2.378636
## Median : 0.009427
## Mean : 0.000000
## 3rd Qu.: 2.869166
## Max. : 4.084999
xframe <- cbind(train, sx=sx[,1])
# Plot raw data points with the learned spline overlaid.
# NOTE(review): assumes ggplot2 is already attached at this point in the
# script -- confirm; it is only library()'d further below.
ggplot(xframe, aes(x=x)) + geom_point(aes(y=y), alpha=0.4) +
  geom_line(aes(y=sx))
# example 9.11 of section 9.2.4
# (example 9.11 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Applying linear regression (with and without GAM) to health data
library(mgcv)
library(ggplot2)
# Natal birth-weight data; the data frame sdata comes from this .rData file.
load("NatalBirthData.rData")
# Split train/test on the pre-assigned random-group column.
train <- sdata[sdata$ORIGRANDGROUP<=5,]
test <- sdata[sdata$ORIGRANDGROUP>5,]
# Predict birth weight (DBWT) from mother's weight (PWGT), weight gain
# (WTGAIN), and MAGER/UPREVIS (presumably mother's age and number of
# prenatal visits -- confirm against the dataset's codebook).
form.lin <- as.formula("DBWT ~ PWGT + WTGAIN + MAGER + UPREVIS")
linmodel <- lm(form.lin, data=train) # Note: 1
summary(linmodel)
## Call:
## lm(formula = form.lin, data = train)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3155.43 -272.09 45.04 349.81 2870.55
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2419.7090 31.9291 75.784 < 2e-16 ***
## PWGT 2.1713 0.1241 17.494 < 2e-16 ***
## WTGAIN 7.5773 0.3178 23.840 < 2e-16 ***
## MAGER 5.3213 0.7787 6.834 8.6e-12 ***
## UPREVIS 12.8753 1.1786 10.924 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 562.7 on 14381 degrees of freedom
## Multiple R-squared: 0.06596, Adjusted R-squared: 0.0657 # Note: 2
## F-statistic: 253.9 on 4 and 14381 DF, p-value: < 2.2e-16
# Same four variables, each wrapped in s() so gam() can fit nonlinear effects.
form.glin <- as.formula("DBWT ~ s(PWGT) + s(WTGAIN) +
s(MAGER) + s(UPREVIS)")
glinmodel <- gam(form.glin, data=train) # Note: 3
# Only trust the fit if the algorithm reports convergence.
glinmodel$converged # Note: 4
## [1] TRUE
summary(glinmodel)
## Family: gaussian
## Link function: identity
##
## Formula:
## DBWT ~ s(PWGT) + s(WTGAIN) + s(MAGER) + s(UPREVIS)
##
## Parametric coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 3276.948 4.623 708.8 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms:
## edf Ref.df F p-value
## s(PWGT) 5.374 6.443 68.981 < 2e-16 ***
## s(WTGAIN) 4.719 5.743 102.313 < 2e-16 ***
## s(MAGER) 7.742 8.428 6.959 1.82e-09 ***
## s(UPREVIS) 5.491 6.425 48.423 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.0927 Deviance explained = 9.42% # Note: 5
## GCV score = 3.0804e+05 Scale est. = 3.0752e+05 n = 14386
# Note 1:
# Build a linear model with four
# variables.
# Note 2:
# The model explains about 7% of the variance; all
# coefficients are significantly different from 0.
# Note 3:
# Build a GAM with the same
# variables.
# Note 4:
# Verify that the model has
# converged.
# Note 5:
# The model explains just under 10% of the variance;
# all variables have a nonlinear effect significantly different from
# 0.
# example 9.12 of section 9.2.4
# (example 9.12 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Plotting GAM results
# Plot the learned smooth s(PWGT) against the raw data.
# Fix: scale=FALSE instead of F (F is an ordinary variable, reassignable).
terms <- predict(glinmodel, type="terms") # Note: 1
tframe <- cbind(DBWT = train$DBWT, as.data.frame(terms)) # Note: 2
# Strip parentheses so "s(PWGT)" becomes the valid column name "sPWGT".
colnames(tframe) <- gsub('[()]', '', colnames(tframe)) # Note: 3
pframe <- cbind(tframe, train[,c("PWGT", "WTGAIN",
                                 "MAGER", "UPREVIS")]) # Note: 4
# scale(..., scale=FALSE) centers to zero mean without rescaling.
p1 <- ggplot(pframe, aes(x=PWGT)) +
  geom_point(aes(y=scale(sPWGT, scale=FALSE))) + # Note: 5
  geom_smooth(aes(y=scale(DBWT, scale=FALSE))) # + # Note: 6
# [...] # Note: 7
# Note 1:
# Get the matrix of s()
# functions.
# Note 2:
# Bind in birth weight; convert to data
# frame.
# Note 3:
# Make the column names reference-friendly
# (“s(PWGT)” is converted to “sPWGT”, etc.).
# Note 4:
# Bind in the input variables.
# Note 5:
# Plot s(PWGT) shifted to be zero mean versus PWGT (mother’s weight) as points.
# Note 6:
# Plot the smoothing curve of DWBT (birth weight) shifted to be zero mean versus PWGT (mother’s
# weight).
# Note 7:
# Repeat for remaining variables (omitted for
# brevity).
# example 9.13 of section 9.2.4
# (example 9.13 of section 9.2.4) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM on actual data
# Title: Checking GAM model performance on hold-out data
# Out-of-sample comparison: R-squared of the linear model vs. the GAM.
pred.lin <- predict(linmodel, newdata=test) # Note: 1
pred.glin <- predict(glinmodel, newdata=test)
cor(pred.lin, test$DBWT)^2 # Note: 2
# [1] 0.0616812
cor(pred.glin, test$DBWT)^2
# [1] 0.08857426
# Note 1:
# Run both the linear model and the GAM on the test
# data.
# Note 2:
# Calculate R-squared for both
# models.
# example 9.14 of section 9.2.5
# (example 9.14 of section 9.2.5) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM for logistic regression
# Title: GLM logistic regression
# Logistic regression: model the probability that birth weight is below
# 2000 (presumably grams -- confirm units) from the same four variables.
form <- as.formula("DBWT < 2000 ~ PWGT + WTGAIN + MAGER + UPREVIS")
logmod <- glm(form, data=train, family=binomial(link="logit"))
# example 9.15 of section 9.2.5
# (example 9.15 of section 9.2.5) : Exploring advanced methods : Using generalized additive models (GAMs) to learn non-monotone relationships : Using GAM for logistic regression
# Title: GAM logistic regression
# GAM version of the low-birth-weight classifier: binomial family with a
# logit link makes this a (nonlinear) logistic regression.
form2 <- as.formula("DBWT<2000~s(PWGT)+s(WTGAIN)+
s(MAGER)+s(UPREVIS)")
glogmod <- gam(form2, data=train, family=binomial(link="logit"))
# Check convergence before trusting the summary.
glogmod$converged
## [1] TRUE
summary(glogmod)
## Family: binomial
## Link function: logit
##
## Formula:
## DBWT < 2000 ~ s(PWGT) + s(WTGAIN) + s(MAGER) + s(UPREVIS)
##
## Parametric coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.94085 0.06794 -58 <2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Approximate significance of smooth terms:
## edf Ref.df Chi.sq p-value
## s(PWGT) 1.905 2.420 2.463 0.36412 # Note: 1
## s(WTGAIN) 3.674 4.543 64.426 1.72e-12 ***
## s(MAGER) 1.003 1.005 8.335 0.00394 **
## s(UPREVIS) 6.802 7.216 217.631 < 2e-16 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## R-sq.(adj) = 0.0331 Deviance explained = 9.14% # Note: 2
## UBRE score = -0.76987 Scale est. = 1 n = 14386
# Note 1:
# Note that there’s no proof that the mother’s weight (PWGT) has a significant effect on
# outcome.
# Note 2:
# “Deviance explained” is the pseudo R-squared: 1 -
# (deviance/null.deviance).
# example 9.16 of section 9.3.1
# (example 9.16 of section 9.3.1) : Exploring advanced methods : Using kernel methods to increase data separation : Understanding kernel functions
# Title: An artificial kernel example
# Worked kernel example: k(u, v) is a kernel because it equals the dot
# product of the explicit feature expansions phi(u) %*% phi(v).
u <- c(1, 2)
v <- c(3, 4)
# Kernel of two 2-dimensional vectors: the sum of all first-order
# products, squared-entry products, and the cross product. # Note: 1
k <- function(u, v) {
  first.order <- u[1] * v[1] + u[2] * v[2]
  squares <- u[1] * u[1] * v[1] * v[1] + u[2] * u[2] * v[2] * v[2]
  cross <- u[1] * u[2] * v[1] * v[2]
  first.order + squares + cross
}
# Feature map: the vector itself, its squared entries, and all pairwise
# products of entries. # Note: 2
phi <- function(x) {
  x <- as.numeric(x)
  c(x, x * x, combn(x, 2, FUN = prod))
}
print(k(u, v)) # Note: 3
print(phi(u))
print(phi(v))
# phi() certifies that k(,) is a kernel: the dot products agree. # Note: 4
print(as.numeric(phi(u) %*% phi(v)))
# Note 1:
# Define a function of two vector variables
# (both two dimensional) as the sum of various products of terms.
# Note 2:
# Define a function of a single vector variable
# that returns a vector containing the original entries plus all products of
# entries.
# Note 3:
# Example evaluation of k(,).
# Note 4:
# Confirm phi() agrees with k(,). phi() is the certificate that shows k(,) is in fact a
# kernel.
# example 9.17 of section 9.3.2
# (example 9.17 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Applying stepwise linear regression to PUMS data
# PUMS income data; psub is assumed to be loaded earlier in the script.
dtrain <- subset(psub,ORIGRANDGROUP >= 500)
dtest <- subset(psub,ORIGRANDGROUP < 500) # Note: 1
# Stepwise-selected linear regression of log10 income on age, sex,
# class of worker, and schooling level.
m1 <- step( # Note: 2
  lm(log(PINCP,base=10) ~ AGEP + SEX + COW + SCHL,
     data=dtrain), # Note: 3
  direction='both')
# Root mean squared error between observed values y and predictions f. # Note: 4
rmse <- function(y, f) {
  squared.err <- (y - f)^2
  sqrt(mean(squared.err))
}
# Hold-out RMSE of the stepwise linear model (on the log10 scale).
print(rmse(log(dtest$PINCP,base=10),
           predict(m1,newdata=dtest))) # Note: 5
# [1] 0.2752171
# Note 1:
# Split data into test and training.
# Note 2:
# Ask that the linear regression model we’re building be
# stepwise improved, which is a powerful automated procedure for
# removing variables that don’t seem to have significant impacts
# (can improve generalization performance).
# Note 3:
# Build the basic linear regression model.
# Note 4:
# Define the RMSE function.
# Note 5:
# Calculate the RMSE between the prediction and the
# actuals.
# example 9.18 of section 9.3.2
# (example 9.18 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Applying an example explicit kernel transform
# Explicit ("primal") kernel transform: the vector itself, its squared
# entries, and the products of all pairs of entries. # Note: 1
phi <- function(x) {
  v <- as.numeric(x)
  c(v, v * v, combn(v, 2, FUN = prod))
}
# Mirror of phi() operating on variable names instead of values:
# the original names, "n:n" square-term names, then all "a:b"
# interaction names. # Note: 2
phiNames <- function(n) {
  squares <- paste(n, n, sep = ':')
  pairs <- combn(n, 2, FUN = function(x) {paste(x, collapse = ':')})
  c(n, squares, pairs)
}
# Build a numeric model matrix (categoricals expanded to indicator
# columns), then apply the primal kernel transform to every row.
modelMatrix <- model.matrix(~ 0 + AGEP + SEX + COW + SCHL,psub) # Note: 3
colnames(modelMatrix) <- gsub('[^a-zA-Z0-9]+','_',
                              colnames(modelMatrix)) # Note: 4
# apply() returns one column per input row, so transpose back to rows.
pM <- t(apply(modelMatrix,1,phi)) # Note: 5
vars <- phiNames(colnames(modelMatrix))
vars <- gsub('[^a-zA-Z0-9]+','_',vars)
colnames(pM) <- vars # Note: 6
pM <- as.data.frame(pM)
# Carry the outcome and the train/test split column into the new frame.
pM$PINCP <- psub$PINCP
pM$ORIGRANDGROUP <- psub$ORIGRANDGROUP
pMtrain <- subset(pM,ORIGRANDGROUP >= 500)
pMtest <- subset(pM,ORIGRANDGROUP < 500) # Note: 7
# Note 1:
# Define our primal kernel function: map a
# vector to a copy of itself plus all square terms and cross-multiplied
# terms.
# Note 2:
# Define a function similar to our primal
# kernel, but working on variable names instead of values.
# Note 3:
# Convert data to a matrix where all
# categorical variables are encoded as multiple numeric indicators.
# Note 4:
# Remove problematic characters from matrix
# column names.
# Note 5:
# Apply the primal kernel function to every
# row of the matrix and transpose results so they’re written as rows (not as a
# list as returned by apply()).
# Note 6:
# Extend names from original matrix to
# names for compound variables in new matrix.
# Note 7:
# Add in outcomes, test/train split
# columns, and prepare new data for modeling.
# example 9.19 of section 9.3.2
# (example 9.19 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Modeling using the explicit kernel transform
# Fit a linear model on all kernel-derived variables, keep those
# significant at p < 0.01, then stepwise-regress on that subset.
formulaStr2 <- paste('log(PINCP,base=10)',
                     paste(vars,collapse=' + '),
                     sep=' ~ ')
m2 <- lm(as.formula(formulaStr2),data=pMtrain)
coef2 <- summary(m2)$coefficients
interestingVars <- setdiff(rownames(coef2)[coef2[,'Pr(>|t|)']<0.01],
                           '(Intercept)')
# Always keep the original (untransformed) variables in the candidate set.
interestingVars <- union(colnames(modelMatrix),interestingVars) # Note: 1
formulaStr3 <- paste('log(PINCP,base=10)',
                     paste(interestingVars,collapse=' + '),
                     sep=' ~ ')
m3 <- step(lm(as.formula(formulaStr3),data=pMtrain),direction='both') # Note: 2
# Hold-out RMSE of the kernelized model (compare with m1's RMSE above).
print(rmse(log(pMtest$PINCP,base=10),predict(m3,newdata=pMtest))) # Note: 3
# [1] 0.2735955
# [1] 0.2735955
# Note 1:
# Select a set of interesting variables by building an initial model using all of the new
# variables and retaining an interesting subset. This is an ad hoc
# move to speed up the stepwise regression by trying to quickly
# dispose of many useless derived variables. By introducing many new
# variables, the primal kernel method also introduces many new degrees
# of freedom, which can invite overfitting.
# Note 2:
# Stepwise regress on subset of variables to
# get new model.
# Note 3:
# Calculate the RMSE between the prediction and the actuals.
# example 9.20 of section 9.3.2
# (example 9.20 of section 9.3.2) : Exploring advanced methods : Using kernel methods to increase data separation : Using an explicit kernel on a problem
# Title: Inspecting the results of the explicit kernel model
# Inspect the final stepwise model: retained terms, coefficients, fit stats.
print(summary(m3))
## Call:
## lm(formula = log(PINCP, base = 10) ~ AGEP + SEXM +
## COWPrivate_not_for_profit_employee +
## SCHLAssociate_s_degree + SCHLBachelor_s_degree +
## SCHLDoctorate_degree +
## SCHLGED_or_alternative_credential + SCHLMaster_s_degree +
## SCHLProfessional_degree + SCHLRegular_high_school_diploma +
## SCHLsome_college_credit_no_degree + AGEP_AGEP, data = pMtrain)
##
## Residuals:
## Min 1Q Median 3Q Max
## -1.29264 -0.14925 0.01343 0.17021 0.61968
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 2.9400460 0.2219310 13.248 < 2e-16 ***
## AGEP 0.0663537 0.0124905 5.312 1.54e-07 ***
## SEXM 0.0934876 0.0224236 4.169 3.52e-05 ***
## COWPrivate_not_for_profit_em -0.1187914 0.0379944 -3.127 0.00186 **
## SCHLAssociate_s_degree 0.2317211 0.0509509 4.548 6.60e-06 ***
## SCHLBachelor_s_degree 0.3844459 0.0417445 9.210 < 2e-16 ***
## SCHLDoctorate_degree 0.3190572 0.1569356 2.033 0.04250 *
## SCHLGED_or_alternative_creden 0.1405157 0.0766743 1.833 0.06737 .
## SCHLMaster_s_degree 0.4553550 0.0485609 9.377 < 2e-16 ***
## SCHLProfessional_degree 0.6525921 0.0845052 7.723 5.01e-14 ***
## SCHLRegular_high_school_diplo 0.1016590 0.0415834 2.445 0.01479 *
## SCHLsome_college_credit_no_de 0.1655906 0.0416345 3.977 7.85e-05 ***
## AGEP_AGEP -0.0007547 0.0001704 -4.428 1.14e-05 ***
## ---
## Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
##
## Residual standard error: 0.2649 on 582 degrees of freedom
## Multiple R-squared: 0.3541, Adjusted R-squared: 0.3408
## F-statistic: 26.59 on 12 and 582 DF, p-value: < 2.2e-16
# example 9.21 of section 9.4.2
# (example 9.21 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: Setting up the spirals data as an example classification problem
library('kernlab')
data('spirals') # Note: 1
# Spectral clustering recovers the two interleaved spirals; its cluster
# assignment becomes the class label for the classification example.
sc <- specc(spirals, centers = 2) # Note: 2
s <- data.frame(x=spirals[,1],y=spirals[,2],
                class=as.factor(sc)) # Note: 3
library('ggplot2')
ggplot(data=s) +
  geom_text(aes(x=x,y=y,
                label=class,color=class)) +
  coord_fixed() +
  theme_bw() + theme(legend.position='none') # Note: 4
# Note 1:
# Load the kernlab kernel and support vector
# machine package and then ask that the included example "spirals" be made
# available.
# Note 2:
# Use kernlab’s spectral clustering routine
# to identify the two different spirals in the example dataset.
# Note 3:
# Combine the spiral coordinates and the
# spiral label into a data frame.
# Note 4:
# Plot the spirals with class labels.
# example 9.22 of section 9.4.2
# (example 9.22 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: SVM with a poor choice of kernel
# Train/test split by random group, then fit a linear-kernel SVM
# (deliberately a poor kernel choice for spiral-shaped classes).
# Fix: replace=TRUE instead of T (T is an ordinary, reassignable variable).
set.seed(2335246L)
s$group <- sample.int(100,size=dim(s)[[1]],replace=TRUE)
sTrain <- subset(s,group>10)
sTest <- subset(s,group<=10) # Note: 1
# mSVMV <- ksvm(class~x+y,data=sTrain,kernel='vanilladot')
# had been using ksvm, but it seems to keep bad state in some cases
library('e1071')
mSVMV <- svm(class~x+y,data=sTrain,kernel='linear',type='nu-classification') # Note: 2
sTest$predSVMV <- predict(mSVMV,newdata=sTest,type='response') # Note: 3
# Overlay large predicted labels on a faint copy of the true labels.
ggplot() +
  geom_text(data=sTest,aes(x=x,y=y,
                           label=predSVMV),size=12) +
  geom_text(data=s,aes(x=x,y=y,
                       label=class,color=class),alpha=0.7) +
  coord_fixed() +
  theme_bw() + theme(legend.position='none') # Note: 4
# Note 1:
# Prepare to try to learn spiral class label
# from coordinates using a support vector machine.
# Note 2:
# Build the support vector model using a
# vanilladot kernel (not a very good kernel).
# Note 3:
# Use the model to predict class on held-out
# data.
# Note 4:
# Plot the predictions on top of a grey copy
# of all the data so we can see if predictions agree with the original
# markings.
# example 9.23 of section 9.4.2
# (example 9.23 of section 9.4.2) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Trying an SVM on artificial example data
# Title: SVM with a good choice of kernel
# mSVMG <- ksvm(class~x+y,data=sTrain,kernel='rbfdot')
# had been using ksvm, but it seems to be keeping bad state in some cases
# mSVMG <- ksvm(class~x+y,data=sTrain,kernel='rbfdot')
# had been using ksvm, but it seems to be keeping bad state in some cases
# Radial (Gaussian) kernel: a geometric similarity measure that suits
# the spiral shapes far better than the linear kernel above.
mSVMG <- svm(class~x+y,data=sTrain,kernel='radial',type='nu-classification') # Note: 1
sTest$predSVMG <- predict(mSVMG,newdata=sTest,type='response')
ggplot() +
  geom_text(data=sTest,aes(x=x,y=y,
                           label=predSVMG),size=12) +
  geom_text(data=s,aes(x=x,y=y,
                       label=class,color=class),alpha=0.7) +
  coord_fixed() +
  theme_bw() + theme(legend.position='none')
# Note 1:
# This time use the "radial" or
# Gaussian kernel, which is a nice geometric similarity measure.
# example 9.24 of section 9.4.3
# (example 9.24 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Revisiting the Spambase example with GLM
# Baseline for the Spambase data: logistic regression (GLM).
# Fix: header=TRUE instead of T (T is an ordinary, reassignable variable).
spamD <- read.table('spamD.tsv',header=TRUE,sep='\t')
spamTrain <- subset(spamD,spamD$rgroup>=10)
spamTest <- subset(spamD,spamD$rgroup<10)
# All columns except the split group and the label are predictors.
spamVars <- setdiff(colnames(spamD),list('rgroup','spam'))
spamFormula <- as.formula(paste('spam=="spam"',
                                paste(spamVars,collapse=' + '),sep=' ~ '))
spamModel <- glm(spamFormula,family=binomial(link='logit'),
                 data=spamTrain)
spamTest$pred <- predict(spamModel,newdata=spamTest,
                         type='response')
# Confusion matrix at the 0.5 probability cutoff.
print(with(spamTest,table(y=spam,glPred=pred>=0.5)))
## glPred
## y FALSE TRUE
## non-spam 264 14
## spam 22 158
# example 9.25 of section 9.4.3
# (example 9.25 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Applying an SVM to the Spambase example
# SVM on the Spambase data with a Gaussian kernel.
# Fix: prob.model=TRUE instead of T (T is reassignable); comment typo fixed.
library('kernlab')
spamFormulaV <- as.formula(paste('spam',
                                 paste(spamVars,collapse=' + '),sep=' ~ '))
# may want to switch to library('e1071') svm() as had some state holding problems in some examples
svmM <- ksvm(spamFormulaV,data=spamTrain, # Note: 1
             kernel='rbfdot', # Note: 2
             C=10, # Note: 3
             prob.model=TRUE,cross=5, # Note: 4
             class.weights=c('spam'=1,'non-spam'=10) # Note: 5
             )
spamTest$svmPred <- predict(svmM,newdata=spamTest,type='response')
# Confusion matrix for the SVM's hard class predictions.
print(with(spamTest,table(y=spam,svmPred=svmPred)))
## svmPred
## y non-spam spam
## non-spam 269 9
## spam 27 153
# Note 1:
# Build a support vector model for the Spambase
# problem.
# Note 2:
# Ask for the radial dot or Gaussian kernel (in
# fact the default kernel).
# Note 3:
# Set the “soft margin penalty” high; prefer not moving training examples over getting a wider
# margin. Prefer a complex model that applies weakly to all the data
# over a simpler model that applies strongly on a subset of the
# data.
# Note 4:
# Ask that, in addition to a predictive model, an estimate of a model estimating class
# probabilities also be built. Not all SVM libraries support this
# operation, and the probabilities are essentially built after the
# model (through a cross-validation procedure) and may not be as high-quality
# as the model itself.
# Note 5:
# Explicitly control the trade-off between
# false positive and false negative errors. In this case, we say non-spam
# classified as spam (a false positive) should be considered an expensive
# mistake.
# example 9.26 of section 9.4.3
# (example 9.26 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Printing the SVM results summary
# Print the kernlab model summary (kernel, C, support vectors, CV error).
print(svmM)
## Support Vector Machine object of class "ksvm"
##
## SV type: C-svc (classification)
## parameter : cost C = 10
##
## Gaussian Radial Basis kernel function.
## Hyperparameter : sigma = 0.0299836801848002
##
## Number of Support Vectors : 1118
##
## Objective Function Value : -4642.236
## Training error : 0.028482
## Cross validation error : 0.076998
## Probability model included.
# example 9.27 of section 9.4.3
# (example 9.27 of section 9.4.3) : Exploring advanced methods : Using SVMs to model complicated decision boundaries : Using SVMs on real data
# Title: Shifting decision point to perform an apples-to-apples comparison
# Pick the GLM score threshold that also flags exactly 162 messages as
# spam, so the GLM and SVM confusion matrices are directly comparable.
sameCut <- sort(spamTest$pred)[length(spamTest$pred)-162] # Note: 1
print(with(spamTest,table(y=spam,glPred=pred>sameCut))) # Note: 2
## glPred
## y FALSE TRUE
## non-spam 267 11
## spam 29 151
# Note 1:
# Find out what GLM score threshold has 162
# examples above it.
# Note 2:
# Ask the GLM model for its predictions that
# are above the threshold. We’re essentially asking the model for its 162 best
# candidate spam prediction results.
# informalexample 10.1 of section 10.2.1
# (informalexample 10.1 of section 10.2.1) : Documentation and deployment : Using knitr to produce milestone documentation : What is knitr?
# Render simple.Rmd to markdown (requires knitr and the file to exist in
# the current working directory).
library(knitr)
knit('simple.Rmd')
# informalexample 10.2 of section 10.2.1
# (informalexample 10.2 of section 10.2.1) : Documentation and deployment : Using knitr to produce milestone documentation : What is knitr?
# The following are shell commands, not R code; left verbatim but commented
# out so this file stays parseable by source(). Run them at a shell prompt:
# echo "library(knitr); knit('add.Rnw')" | R --vanilla # Note: 1
# pdflatex add.tex # Note: 2
# Note 1:
# Use R in batch mode to create add.tex from
# add.Rnw.
# Note 2:
# Use LaTeX to create add.pdf from
# add.tex.
# example 10.7 of section 10.3.1
# (example 10.7 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Example code comment
# Return the pseudo logarithm of x, which is close to
# sign(x)*log10(abs(x)) for x such that abs(x) is large
# and doesn't "blow up" near zero. Useful
# for transforming wide-range variables that may be negative
# (like profit/loss).
# See: http://www.win-vector.com/blog
# /2012/03/modeling-trick-the-signed-pseudo-logarithm/
# NB: This transform has the undesirable property of making most
# signed distributions appear bimodal around the origin, no matter
# what the underlying distribution really looks like.
# The argument x is assumed be numeric and can be a vector.
# Signed pseudo-logarithm: tracks sign(x)*log10(abs(x)) for large |x| yet
# stays smooth (and exactly zero) at the origin. Vectorized over x.
pseudoLog10 <- function(x) {
  asinh(x / 2) / log(10)
}
# example 10.8 of section 10.3.1
# (example 10.8 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Useless comment
#######################################
# Function: addone
# Author: John Mount
# Version: 1.3.11
# Location: RSource/helperFns/addone.R
# Date: 10/31/13
# Arguments: x
# Purpose: Adds one
#######################################
# Increment a numeric vector by one.
addone <- function(x) {
  x + 1
}
# example 10.9 of section 10.3.1
# (example 10.9 of section 10.3.1) : Documentation and deployment : Using comments and version control for running documentation : Writing effective comments
# Title: Worse than useless comment
# adds one
# Increment a numeric vector by two.
addtwo <- function(x) {
  x + 2
}
# example 10.16 of section 10.4.1
# (example 10.16 of section 10.4.1) : Documentation and deployment : Deploying models : Deploying models as R HTTP services
# Title: Buzz model as an R-based HTTP service
library(Rook) # Note: 1
# Restores the saved workspace: buzztrain, varslist, fmodel (random
# forest), etc. -- confirm exact contents against the .Rdata file.
load('thRS500.Rdata') # Note: 2
library(randomForest) # Note: 3
# HTTP form fields arrive as character; record which model inputs must
# be converted back to numeric before scoring.
numericPositions <- sapply(buzztrain[,varslist],is.numeric) # Note: 4
# Rook application: score one data row (posted as a multipart form) with
# the loaded random forest model and return a plain-text report.
# env: the Rook request environment.
# Returns a list(status, headers, body) in the shape Rook expects;
# status is 200 on success, 400 if any error was captured.
modelFn <- function(env) { # Note: 5
  errors <- c()
  warnings <- c()
  val <- c()
  row <- c()
  tryCatch(
    {
      arg <- Multipart$parse(env) # Note: 6
      # Keep only the model variables, in the expected order.
      row <- as.list(arg[varslist])
      names(row) <- varslist
      row[numericPositions] <- as.numeric(row[numericPositions])
      frame <- data.frame(row)
      val <- predict(fmodel,newdata=frame)
    },
    # NOTE(review): a tryCatch warning handler unwinds the expression, so
    # any warning during parse/predict leaves val empty -- confirm this is
    # intended (withCallingHandlers would record the warning and continue).
    warning = function(w) { message(w)
      warnings <<- c(warnings,as.character(w)) },
    error = function(e) { message(e)
      errors <<- c(errors,as.character(e)) }
  )
  # Plain-text response: the prediction plus error/warning diagnostics
  # and an echo of the parsed data row.
  body <- paste( # Note: 7
    'val=',val,'\n',
    'nerrors=',length(errors),'\n',
    'nwarnings=',length(warnings),'\n',
    'query=',env$QUERY_STRING,'\n',
    'errors=',paste(errors,collapse=' '),'\n',
    'warnings=',paste(warnings,collapse=' '),'\n',
    'data row','\n',
    paste(capture.output(print(row)),collapse='\n'),'\n',
    sep='')
  list(
    status=ifelse(length(errors)<=0,200L,400L),
    headers=list('Content-Type' = 'text/text'),
    body=body )
}
s <- Rhttpd$new() # Note: 8
s$add(name="modelFn",app=modelFn) # Note: 9
s$start() # Note: 10
print(s)
## Server started on 127.0.0.1:20714
## [1] modelFn http://127.0.0.1:20714/custom/modelFn # Note: 11
##
## Call browse() with an index number or name to run an application.
# Note 1:
# Load the rook HTTP server library.
# Note 2:
# Load the saved buzz workspace (includes the
# random forest model).
# Note 3:
# Load the random forest library (loading the
# workspace doesn’t load the library).
# Note 4:
# Determine which variables are numeric (in the
# rook server, everything defaults to
# character).
# Note 5:
# Declare the modeling service.
# Note 6:
# This block does the actual work: parse data
# and apply the model.
# Note 7:
# Format results, place in a list, and
# return.
# Note 8:
# Start a new rook HTTP service.
# Note 9:
# Register our model function as an HTTP
# service.
# Note 10:
# Start the HTTP server.
# Note 11:
# This is the URL where the service is
# running.
# example 10.17 of section 10.4.1
# (example 10.17 of section 10.4.1) : Documentation and deployment : Deploying models : Deploying models as R HTTP services
# Title: Calling the buzz HTTP service
# Render one data row as a self-contained HTML form that POSTs every
# model variable (one text input per variable in varslist, a global) to
# the given URL when the Send button is clicked.
# url: the scoring service endpoint; row: a one-row data frame.
# Returns the HTML document as a single string.
rowAsForm <- function(url, row) {
  header <- paste0('<HTML><HEAD></HEAD><BODY><FORM action="', url,
     '" enctype="multipart/form-data" method="POST">\n')
  # One paragraph per variable: label plus a pre-filled text input.
  fieldFor <- function(a, b) {
    paste0('<p> ', a, ' <input type="text" name="', a,
       '" value="', b, '"/> </p>')
  }
  vals <- as.list(row)[varslist]
  fields <- mapply(fieldFor, varslist, vals)
  paste(header,
        '<input type="submit" value="Send"/>',
        paste(fields, collapse='\n'),
        '</FORM></BODY></HTML>', sep='\n')
}
url <- 'http://127.0.0.1:20714/custom/modelFn' # Note: 2
cat(rowAsForm(url,buzztest[7,]),file='buzztest7.html') # Note: 3
# Note 1:
# Function to convert a row of dataset into a
# huge HTML form that transmits all of the variable
# values to HTTP server on submit (when the Send
# button is clicked).
# Note 2:
# The URL we started the rook HTTP server on;
# you’ll have to copy the URL address and port from
# what’s printed when you started the Rook
# service.
# Note 3:
# Write the form representing the variables for
# the seventh test example to a file.
# example 10.18 of section 10.4.2
# (example 10.18 of section 10.4.2) : Documentation and deployment : Deploying models : Deploying models by export
# Title: Exporting the random forest model
load('thRS500.Rdata') # Note: 1
library(randomForest) # Note: 2
# Flatten a randomForest model into one data frame of decision-tree
# nodes. Each row is a node; treeid/nodeid identify the tree and the
# node within it, so the forest can be exported to other software.
# rfModel: a fitted randomForest object.
extractTrees <- function(rfModel) { # Note: 3
  extractOne <- function(i) {
    # labelVar=TRUE keeps human-readable split-variable names.
    ti <- getTree(rfModel, k=i, labelVar=TRUE)
    ti$nodeid <- seq_len(nrow(ti))   # node index within this tree
    ti$treeid <- i                   # which tree in the forest
    ti
  }
  # lapply is the idiomatic list map (original used sapply(simplify=F)).
  do.call('rbind', lapply(seq_len(rfModel$ntree), extractOne))
}
write.table(extractTrees(fmodel), # Note: 4
file='rfmodel.tsv',row.names=F,sep='\t',quote=F)
# Note 1:
# Load the saved buzz workspace (includes the
# random forest model).
# Note 2:
# Load the random forest library (loading the
# workspace doesn’t load the library).
# Note 3:
# Define a function that joins the tree tables
# from the random forest getTree() method into one
# large table of trees.
# Note 4:
# Write the table of trees as a tab-separated
# values table (easy for other software to
# read).
# informalexample A.1 of section A.1.5
# (informalexample A.1 of section A.1.5) : Working with R and other tools : Installing the tools : R resources
install.packages('ctv',repos='https://cran.r-project.org')
library('ctv')
# install.views('TimeSeries') # can take a LONG time
# example A.1 of section A.2
# (example A.1 of section A.2) : Working with R and other tools : Starting with R
# Title: Trying a few R commands
1
## [1] 1
1/2
## [1] 0.5
'Joe'
## [1] "Joe"
"Joe"
## [1] "Joe"
"Joe"=='Joe'
## [1] TRUE
c()
## NULL
is.null(c())
## [1] TRUE
is.null(5)
## [1] FALSE
c(1)
## [1] 1
c(1,2)
## [1] 1 2
c("Apple",'Orange')
## [1] "Apple" "Orange"
length(c(1,2))
## [1] 2
vec <- c(1,2)
vec
## [1] 1 2
# informalexample A.2 of section A.2.1
# (informalexample A.2 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
x <- 2
x < - 3
## [1] FALSE
print(x)
## [1] 2
# example A.2 of section A.2.1
# (example A.2 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: Binding values to function arguments
divide <- function(numerator,denominator) { numerator/denominator }
divide(1,2)
## [1] 0.5
divide(2,1)
## [1] 2
divide(denominator=2,numerator=1)
## [1] 0.5
divide(denominator<-2,numerator<-1) # yields 2, a wrong answer
## [1] 2
# example A.3 of section A.2.1
# (example A.3 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: Demonstrating side effects
x<-1
good <- function() { x <- 5}
good()
print(x)
## [1] 1
bad <- function() { x <<- 5}
bad()
print(x)
## [1] 5
# example A.4 of section A.2.1
# (example A.4 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: R truth tables for Boolean operators
c(T,T,F,F) == c(T,F,T,F)
## [1] TRUE FALSE FALSE TRUE
c(T,T,F,F) & c(T,F,T,F)
## [1] TRUE FALSE FALSE FALSE
c(T,T,F,F) | c(T,F,T,F)
## [1] TRUE TRUE TRUE FALSE
# informalexample A.3 of section A.2.1
# (informalexample A.3 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
add <- function(a,b) { a + b}
add(1,2)
## [1] 3
# informalexample A.4 of section A.2.1
# (informalexample A.4 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
add(1,'fred')
## Error in a + b : non-numeric argument to binary operator
# example A.5 of section A.2.1
# (example A.5 of section A.2.1) : Working with R and other tools : Starting with R : Primary features of R
# Title: Call-by-value effect
vec <- c(1,2)
fun <- function(v) { v[[2]]<-5; print(v)}
fun(vec)
## [1] 1 5
print(vec)
## [1] 1 2
# informalexample A.5 of section A.2.2
# (informalexample A.5 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
vec <- c(2,3)
vec[[2]] <- 5
print(vec)
## [1] 2 5
# example A.6 of section A.2.2
# (example A.6 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: Examples of R indexing operators
x <- list('a'=6,b='fred')
names(x)
## [1] "a" "b"
x$a
## [1] 6
x$b
## [1] "fred"
x[['a']]
## $a
## [1] 6
x[c('a','a','b','b')]
## $a
## [1] 6
##
## $a
## [1] 6
##
## $b
## [1] "fred"
##
## $b
## [1] "fred"
# example A.7 of section A.2.2
# (example A.7 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: R’s treatment of unexpected factor levels
factor('red',levels=c('red','orange'))
## [1] red
## Levels: red orange
factor('apple',levels=c('red','orange'))
## [1] <NA>
## Levels: red orange
# example A.8 of section A.2.2
# (example A.8 of section A.2.2) : Working with R and other tools : Starting with R : Primary R data types
# Title: Confirm lm() encodes new strings correctly.
d <- data.frame(x=factor(c('a','b','c')),
y=c(1,2,3))
m <- lm(y~0+x,data=d) # Note: 1
print(predict(m,
newdata=data.frame(x='b'))[[1]]) # Note: 2
# [1] 2
print(predict(m,
newdata=data.frame(x=factor('b',levels=c('b'))))[[1]]) # Note: 3
# [1] 2
# Note 1:
# Build a data frame and linear model mapping
# a,b,c to 1,2,3.
# Note 2:
# Show that model gets correct prediction for
# b as a string.
# Note 3:
# Show that model gets correct prediction for
# b as a factor, encoded with a different number of
# levels. This shows that lm() is correctly treating
# factors as strings.
# example A.9 of section A.2.3
# (example A.9 of section A.2.3) : Working with R and other tools : Starting with R : Loading data from HTTPS sources
# Title: Loading UCI car data directly from GitHub using HTTPS
require(RCurl) # Note: 1
urlBase <-
'https://raw.githubusercontent.com/WinVector/zmPDSwR/master/' # Note: 2
mkCon <- function(nm) { # Note: 3
textConnection(getURL(paste(urlBase,nm,sep='/')))
}
cars <- read.table(mkCon('car.data.csv'), # Note: 4
sep=',',header=T,comment.char='')
# Note 1:
# Bring in the RCurl library for more connection
# methods.
# Note 2:
# Form a valid HTTPS base URL for raw access to
# the GitHub repository.
# Note 3:
# Define a function that wraps a URL path
# fragment into a usable HTTPS connection.
# Note 4:
# Load the car data from GitHub over
# HTTPS.
# example A.10 of section A.3.2
# (example A.10 of section A.3.2) : Working with R and other tools : Using databases with R : Starting with SQuirreL SQL
# Title: Reading database data into R
install.packages('RJDBC',repos='https://cran.r-project.org') # Note: 1
library('RJDBC') # Note: 2
drv <- JDBC("org.h2.Driver","h2-1.3.170.jar",identifier.quote="'") # Note: 3
conn <- dbConnect(drv,"jdbc:h2:h2demodb_h2","u","u") # Note: 4
d <- dbGetQuery(conn,"SELECT * FROM example_table") # Note: 5
print(d) # Note: 6
## STATUSID NAME
## 1 1 Joe
## 2 2 Fred # Note: 7
# Note 1:
# Install the RJDBC package from the CRAN
# package repository.
# Note 2:
# Load the RJDBC library.
# Note 3:
# Use the RJDBC library to build a database
# driver.
# Note 4:
# Use the database driver to build a database
# connection. In our SQuirreL SQL example, we used
# the path /Users/johnmount/Downloads/h2demodb_h2.
# So the path fragment given here (h2demodb_h2)
# works only if R is working in the directory
# /Users/johnmount/Downloads. You would alter all of
# these paths and URLs to work for your own
# directories.
# Note 5:
# Run a SQL select query using the database
# connection to populate a data frame.
# Note 6:
# Print the result data frame.
# Note 7:
# The database table as an R data frame.
# example A.11 of section A.3.4
# (example A.11 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Loading an Excel spreadsheet
library(gdata)
bookings <- read.xls('Workbook1.xlsx',sheet=1,pattern='date',
stringsAsFactors=F,as.is=T)
prices <- read.xls('Workbook1.xlsx',sheet=2,pattern='date',
stringsAsFactors=F,as.is=T)
# example A.12 of section A.3.4
# (example A.12 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: The hotel reservation and price data
print(bookings)
## date day.of.stay X1.before X2.before X3.before
## 1 2013-07-01 105 98 95 96
## 2 2013-07-02 103 100 98 95
## 3 2013-07-03 105 95 90 80
## 4 2013-07-04 105 105 107 98
print(prices)
## date day.of.stay X1.before X2.before X3.before
## 1 2013-07-01 $250.00 $200.00 $280.00 $300.00
## 2 2013-07-02 $200.00 $250.00 $290.00 $250.00
## 3 2013-07-03 $200.00 $200.00 $250.00 $275.00
## 4 2013-07-04 $250.00 $300.00 $300.00 $200.00
# example A.13 of section A.3.4
# (example A.13 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Using melt to restructure data
library('reshape2')
bthin <- melt(bookings,id.vars=c('date'),
variable.name='daysBefore',value.name='bookings') # Note: 1
pthin <- melt(prices,id.vars=c('date'),
variable.name='daysBefore',value.name='price') # Note: 2
daysCodes <- c('day.of.stay', 'X1.before', 'X2.before', 'X3.before')
bthin$nDaysBefore <- match(bthin$daysBefore,daysCodes)-1 # Note: 3
pthin$nDaysBefore <- match(pthin$daysBefore,daysCodes)-1 # Note: 4
pthin$price <- as.numeric(gsub('\\$','',pthin$price)) # Note: 5
print(head(pthin))
## date daysBefore price nDaysBefore
## 1 2013-07-01 day.of.stay 250 0
## 2 2013-07-02 day.of.stay 200 0
## 3 2013-07-03 day.of.stay 200 0
## 4 2013-07-04 day.of.stay 250 0
## 5 2013-07-01 X1.before 200 1
## 6 2013-07-02 X1.before 250 1
# Note 1:
# Use melt to change columns that are not date
# (day.of.stay, Xn.before) to values stored in a new
# column called daysBefore. Each booking count
# becomes a new row (instead of having many
# different bookings in the same row).
# Note 2:
# Each price entry becomes a new row (instead
# of having many different prices in the same
# row).
# Note 3:
# Use match and dayCodes list to convert key
# strings to numeric nDaysBefore in our bookings
# data.
# Note 4:
# Use match and dayCodes list to convert key
# strings to numeric nDaysBefore in our price
# data.
# Note 5:
# Remove dollar sign and convert prices to
# numeric type.
# example A.14 of section A.3.4
# (example A.14 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Assembling many rows using SQL
options(gsubfn.engine = "R") # Note: 1
library('sqldf')
joined <- sqldf(' # Note: 2
select # Note: 3
bCurrent.date as StayDate, # Note: 4
bCurrent.daysBefore as daysBefore,
bCurrent.nDaysBefore as nDaysBefore,
p.price as price,
bCurrent.bookings as bookingsCurrent,
bPrevious.bookings as bookingsPrevious,
bCurrent.bookings - bPrevious.bookings as pickup
from
bthin bCurrent # Note: 5
join
bthin bPrevious # Note: 6
on
bCurrent.date=bPrevious.date
and bCurrent.nDaysBefore+1=bPrevious.nDaysBefore # Note: 7
join
pthin p # Note: 8
on
bCurrent.date=p.date
and bCurrent.nDaysBefore=p.nDaysBefore # Note: 9
')
print(joined)
# Note 1:
# Prevent library(sqldf) from triggering a
# tcl/tk dependency which causes R to exit on OS X
# if X11 isn’t installed. See
# https://code.google.com/p/sqldf/ for
# troubleshooting details.
# Note 2:
# Create a new data frame of rows built out of
# triples of rows from pthin and bthin.
# Note 3:
# SQL statements typically start with the word
# “select.”
# Note 4:
# List of derived columns (and their new
# names) for our new data frame.
# Note 5:
# First data frame we’re pulling data from:
# bthin.
# Note 6:
# Second pull from bthin.
# Note 7:
# Conditions to match bCurrent rows to
# bPrevious rows.
# Note 8:
# Third data frame we are pulling data from:
# pthin.
# Note 9:
# Conditions to match p to bCurrent (and
# implicitly bPrevious).
# example A.15 of section A.3.4
# (example A.15 of section A.3.4) : Working with R and other tools : Using databases with R : An example SQL data transformation task
# Title: Showing our hotel model results
library('ggplot2')
ggplot(data=joined,aes(x=price,y=pickup)) +
geom_point() + geom_jitter() + geom_smooth(method='lm')
print(summary(lm(pickup~price,data=joined)))
#
#Call:
#lm(formula = pickup ~ price, data = joined)
#
#Residuals:
# Min 1Q Median 3Q Max
#-4.614 -2.812 -1.213 3.387 6.386
#
#Coefficients:
# Estimate Std. Error t value Pr(>|t|)
#(Intercept) 11.00765 7.98736 1.378 0.198
#price -0.02798 0.03190 -0.877 0.401
#
#Residual standard error: 4.21 on 10 degrees of freedom
#Multiple R-squared: 0.07144, Adjusted R-squared: -0.02142
#F-statistic: 0.7693 on 1 and 10 DF, p-value: 0.401
# example B.1 of section B.1.1
# (example B.1 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting the theoretical normal density
library(ggplot2)
x <- seq(from=-5, to=5, length.out=100) # the interval [-5 5]
f <- dnorm(x) # normal with mean 0 and sd 1
ggplot(data.frame(x=x,y=f), aes(x=x,y=y)) + geom_line()
# example B.2 of section B.1.1
# (example B.2 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting an empirical normal density
library(ggplot2)
# draw 1000 points from a normal with mean 0, sd 1
u <- rnorm(1000)
# plot the distribution of points,
# compared to normal curve as computed by dnorm() (dashed line)
ggplot(data.frame(x=u), aes(x=x)) + geom_density() +
geom_line(data=data.frame(x=x,y=f), aes(x=x,y=y), linetype=2)
# example B.3 of section B.1.1
# (example B.3 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Working with the normal cdf
# --- estimate probabilities (areas) under the curve ---
# 50% of the observations will be less than the mean
pnorm(0)
# [1] 0.5
# about 2.3% of all observations are more than 2 standard
# deviations below the mean
pnorm(-2)
# [1] 0.02275013
# about 95.4% of all observations are within 2 standard deviations
# from the mean
pnorm(2) - pnorm(-2)
# [1] 0.9544997
# example B.4 of section B.1.1
# (example B.4 of section B.1.1) : Important statistical concepts : Distributions : Normal distribution
# Title: Plotting x < qnorm(0.75)
# --- return the quantiles corresponding to specific probabilities ---
# the median (50th percentile) of a normal is also the mean
qnorm(0.5)
# [1] 0
# calculate the 75th percentile
qnorm(0.75)
# [1] 0.6744898
pnorm(0.6744898)
# [1] 0.75
# --- Illustrate the 75th percentile ---
# create a graph of the normal distribution with mean 0, sd 1
x <- seq(from=-5, to=5, length.out=100)
f <- dnorm(x)
nframe <- data.frame(x=x,y=f)
# calculate the 75th percentile
line <- qnorm(0.75)
xstr <- sprintf("qnorm(0.75) = %1.3f", line)
# the part of the normal distribution to the left
# of the 75th percentile
nframe75 <- subset(nframe, nframe$x < line)
# Plot it.
# The shaded area is 75% of the area under the normal curve
ggplot(nframe, aes(x=x,y=y)) + geom_line() +
geom_area(data=nframe75, aes(x=x,y=y), fill="gray") +
geom_vline(aes(xintercept=line), linetype=2) +
geom_text(x=line, y=0, label=xstr, vjust=1)
# example B.5 of section B.1.3
# (example B.5 of section B.1.3) : Important statistical concepts : Distributions : Lognormal distribution
# Title: Demonstrating some properties of the lognormal distribution
# draw 1001 samples from a lognormal with meanlog 0, sdlog 1
u <- rlnorm(1001)
# the mean of u is higher than the median
mean(u)
# [1] 1.638628
median(u)
# [1] 1.001051
# the mean of log(u) is approx meanlog=0
mean(log(u))
# [1] -0.002942916
# the sd of log(u) is approx sdlog=1
sd(log(u))
# [1] 0.9820357
# generate the lognormal with meanlog=0, sdlog=1
x <- seq(from=0, to=25, length.out=500)
f <- dlnorm(x)
# generate a normal with mean=0, sd=1
x2 <- seq(from=-5,to=5, length.out=500)
f2 <- dnorm(x2)
# make data frames
lnormframe <- data.frame(x=x,y=f)
normframe <- data.frame(x=x2, y=f2)
dframe <- data.frame(u=u)
# plot densityplots with theoretical curves superimposed
p1 <- ggplot(dframe, aes(x=u)) + geom_density() +
geom_line(data=lnormframe, aes(x=x,y=y), linetype=2)
p2 <- ggplot(dframe, aes(x=log(u))) + geom_density() +
geom_line(data=normframe, aes(x=x,y=y), linetype=2)
# functions to plot multiple plots on one page
library(grid)
# Print a list of plot objects (e.g., ggplot2 plots) stacked vertically
# on a single page, one grid-viewport row per plot.
# plist: a list of objects that support print(p, vp=viewport).
nplot <- function(plist) {
  n <- length(plist)
  grid.newpage()
  pushViewport(viewport(layout=grid.layout(n,1)))
  # Viewport addressing a single cell (row x, column y) of the layout.
  vplayout<-function(x,y) {viewport(layout.pos.row=x, layout.pos.col=y)}
  # seq_len() (not 1:n) so an empty plist draws nothing instead of
  # erroring: 1:0 would yield c(1, 0).
  for(i in seq_len(n)) {
    print(plist[[i]], vp=vplayout(i,1))
  }
}
# this is the plot that leads this section.
nplot(list(p1, p2))
# example B.6 of section B.1.3
# (example B.6 of section B.1.3) : Important statistical concepts : Distributions : Lognormal distribution
# Title: Plotting the lognormal distribution
# the 50th percentile (or median) of the lognormal with
# meanlog=0 and sdlog=1
qlnorm(0.5)
# [1] 1
# the probability of seeing a value x less than 1
plnorm(1)
# [1] 0.5
# the probability of observing a value x less than 10:
plnorm(10)
# [1] 0.9893489
# -- show the 75th percentile of the lognormal
# use lnormframe from previous example: the
# theoretical lognormal curve
line <- qlnorm(0.75)
xstr <- sprintf("qlnorm(0.75) = %1.3f", line)
lnormframe75 <- subset(lnormframe, lnormframe$x < line)
# Plot it
# The shaded area is 75% of the area under the lognormal curve
ggplot(lnormframe, aes(x=x,y=y)) + geom_line() +
geom_area(data=lnormframe75, aes(x=x,y=y), fill="gray") +
geom_vline(aes(xintercept=line), linetype=2) +
geom_text(x=line, y=0, label=xstr, hjust= 0, vjust=1)
# example B.7 of section B.1.4
# (example B.7 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Plotting the binomial distribution
library(ggplot2)
#
# use dbinom to produce the theoretical curves
#
numflips <- 50
# x is the number of heads that we see
x <- 0:numflips
# probability of heads for several different coins
p <- c(0.05, 0.15, 0.5, 0.75)
plabels <- paste("p =", p)
# calculate the probability of seeing x heads in numflips flips
# for all the coins. This probably isn't the most elegant
# way to do this, but at least it's easy to read
flips <- NULL
for(i in 1:length(p)) {
coin <- p[i]
label <- plabels[i]
tmp <- data.frame(number.of.heads=x,
probability = dbinom(x, numflips, coin),
coin.type = label)
flips <- rbind(flips, tmp)
}
# plot it
# this is the plot that leads this section
ggplot(flips, aes(x=number.of.heads, y=probability)) +
geom_point(aes(color=coin.type, shape=coin.type)) +
geom_line(aes(color=coin.type))
# example B.8 of section B.1.4
# (example B.8 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the theoretical binomial distribution
p = 0.5 # the percentage of females in this student population
class.size <- 20 # size of a classroom
numclasses <- 100 # how many classrooms we observe
# what might a typical outcome look like?
numFemales <- rbinom(numclasses, class.size, p) # Note: 1
# the theoretical counts (not necessarily integral)
probs <- dbinom(0:class.size, class.size, p)
tcount <- numclasses*probs
# the obvious way to plot this is with histogram or geom_bar
# but this might just look better
zero <- function(x) {0} # a dummy function that returns only 0
ggplot(data.frame(number.of.girls=numFemales, dummy=1),
aes(x=number.of.girls, y=dummy)) +
# count the number of times you see x heads
stat_summary(fun.y="sum", geom="point", size=2) + # Note: 2
stat_summary(fun.ymax="sum", fun.ymin="zero", geom="linerange") +
# superimpose the theoretical number of times you see x heads
geom_line(data=data.frame(x=0:class.size, y=probs),
aes(x=x, y=tcount), linetype=2) +
scale_x_continuous(breaks=0:class.size, labels=0:class.size) +
scale_y_continuous("number of classrooms")
# Note 1:
# Because we didn’t call set.seed, we
# expect different results each time we run this line.
# Note 2:
# stat_summary is one of the ways to
# control data aggregation during plotting. In this case, we’re using it to
# place the dot and bar measured from the empirical data in with the
# theoretical density curve.
# example B.9 of section B.1.4
# (example B.9 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Simulating a binomial distribution
# use rbinom to simulate flipping a coin of probability p N times
p75 <- 0.75 # a very unfair coin (mostly heads)
N <- 1000 # flip it several times
flips_v1 <- rbinom(N, 1, p75)
# Another way to generate unfair flips is to use runif:
# the probability that a uniform random number from [0 1)
# is less than p is exactly p. So "less than p" is "heads".
flips_v2 <- as.numeric(runif(N) < p75)
# Summarize a vector of coin flips (1 = heads, anything else = tails)
# as a named table of outcome counts.
prettyprint_flips <- function(flips) {
  is_heads <- flips == 1
  outcome <- ifelse(is_heads, "heads", "tails")
  table(outcome)
}
prettyprint_flips(flips_v1)
# outcome
# heads tails
# 756 244
prettyprint_flips(flips_v2)
# outcome
# heads tails
# 743 257
# example B.10 of section B.1.4
# (example B.10 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the binomial distribution
# pbinom example
nflips <- 100
nheads <- c(25, 45, 50, 60) # number of heads
# what are the probabilities of observing at most that
# number of heads on a fair coin?
left.tail <- pbinom(nheads, nflips, 0.5)
sprintf("%2.2f", left.tail)
# [1] "0.00" "0.18" "0.54" "0.98"
# the probabilities of observing more than that
# number of heads on a fair coin?
right.tail <- pbinom(nheads, nflips, 0.5, lower.tail=F)
sprintf("%2.2f", right.tail)
# [1] "1.00" "0.82" "0.46" "0.02"
# as expected:
left.tail+right.tail
# [1] 1 1 1 1
# so if you flip a fair coin 100 times,
# you are guaranteed to see more than 10 heads,
# almost guaranteed to see fewer than 60, and
# probably more than 45.
# qbinom example
nflips <- 100
# what's the 95% "central" interval of heads that you
# would expect to observe on 100 flips of a fair coin?
left.edge <- qbinom(0.025, nflips, 0.5)
right.edge <- qbinom(0.025, nflips, 0.5, lower.tail=F)
c(left.edge, right.edge)
# [1] 40 60
# so with 95% probability you should see between 40 and 60 heads
# example B.11 of section B.1.4
# (example B.11 of section B.1.4) : Important statistical concepts : Distributions : Binomial distribution
# Title: Working with the binomial cdf
# because this is a discrete probability distribution,
# pbinom and qbinom are not exact inverses of each other
# this direction works
pbinom(45, nflips, 0.5)
# [1] 0.1841008
qbinom(0.1841008, nflips, 0.5)
# [1] 45
# this direction won't be exact
qbinom(0.75, nflips, 0.5)
# [1] 53
pbinom(53, nflips, 0.5)
# [1] 0.7579408
# example B.12 of section B.2.2
# (example B.12 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Building simulated A/B test data
set.seed(123515)
d <- rbind( # Note: 1
data.frame(group='A',converted=rbinom(100000,size=1,p=0.05)), # Note: 2
data.frame(group='B',converted=rbinom(10000,size=1,p=0.055)) # Note: 3
)
# Note 1:
# Build a data frame to store simulated
# examples.
# Note 2:
# Add 100,000 examples from the A group
# simulating a conversion rate of 5%.
# Note 3:
# Add 10,000 examples from the B group
# simulating a conversion rate of 5.5%.
# example B.13 of section B.2.2
# (example B.13 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Summarizing the A/B test into a contingency table
tab <- table(d)
print(tab)
## converted
## group 0 1
## A 94979 5021
## B 9398 602
# example B.14 of section B.2.2
# (example B.14 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Calculating the observed A and B rates
aConversionRate <- tab['A','1']/sum(tab['A',])
print(aConversionRate)
## [1] 0.05021
bConversionRate <- tab['B','1']/sum(tab['B',])
print(bConversionRate)
## [1] 0.0602
commonRate <- sum(tab[,'1'])/sum(tab)
print(commonRate)
## [1] 0.05111818
# example B.15 of section B.2.2
# (example B.15 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Calculating the significance of the observed difference in rates
fisher.test(tab)
## Fisher's Exact Test for Count Data
##
## data: tab
## p-value = 2.469e-05
## alternative hypothesis: true odds ratio is not equal to 1
## 95 percent confidence interval:
## 1.108716 1.322464
## sample estimates:
## odds ratio
## 1.211706
# example B.16 of section B.2.2
# (example B.16 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Computing frequentist significance
print(pbinom( # Note: 1
lower.tail=F, # Note: 2
q=tab['B','1']-1, # Note: 3
size=sum(tab['B',]), # Note: 4
prob=commonRate # Note: 5
))
## [1] 3.153319e-05
# Note 1:
# Use the pbinom() call to calculate how
# likely different observed counts are.
# Note 2:
# Signal we want the probability of being
# greater than a given q.
# Note 3:
# Ask for the probability of seeing at least as many conversions as our observed B groups
# did.
# Note 4:
# Specify the total number of trials as
# equal to what we saw in our B group.
# Note 5:
# Specify the conversion probability at the
# estimated common rate.
# example B.17 of section B.2.2
# (example B.17 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Bayesian estimate of the posterior tail mass
print(pbeta( # Note: 1
aConversionRate, # Note: 2
shape1=commonRate+tab['B','1'], # Note: 3
shape2=(1-commonRate)+tab['B','0'])) # Note: 4
## [1] 4.731817e-06
# Note 1:
# pbeta() functionUse pbeta() to estimate how likely
# different observed conversion rates are.
# Note 2:
# Ask for the probability of seeing a
# conversion rate no larger than aConversionRate.
# Note 3:
# Estimate conversion count as prior
# commonRate plus the B observations.
# Note 4:
# Estimate nonconversion count as prior
# 1-commonRate plus the B observations.
# example B.18 of section B.2.2
# (example B.18 of section B.2.2) : Important statistical concepts : Statistical theory : A/B tests
# Title: Plotting the posterior distribution of the B group
library('ggplot2')
plt <- data.frame(x=seq(from=0.04,to=0.07,length.out=301))
plt$density <- dbeta(plt$x,
shape1=commonRate+tab['B','1'],
shape2=(1-commonRate)+tab['B','0'])
ggplot(dat=plt) +
geom_line(aes(x=x,y=density)) +
geom_vline(aes(xintercept=bConversionRate)) +
geom_vline(aes(xintercept=aConversionRate),linetype=2)
# example B.19 of section B.2.3
# (example B.19 of section B.2.3) : Important statistical concepts : Statistical theory : Power of tests
# Title: Sample size estimate
# Rough sample-size estimate for detecting a conversion-rate difference.
# targetRate: assumed base conversion rate; difference: smallest rate
# difference we want to detect; errorProb: acceptable error probability.
# Returns the estimated number of trials, rounded up to a whole number.
estimate <- function(targetRate,difference,errorProb) {
  trials <- -log(errorProb) * targetRate / (difference^2)
  ceiling(trials)
}
est <- estimate(0.045,0.004,0.05)
print(est)
## [1] 8426
# example B.20 of section B.2.3
# (example B.20 of section B.2.3) : Important statistical concepts : Statistical theory : Power of tests
# Title: Exact binomial sample size calculation
# Probability of observing a misleadingly low conversion count, assuming
# the true rate is targetRate and the experiment has `size` trials.
# "Low" means at least difference*size below the expectation
# targetRate*size.
errorProb <- function(targetRate,difference,size) { # Note: 1
  lowCount <- ceiling((targetRate - difference) * size)
  pbinom(lowCount, size=size, prob=targetRate)
}
print(errorProb(0.045,0.004,est)) # Note: 2
## [1] 0.04153646
# Find the smallest integer >= 2 where fEventuallyNegative() is
# non-positive, for a function guaranteed to eventually go negative.
# Tolerates minor non-monotonicity near the boundary.
binSearchNonPositive <- function(fEventuallyNegative) { # Note: 3
  # Phase 1: exponential search for an upper bound with f(hi) <= 0.
  lo <- 1
  hi <- 2
  while (fEventuallyNegative(hi) > 0) {
    hi <- hi * 2
  }
  # Phase 2: binary search; invariant: f(hi) <= 0, and f(lo) > 0
  # (except possibly the initial lo = 1, which is never returned).
  while (hi - lo > 1) {
    mid <- lo + (hi - lo) %/% 2
    if (fEventuallyNegative(mid) > 0) {
      lo <- mid
    } else {
      hi <- mid
    }
  }
  hi
}
# Smallest experiment size whose probability of a misleadingly low
# count is at most errorProb (found by binary search over errorProb()).
# NB: the numeric argument errorProb shadows the errorProb() function
# defined above; the call below still resolves to the function because
# R skips non-function bindings when looking up a name used as a call.
actualSize <- function(targetRate,difference,errorProb) {
binSearchNonPositive(function(n) {
errorProb(targetRate,difference,n) - errorProb })
}
size <- actualSize(0.045,0.004,0.05) # Note: 4
print(size)
## [1] 7623
print(errorProb(0.045,0.004,size))
## [1] 0.04983659
# Note 1:
# Define a function that calculates the
# probability of seeing a low number of conversions, assuming the actual
# conversion rate is targetRate and the size of the experiment is size. Low is
# considered be a count that’s at least difference*size below the expected value
# targetRate*size.
# Note 2:
# Calculate probability of a bad experiment using
# estimated experiment size. The failure odds are around 4% (under the 5% we’re
# designing for), which means the estimate size was slightly high.
# Note 3:
# Define a binary search that finds a non-positive
# value of a function that’s guaranteed to be eventually negative. This search
# works around the minor non-monotonicity in errorProb() (due to rounding
# issues).
# Note 4:
# Calculate the required sample size for our B
# experiment.
# example B.21 of section B.2.4
# (example B.21 of section B.2.4) : Important statistical concepts : Statistical theory : Specialized statistical tests
# Title: Building synthetic uncorrelated income example
set.seed(235236) # Note: 1
d <- data.frame(EarnedIncome=100000*rlnorm(100),
CapitalGains=100000*rlnorm(100)) # Note: 2
print(with(d,cor(EarnedIncome,CapitalGains)))
# [1] -0.01066116 # Note: 3
# Note 1:
# Set the pseudo-random seed to a known
# value so the demonstration is repeatable.
# Note 2:
# Generate our synthetic data.
# Note 3:
# The correlation is -0.01, which is very near 0—indicating (as designed) no relation.
# example B.22 of section B.2.4
# (example B.22 of section B.2.4) : Important statistical concepts : Statistical theory : Specialized statistical tests
# Title: Calculating the (non)significance of the observed correlation
with(d,cor(EarnedIncome,CapitalGains,method='spearman'))
# [1] 0.03083108
with(d,cor.test(EarnedIncome,CapitalGains,method='spearman'))
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 161512, p-value = 0.7604
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#0.03083108
# example B.23 of section B.3.1
# (example B.23 of section B.3.1) : Important statistical concepts : Examples of the statistical view of data : Sampling bias
# Title: Misleading significance result from biased observations
veryHighIncome <- subset(d, EarnedIncome+CapitalGains>=500000)
print(with(veryHighIncome,cor.test(EarnedIncome,CapitalGains,
method='spearman')))
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 1046, p-value < 2.2e-16
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#-0.8678571
# example B.24 of section B.3.1
# (example B.24 of section B.3.1) : Important statistical concepts : Examples of the statistical view of data : Sampling bias
# Title: Plotting biased view of income and capital gains
library(ggplot2)
ggplot(data=d,aes(x=EarnedIncome,y=CapitalGains)) +
geom_point() + geom_smooth(method='lm') +
coord_cartesian(xlim=c(0,max(d)),ylim=c(0,max(d))) # Note: 1
ggplot(data=veryHighIncome,aes(x=EarnedIncome,y=CapitalGains)) +
geom_point() + geom_smooth(method='lm') +
geom_point(data=subset(d,EarnedIncome+CapitalGains<500000),
aes(x=EarnedIncome,y=CapitalGains),
shape=4,alpha=0.5,color='red') +
geom_segment(x=0,xend=500000,y=500000,yend=0,
linetype=2,alpha=0.5,color='red') +
coord_cartesian(xlim=c(0,max(d)),ylim=c(0,max(d))) # Note: 2
print(with(subset(d,EarnedIncome+CapitalGains<500000),
cor.test(EarnedIncome,CapitalGains,method='spearman'))) # Note: 3
#
# Spearman's rank correlation rho
#
#data: EarnedIncome and CapitalGains
#S = 107664, p-value = 0.6357
#alternative hypothesis: true rho is not equal to 0
#sample estimates:
# rho
#-0.05202267
# Note 1:
# Plot all of the income data with linear
# trend line (and uncertainty band).
# Note 2:
# Plot the very high income data and linear
# trend line (also include cut-off and portrayal of suppressed data).
# Note 3:
# Compute correlation of suppressed
# data.
# example B.25 of section B.3.2
# (example B.25 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: Summarizing our synthetic biological data
load('synth.RData')
print(summary(s))
## week Caco2A2BPapp FractionHumanAbsorption
## Min. : 1.00 Min. :6.994e-08 Min. :0.09347
## 1st Qu.: 25.75 1st Qu.:7.312e-07 1st Qu.:0.50343
## Median : 50.50 Median :1.378e-05 Median :0.86937
## Mean : 50.50 Mean :2.006e-05 Mean :0.71492
## 3rd Qu.: 75.25 3rd Qu.:4.238e-05 3rd Qu.:0.93908
## Max. :100.00 Max. :6.062e-05 Max. :0.99170
head(s)
## week Caco2A2BPapp FractionHumanAbsorption
## 1 1 6.061924e-05 0.11568186
## 2 2 6.061924e-05 0.11732401
## 3 3 6.061924e-05 0.09347046
## 4 4 6.061924e-05 0.12893540
## 5 5 5.461941e-05 0.19021858
## 6 6 5.370623e-05 0.14892154
# View(s) # Note: 1
# Note 1:
# Display a date in spreadsheet like
# window. View is one of the commands that has a much better implementation in
# RStudio than in basic R.
# example B.26 of section B.3.2
# (example B.26 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: Building data that improves over time
set.seed(2535251)
s <- data.frame(week=1:100)
s$Caco2A2BPapp <- sort(sample(d$Caco2A2BPapp,100,replace=T),
decreasing=T)
sigmoid <- function(x) {1/(1+exp(-x))}
s$FractionHumanAbsorption <- # Note: 1
sigmoid(
7.5 + 0.5*log(s$Caco2A2BPapp) + # Note: 2
s$week/10 - mean(s$week/10) + # Note: 3
rnorm(100)/3 # Note: 4
)
write.table(s,'synth.csv',sep=',',
quote=F,row.names=F)
# Note 1:
# Build synthetic examples.
# Note 2:
# Add in Caco2 to absorption relation learned from original dataset. Note the relation is
# positive: better Caco2 always drives better absorption in our
# synthetic dataset. We’re log transforming Caco2, as it has over 3
# decades of range.
# Note 3:
# Add in a mean-0 term that depends on time to simulate the effects of improvements as the
# project moves forward.
# Note 4:
# Add in a mean-0 noise term.
# example B.27 of section B.3.2
# (example B.27 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: A bad model (due to omitted variable bias)
print(summary(glm(data=s,
FractionHumanAbsorption~log(Caco2A2BPapp),
family=binomial(link='logit'))))
## Warning: non-integer #successes in a binomial glm!
##
## Call:
## glm(formula = FractionHumanAbsorption ~ log(Caco2A2BPapp),
## family = binomial(link = "logit"),
## data = s)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.609 -0.246 -0.118 0.202 0.557
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -10.003 2.752 -3.64 0.00028 ***
## log(Caco2A2BPapp) -0.969 0.257 -3.77 0.00016 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 43.7821 on 99 degrees of freedom
## Residual deviance: 9.4621 on 98 degrees of freedom
## AIC: 64.7
##
## Number of Fisher Scoring iterations: 6
# example B.28 of section B.3.2
# (example B.28 of section B.3.2) : Important statistical concepts : Examples of the statistical view of data : Omitted variable bias
# Title: A better model
print(summary(glm(data=s,
FractionHumanAbsorption~week+log(Caco2A2BPapp),
family=binomial(link='logit'))))
## Warning: non-integer #successes in a binomial glm!
##
## Call:
## glm(formula = FractionHumanAbsorption ~ week + log(Caco2A2BPapp),
## family = binomial(link = "logit"), data = s)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -0.3474 -0.0568 -0.0010 0.0709 0.3038
##
## Coefficients:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.1413 4.6837 0.67 0.5024
## week 0.1033 0.0386 2.68 0.0074 **
## log(Caco2A2BPapp) 0.5689 0.5419 1.05 0.2938
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## (Dispersion parameter for binomial family taken to be 1)
##
## Null deviance: 43.7821 on 99 degrees of freedom
## Residual deviance: 1.2595 on 97 degrees of freedom
## AIC: 47.82
##
## Number of Fisher Scoring iterations: 6
|
##' Louvain on an igraph object.
##'
##' This functions depends on Python and louvain being installed. Make sure igraph,
##' louvain and numpy are installed. For example with something like:
##' 'pip install python-igraph louvain numpy'.
##' @title Python wrapper to run Louvain
##' @param graph a igraph object
##' @param gamma a vector of gamma. Default 1.
##' @param nreps the number of repetition for each gamma, Default 1.
##' @param nb_cores the number of processors to use. Default is 1.
##' @return a list with
##' \item{comm}{a data.frame with the community for each gamma}
##' \item{gamma}{the input gammas corresponding to the columns of comm}
##' @author Jean Monlong
##' @export
run_louvain <- function(graph, gamma=1, nreps=1, nb_cores=1){
    ## Random prefix for the temporary files created below, so two runs in the
    ## same working directory are unlikely to collide.
    ## NOTE(review): a runif-based prefix can still clash across processes;
    ## tempfile() would be safer -- confirm before changing.
    temp.pref = paste0('tempforlouvain', round(stats::runif(1, 0, 1e4)))
    ## Write input data: export the graph once in Pajek format; every Python
    ## job below reads this same file.
    igraph::write_graph(graph, file=paste0(temp.pref, '.pajek'), format='pajek')
    ## Run each gamma in a different core: one mclapply job per gamma value;
    ## the nreps repetitions for that gamma all run inside one Python script.
    res.l = parallel::mclapply(gamma, function(gamma){
        gammas = rep(gamma, nreps)
        ## Write Python script. For each repetition it shuffles the vertex
        ## labels, runs Louvain (RBConfigurationVertexPartition, using the
        ## "weight" edge attribute) at this gamma, and records one community id
        ## per vertex; columns of 'part' are repetitions. The matrix is saved
        ## as integer CSV.
        ## NOTE(review): the script calls random.shuffle() on a range object,
        ## which raises TypeError under Python 3 (ranges are immutable); this
        ## appears to target Python 2 -- confirm before relying on it.
        write(c('import igraph',
                'import louvain',
                'import numpy',
                'import random',
                'g = igraph.Graph()',
                paste0('g = g.Read_Pajek("', temp.pref, '.pajek")'),
                'ord = range(g.vcount())',
                paste0('gammas = [', paste(gammas, collapse=','), ']'),
                'part = numpy.zeros([g.vcount(), len(gammas)], dtype=int)',
                'for run in range(len(gammas)):',
                '    random.shuffle(ord)',
                '    g2 = g.permute_vertices(ord)',
                '    partition = louvain.find_partition(g2, louvain.RBConfigurationVertexPartition, resolution_parameter=gammas[run], weights="weight")',
                '    com = 1',
                '    for idx in partition:',
                '        part[idx, run] = com',
                '        com += 1',
                '    part[:, run] = part[ord, run]',
                paste0('numpy.savetxt("', temp.pref, '_', gamma, '.csv", part, delimiter=",", fmt="%i")')),
              file=paste0(temp.pref, '_', gamma, '.py'))
        ## Run command. NOTE(review): system2() raises an R error mainly when
        ## the 'python' binary cannot be started; a failing script typically
        ## returns a non-zero status (warning only), and is instead caught by
        ## the row-count fallback below -- confirm this is the intent.
        runcmd = tryCatch({
            system2('python', paste0(temp.pref, '_', gamma, '.py'))
        }, error = function(err){
            stop('Error when running Louvain in Python. Make sure igraph, louvain and numpy are installed. For example something like:\n',
                 'pip install python-igraph louvain numpy\n\n',
                 'The python error was:\n',
                 err)
        })
        ## Load results and cleanup: read the per-gamma CSV (one column per
        ## repetition) and delete the temporary .csv/.py files.
        comm.df = utils::read.csv(paste0(temp.pref, '_', gamma, '.csv'), header=FALSE,
                                  as.is=TRUE)
        file.remove(paste0(temp.pref, '_', gamma, c('.csv', '.py')))
        ## Fallback: if the output does not have one row per vertex (e.g. the
        ## Python run failed or wrote partial output), return an all-NA matrix
        ## of the expected shape rather than misaligned communities.
        if(nrow(comm.df) != length(igraph::V(graph))){
            comm.df = matrix(NA, length(igraph::V(graph)), length(gammas))
        }
        return(list(comm.df=as.matrix(comm.df), gammas=gammas))
    }, mc.cores=nb_cores)
    ## The shared Pajek export is no longer needed once all jobs finished.
    file.remove(paste0(temp.pref, '.pajek'))
    ## Bind per-gamma results: 'comm' gets one column per (gamma, repetition)
    ## combination; 'gamma' records the gamma used for each column.
    res = list(comm=do.call(cbind, lapply(res.l, function(e) e$comm.df)),
               gamma=do.call(c, lapply(res.l, function(e) e$gammas)))
    return(res)
}
| /R/run_louvain.R | permissive | jmonlong/scCNAutils | R | false | false | 3,132 | r | ##' Louvain on an igraph object.
##'
##' This functions depends on Python and louvain being installed. Make sure igraph,
##' louvain and numpy are installed. For example with something like:
##' 'pip install python-igraph louvain numpy'.
##' @title Python wrapper to run Louvain
##' @param graph a igraph object
##' @param gamma a vector of gamma. Default 1.
##' @param nreps the number of repetition for each gamma, Default 1.
##' @param nb_cores the number of processors to use. Default is 1.
##' @return a list with
##' \item{comm}{a data.frame with the community for each gamma}
##' \item{gamma}{the input gammas corresponding to the columns of comm}
##' @author Jean Monlong
##' @export
run_louvain <- function(graph, gamma=1, nreps=1, nb_cores=1){
temp.pref = paste0('tempforlouvain', round(stats::runif(1, 0, 1e4)))
## Write input data
igraph::write_graph(graph, file=paste0(temp.pref, '.pajek'), format='pajek')
## Run each gamma in a a different core
res.l = parallel::mclapply(gamma, function(gamma){
gammas = rep(gamma, nreps)
## Write Python script
write(c('import igraph',
'import louvain',
'import numpy',
'import random',
'g = igraph.Graph()',
paste0('g = g.Read_Pajek("', temp.pref, '.pajek")'),
'ord = range(g.vcount())',
paste0('gammas = [', paste(gammas, collapse=','), ']'),
'part = numpy.zeros([g.vcount(), len(gammas)], dtype=int)',
'for run in range(len(gammas)):',
' random.shuffle(ord)',
' g2 = g.permute_vertices(ord)',
' partition = louvain.find_partition(g2, louvain.RBConfigurationVertexPartition, resolution_parameter=gammas[run], weights="weight")',
' com = 1',
' for idx in partition:',
' part[idx, run] = com',
' com += 1',
' part[:, run] = part[ord, run]',
paste0('numpy.savetxt("', temp.pref, '_', gamma, '.csv", part, delimiter=",", fmt="%i")')),
file=paste0(temp.pref, '_', gamma, '.py'))
## Run command
runcmd = tryCatch({
system2('python', paste0(temp.pref, '_', gamma, '.py'))
}, error = function(err){
stop('Error when running Louvain in Python. Make sure igraph, louvain and numpy are installed. For example something like:\n',
'pip install python-igraph louvain numpy\n\n',
'The python error was:\n',
err)
})
## Load results and cleanup
comm.df = utils::read.csv(paste0(temp.pref, '_', gamma, '.csv'), header=FALSE,
as.is=TRUE)
file.remove(paste0(temp.pref, '_', gamma, c('.csv', '.py')))
if(nrow(comm.df) != length(igraph::V(graph))){
comm.df = matrix(NA, length(igraph::V(graph)), length(gammas))
}
return(list(comm.df=as.matrix(comm.df), gammas=gammas))
}, mc.cores=nb_cores)
file.remove(paste0(temp.pref, '.pajek'))
res = list(comm=do.call(cbind, lapply(res.l, function(e) e$comm.df)),
gamma=do.call(c, lapply(res.l, function(e) e$gammas)))
return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcGjoint.R
\name{calcGjoint}
\alias{calcGjoint}
\title{calcGjoint}
\usage{
calcGjoint(freq, nU = 1, fst = 0, refK = NULL, refR = NULL,
ibd = c(1, 0, 0), sortComb = FALSE)
}
\arguments{
\item{freq}{A vector of allele frequencies for a given population.}
\item{nU}{Number of unknowns (where the first is a related)}
\item{fst}{Assumed theta/fst-correction}
\item{refK}{contains a vector of alleles for the known typed reference profiles (a,b,c,d...)}
\item{refR}{contains a vector of alleles for a related reference profile (a,b)}
\item{ibd}{the identical by decent coefficients of the relationship denotation}
\item{sortComb}{boolean of whether only the outcome G1,G2>=G3>=...>=GnU should be calculated (the Gprobs are symmetrical for k=2,..,U)}
}
\value{
Glist A list with genotypes and genotype probabilities
}
\description{
calcGjoint returns a list of joint genotypes with corresponding joint probabilities for unknown contributors for a given marker.
}
\details{
The function returns the list of all possible genotypes, with corresponding probabilities. The allele-names in popFreq needs to be numbers.
}
\examples{
freq = rgamma(8,1,1)
freq = freq/sum(freq)
names(freq) <- 1:length(freq)
system.time({ foo = calcGjoint(freq,nU=3,fst=0.1,refK=c("2","3","1","1"),refR=c("2","3"),ibd=c(1/4,1/2,1/4),sortComb=FALSE) })
system.time({ foo = calcGjoint(freq,nU=3,fst=0.1,refK=c("2","3","1","1"),refR=c("2","3"),ibd=c(1/4,1/2,1/4),sortComb=TRUE) })
}
\author{
Oyvind Bleka
}
| /euroformix_2.3.0/man/calcGjoint.Rd | no_license | oyvble/euroformixArchive | R | false | true | 1,560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcGjoint.R
\name{calcGjoint}
\alias{calcGjoint}
\title{calcGjoint}
\usage{
calcGjoint(freq, nU = 1, fst = 0, refK = NULL, refR = NULL,
ibd = c(1, 0, 0), sortComb = FALSE)
}
\arguments{
\item{freq}{A vector of allele frequencies for a given population.}
\item{nU}{Number of unknowns (where the first is a related)}
\item{fst}{Assumed theta/fst-correction}
\item{refK}{contains a vector of alleles for the known typed reference profiles (a,b,c,d...)}
\item{refR}{contains a vector of alleles for a related reference profile (a,b)}
\item{ibd}{the identical by decent coefficients of the relationship denotation}
\item{sortComb}{boolean of whether only the outcome G1,G2>=G3>=...>=GnU should be calculated (the Gprobs are symmetrical for k=2,..,U)}
}
\value{
Glist A list with genotypes and genotype probabilities
}
\description{
getGlist Returns a list of joint genotypes with corresponding joing probabilities for unknowns contributors for a given marker.
}
\details{
The function returns the list of all possible genotypes, with corresponding probabilities. The allele-names in popFreq needs to be numbers.
}
\examples{
freq = rgamma(8,1,1)
freq = freq/sum(freq)
names(freq) <- 1:length(freq)
system.time({ foo = calcGjoint(freq,nU=3,fst=0.1,refK=c("2","3","1","1"),refR=c("2","3"),ibd=c(1/4,1/2,1/4),sortComb=FALSE) })
system.time({ foo = calcGjoint(freq,nU=3,fst=0.1,refK=c("2","3","1","1"),refR=c("2","3"),ibd=c(1/4,1/2,1/4),sortComb=TRUE) })
}
\author{
Oyvind Bleka
}
|
# nn.indit: nearest-neighbour index table.
#
# For each observation (row) of 'dendat', lists the indices of all other
# observations ordered from nearest to farthest, using dist() with its
# default (Euclidean) metric.
#
# Arguments:
#   dendat  numeric data matrix, one observation per row (n >= 2 rows).
# Returns an n x (n-1) numeric matrix: row i holds the indices 1..n
# (excluding i) sorted by increasing distance to observation i; ties keep
# ascending index order (order() is stable).
#
# Fix: the previous version decoded the packed 'dist' vector by hand, and its
# general loop evaluated seq(i+1, n) with i+1 > n -- which counts DOWN in R --
# so for n == 2 it produced out-of-range indices and a length-mismatch
# assignment error. Indexing the full distance matrix handles every n >= 2
# and yields identical results for n >= 3.
nn.indit<-function(dendat)
{
  n<-dim(dendat)[1]
  # Full symmetric distance matrix: the same pairwise values as dist(),
  # but directly indexable by (row, column).
  dmat<-as.matrix(dist(dendat))
  indmat<-matrix(0,n,n-1)
  for (i in 1:n){
    # Candidate neighbours in ascending index order so ties resolve as
    # in the original implementation.
    cand<-(1:n)[-i]
    indmat[i,]<-cand[order(dmat[i,cand])]
  }
  return(indmat)
}
| /R/nn.indit.R | no_license | cran/denpro | R | false | false | 648 | r | nn.indit<-function(dendat)
{
n<-dim(dendat)[1]
maxk<-n-1
indmat<-matrix(0,n,maxk)
eta<-dist(dendat)
#i<j eta[n*(i-1) - i*(i-1)/2 + j-i]
for (i in 2:(n-1)){
i1<-seq(1,i-1)
j1<-i
irow1<-eta[n*(i1-1) - i1*(i1-1)/2 + j1-i1]
j2<-seq(i+1,n)
irow2<-eta[n*(i-1) - i*(i-1)/2 + j2-i]
irow<-c(irow1,irow2)
or<-order(irow)
poisi<-c(seq(1,i-1),seq(i+1,n))
indmat[i,]<-poisi[or]
}
i<-1
j<-seq(i+1,n)
irow<-eta[n*(i-1) - i*(i-1)/2 + j-i]
or<-order(irow)
poisi<-seq(2,n)
indmat[i,]<-poisi[or]
i<-n
i1<-seq(1,n-1)
j<-i
irow<-eta[n*(i1-1) - i1*(i1-1)/2 + j-i1]
or<-order(irow)
poisi<-seq(1,n-1)
indmat[i,]<-poisi[or]
return(indmat)
}
|
# https://github.com/stan-dev/rstan/wiki/Installing-RStan-on-Linux
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC","CXX14=clang++",file = M, sep = "\n", append = TRUE)
install.packages("rstan", type = "source") | /install_stan.r | no_license | himaeda1332/docker-ml-rstan | R | false | false | 378 | r |
# https://github.com/stan-dev/rstan/wiki/Installing-RStan-on-Linux
dotR <- file.path(Sys.getenv("HOME"), ".R")
if (!file.exists(dotR)) dir.create(dotR)
M <- file.path(dotR, "Makevars")
if (!file.exists(M)) file.create(M)
cat("\nCXX14FLAGS=-O3 -march=native -mtune=native -fPIC","CXX14=clang++",file = M, sep = "\n", append = TRUE)
install.packages("rstan", type = "source") |
# _
# platform x86_64-w64-mingw32
# arch x86_64
# os mingw32
# crt ucrt
# system x86_64, mingw32
# status
# major 4
# minor 2.1
# year 2022
# month 06
# day 23
# svn rev 82513
# language R
# version.string R version 4.2.1 (2022-06-23 ucrt)
# nickname Funny-Looking Kid
# Mike Rieger, Update 04/06/2023
# Figure 3C_plot: Generate plot for Fig 3C (POff in smeared lawns or predator)
rm(list=ls()) # for console work
graphics.off() # for console work
wd="~/../Dropbox/chalasanilabsync/mrieger/Manuscripts/PribadiEtAl-2022/FINALv2/Figures/FIG 3/"
prefix = "Figure3B-C"
mdl = "pOff"
plotjitter=0.08
setwd(wd)
source("../alwaysLoad.R") # base set of custom functions for reports
load(paste0(prefix,".RData"))
# reset prefix for plots:
prefix = "Figure3C"
# atab: anova tab
# d: raw data
# dd: processed data <- data points in scatter.
# exemplarData: <- means & confidence estimates
# ph: posthoc comparisons
# All we really need is: dd for data points, and exemplardata
cls=c("#5CB3E5","#818181","#D36228")
names(cls)=levels(dd$Condition)
# This column is called Interaction for robustness with other Figures, but
# this particular dataset is a single factor term.
dd$x = as.numeric(interaction(dd$Condition)) # just for robustness across code, other figures have interaction term labels
dd$cols=cls[dd$x]
exemplarData = exemplarData[[mdl]] # overwrite with just the table we need
exemplarData$x = 1:length(levels(interaction(dd$Condition))) #"interaction" will already be the rownames
xlims=c(-1,1)+range(dd$x)
xticks=exemplarData$x
xlabs=levels(interaction(dd$Condition))
ylims=c(0,100)
yticks=seq(0,100,by=20)
#Print as EPS & as PDF, one is always a little better than the other
setEPS()
postscript(paste0(prefix,".eps"))
plot.new()
plot.window(xlim=xlims,ylim=ylims)
points(dd$x+runif(nrow(dd),min=-plotjitter,max=plotjitter),
dd$pct.off,col=dd$cols,pch=19)
axis(1,at=xticks,labels=xlabs,cex.axis=0.6)
axis(2,at=yticks,las=2,cex.axis=0.6)
title(ylab=mdl,main=prefix)
for(i in 1:nrow(exemplarData)){
# Estimate point
points(exemplarData[i,"x"]+2*plotjitter,
exemplarData[i,"Prob"]*100,pch=19)
# Up and Down CI Line
lines(rep(exemplarData[i,"x"],2)+2*plotjitter,
c(exemplarData[i,"Prob.lwr"],exemplarData[i,"Prob.upr"])*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.lwr"],2)*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.upr"],2)*100,lwd=2)
}
dev.off()
pdf(paste0(prefix,".pdf"))
plot.new()
plot.window(xlim=xlims,ylim=ylims)
points(dd$x+runif(nrow(dd),min=-plotjitter,max=plotjitter),
dd$pct.off,col=dd$cols,pch=19)
axis(1,at=xticks,labels=xlabs,cex.axis=0.6)
axis(2,at=yticks,las=2,cex.axis=0.6)
title(ylab=mdl,main=prefix)
for(i in 1:nrow(exemplarData)){
# Estimate point
points(exemplarData[i,"x"]+2*plotjitter,
exemplarData[i,"Prob"]*100,pch=19)
# Up and Down CI Line
lines(rep(exemplarData[i,"x"],2)+2*plotjitter,
c(exemplarData[i,"Prob.lwr"],exemplarData[i,"Prob.upr"])*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.lwr"],2)*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.upr"],2)*100,lwd=2)
}
dev.off() | /FIG 3/Figure3C_plot.R | no_license | shreklab/PribadiEtAl2023 | R | false | false | 3,866 | r | # _
# platform x86_64-w64-mingw32
# arch x86_64
# os mingw32
# crt ucrt
# system x86_64, mingw32
# status
# major 4
# minor 2.1
# year 2022
# month 06
# day 23
# svn rev 82513
# language R
# version.string R version 4.2.1 (2022-06-23 ucrt)
# nickname Funny-Looking Kid
# Mike Rieger, Update 04/06/2023
# Figure 3C_plot: Generate plot for Fig 3C (POff in smeared lawns or predator)
rm(list=ls()) # for console work
graphics.off() # for console work
wd="~/../Dropbox/chalasanilabsync/mrieger/Manuscripts/PribadiEtAl-2022/FINALv2/Figures/FIG 3/"
prefix = "Figure3B-C"
mdl = "pOff"
plotjitter=0.08
setwd(wd)
source("../alwaysLoad.R") # base set of custom functions for reports
load(paste0(prefix,".RData"))
# reset prefix for plots:
prefix = "Figure3C"
# atab: anova tab
# d: raw data
# dd: processed data <- data points in scatter.
# exemplarData: <- means & confidence estimates
# ph: posthoc comparisons
# All we really need is: dd for data points, and exemplardata
cls=c("#5CB3E5","#818181","#D36228")
names(cls)=levels(dd$Condition)
# This column is called Interaction for robustness with other Figures, but
# this particular dataset is a single factor term.
dd$x = as.numeric(interaction(dd$Condition)) # just for robustness across code, other figures have interaction term labels
dd$cols=cls[dd$x]
exemplarData = exemplarData[[mdl]] # overwrite with just the table we need
exemplarData$x = 1:length(levels(interaction(dd$Condition))) #"interaction" will already be the rownames
xlims=c(-1,1)+range(dd$x)
xticks=exemplarData$x
xlabs=levels(interaction(dd$Condition))
ylims=c(0,100)
yticks=seq(0,100,by=20)
#Print as EPS & as PDF, one is always a little better than the other
setEPS()
postscript(paste0(prefix,".eps"))
plot.new()
plot.window(xlim=xlims,ylim=ylims)
points(dd$x+runif(nrow(dd),min=-plotjitter,max=plotjitter),
dd$pct.off,col=dd$cols,pch=19)
axis(1,at=xticks,labels=xlabs,cex.axis=0.6)
axis(2,at=yticks,las=2,cex.axis=0.6)
title(ylab=mdl,main=prefix)
for(i in 1:nrow(exemplarData)){
# Estimate point
points(exemplarData[i,"x"]+2*plotjitter,
exemplarData[i,"Prob"]*100,pch=19)
# Up and Down CI Line
lines(rep(exemplarData[i,"x"],2)+2*plotjitter,
c(exemplarData[i,"Prob.lwr"],exemplarData[i,"Prob.upr"])*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.lwr"],2)*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.upr"],2)*100,lwd=2)
}
dev.off()
pdf(paste0(prefix,".pdf"))
plot.new()
plot.window(xlim=xlims,ylim=ylims)
points(dd$x+runif(nrow(dd),min=-plotjitter,max=plotjitter),
dd$pct.off,col=dd$cols,pch=19)
axis(1,at=xticks,labels=xlabs,cex.axis=0.6)
axis(2,at=yticks,las=2,cex.axis=0.6)
title(ylab=mdl,main=prefix)
for(i in 1:nrow(exemplarData)){
# Estimate point
points(exemplarData[i,"x"]+2*plotjitter,
exemplarData[i,"Prob"]*100,pch=19)
# Up and Down CI Line
lines(rep(exemplarData[i,"x"],2)+2*plotjitter,
c(exemplarData[i,"Prob.lwr"],exemplarData[i,"Prob.upr"])*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.lwr"],2)*100,lwd=2)
lines(rep(exemplarData[i,"x"],2)+2*plotjitter+c(-1,1)*plotjitter,
rep(exemplarData[i,"Prob.upr"],2)*100,lwd=2)
}
dev.off() |
## These are functions able to create a special matrix object that can be inverted
## When the inverse is requested, it searchs for the inverse stored on cache
## if the matrix has not changed. If not, the inverse is computed.
## Try this example to know how it works, the matrix generated is psuedo-random
## My_Matrix <- makeCacheMatrix(matrix(rexp(9), 3)*100)
## My_Inverse <- cacheSolve(A)
## This function is able to create a special matrix object that can cache its inverse.
## Build a matrix wrapper whose inverse can be memoised.
##
## Returns a list of four accessor closures sharing this function's
## environment (which holds the matrix 'x' and its cached inverse):
##   Set(y)        -- replace the stored matrix and drop any cached inverse
##   Get()         -- return the stored matrix
##   SetInverse(v) -- store a computed inverse in the cache
##   GetInverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  ## Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL
  Set <- function(y) {
    ## New data invalidates the cache.
    x <<- y
    cached_inverse <<- NULL
  }
  Get <- function() {
    x
  }
  SetInverse <- function(solve) {
    cached_inverse <<- solve
  }
  GetInverse <- function() {
    cached_inverse
  }
  ## Expose the accessors; callers use e.g. m$Get(), m$SetInverse(inv).
  list(Set = Set, Get = Get, SetInverse = SetInverse, GetInverse = GetInverse)
}
## This function computes the inverse of the supplied, cached matrix returned by function makeCacheMatrix above.
## It evaluates whether the matrix has changed or not; then the function cacheSolve retrieves the inverse from
## the cache or calculates it and stores it in the variable, whichever is required.
## Return the inverse of the special matrix object 'x' created by
## makeCacheMatrix, computing it at most once.
##
## Arguments:
##   x   -- a list of accessors as returned by makeCacheMatrix()
##   ... -- further arguments forwarded to solve()
## Returns the inverse matrix; it is also stored back into x's cache.
## Note: x$Set() resets the cache to NULL, so a changed matrix is re-inverted.
cacheSolve <- function(x, ...) {
  ## Ask the object for a previously computed inverse.
  MyInverse <- x$GetInverse()
  ## Cache hit: the stored matrix has not changed since the inverse was
  ## computed (Set() would have cleared the cache), so reuse it.
  if(!is.null(MyInverse)) {
    message("Getting cached data")
    ## Early return with the cached inverse.
    return(MyInverse)
  }
  ## Cache miss: fetch the matrix ...
  MyData <- x$Get()
  ## ... invert it (extra arguments, e.g. tol, pass through to solve()) ...
  MyInverse <- solve(MyData, ...)
  ## ... and memoise the result in the object for the next call.
  x$SetInverse(MyInverse)
  ## Return the freshly computed inverse (value of the last expression).
  MyInverse
} | /cachematrix.R | no_license | school1/ProgrammingAssignment2 | R | false | false | 2,302 | r | ## These are functions able to create a special matrix object that can be inverted
## When the inverse is requested, it searchs for the inverse stored on cache
## if the matrix has not changed. If not, the inverse is computed.
## Try this example to know how it works, the matrix generated is psuedo-random
## My_Matrix <- makeCacheMatrix(matrix(rexp(9), 3)*100)
## My_Inverse <- cacheSolve(A)
## This function is able to create a special matrix object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
#Defines x as a matrix and saves it to cache
# Creates MyInverse (a null variable for now, will have a value later)
MyInverse <- NULL
# Function to be called: Sets and stores the matrix for the variable
Set <- function(y) {
x <<- y
MyInverse <<- NULL
}
# Function to be called: Gets the matrix from the variable
Get <- function() x
# Function to be called: Allows you to manually set the inverse matrix for the variable
SetInverse <- function(solve) MyInverse <<- solve
# Function to be called: Get the inverse matrix from the variable
GetInverse <- function() MyInverse
# Create a list containing the matrix and its inverse (solve function)
# If this variable these are assigned to is "A", then you can call A$Get, A$Set(matrix_name_here), etc
list(Set = Set, Get = Get, SetInverse = SetInverse, GetInverse = GetInverse)
}
## This function computes the inverse of the supplied, cached matrix returned by function makeCacheMatrix above.
## It evaluates whether the matrix not changed or not, then the function cacheSolve retrieves the inverse from
## the cache or calculates it and stores it in the variable, whicheveris required.
cacheSolve <- function(x, ...) {
# Get the inverse from the input x
MyInverse <- x$GetInverse()
# If the matrix has not been changed and the inverse is calculated, get the inverse from the cache
if(!is.null(MyInverse)) {
message("Getting cached data")
# Return the inverse
return(MyInverse)
}
# If the inverse has not been calculated, get the matrix
MyData <- x$Get()
# Solve the inverse
MyInverse <- solve(MyData, ...)
# Cache the inverse in the variable
x$SetInverse(MyInverse)
# Return the inverse
MyInverse
} |
#' Desescalar coeficientes previamente escalados y centrados.
#'
#' Permite desescalar y descentrar los coeficientes de un modelo ajustado con las variables
#' escaladas. Esta función también permite graficar las variables del eje X en su escala original.
#' @param coef Coeficientes del modelo.
#' @param scaled_covariate variable escalada que contiene la media y la desviación estándar como atributos.
#'
#' @return coeficientes o medias reconvertidas en su escala original
#' @export
#'
#' @examples
#' #unscale.coef(-0.3440, using_scale)
#' #unscale.coef(coefs, df_scaled)
#' #
#' #
#' #Si lo que se quiere es graficar los efectos de un modelo en el que se
#' #usaron variables escaladas y se desea graficar en su escala original:
#' #Primero se escala la variable y se guarda en un objeto con
#' #los atributos de la media y SD
#' #alt.sc<- scale(DataR$Altitud, center = TRUE, scale = TRUE)
#' #library(ggeffects)
#' #dat<- ggeffect(m, c("Altitud", "Monta?a"))
#' #luego se retransforman los valores predichos por el modelo a su escala
#' #original usando la variable escalada.
#' #dat$x <- unscale.coef(dat$x, alt.sc)
#' #plot(dat)
#' @encoding UTF-8
## Undo the z-transformation applied by scale(center = TRUE, scale = TRUE):
## given z = (x - mean) / sd, recover x = z * sd + mean.
## 'scaled_covariate' must carry the "scaled:center" and "scaled:scale"
## attributes that scale() stores on its result.
## NOTE(review): attributes(...)[-1] assumes the first attribute is 'dim'
## (true for a plain scale() result) -- confirm for other inputs.
unscale.coef <- function(coef, scaled_covariate){
  ## Mean and standard deviation recorded on the scaled variable:
  ## element 1 = center (mean), element 2 = scale (sd).
  center_scale <- unlist(attributes(scaled_covariate)[-1])
  ## Reverse the z-transformation and strip the attribute-derived names.
  unname(coef * center_scale[2] + center_scale[1])
}
| /man/R/unscale.coef.R | permissive | mariosandovalmx/tlamatini | R | false | false | 1,517 | r | #' Desescalar coeficientes previamente escalados y centrados.
#'
#' Permite desescalar y descentrar los coeficientes de un modelo ajustado con las variables
#' escaladas. Esta funci?n tambien permite graficar las variables del eje X en su escala original.
#' @param coef Coeficientes del modelo.
#' @param scaled_covariate variable escalada que contiene la media y la desviaci?n estandar como atributos.
#'
#' @return coeficientes o medias reconvertidas en su escala original
#' @export
#'
#' @examples
#' #unscale.coef(-0.3440, using_scale)
#' #unscale.coef(coefs, df_scaled)
#' #
#' #
#' #Si lo que se quiere es graficar los efectos de un modelo en el que se
#' #usaron variables escaladas y se desea graficar en su escala original:
#' #Primero se escala la variable y se guarda en un objeto con
#' #los atributos de la media y SD
#' #alt.sc<- scale(DataR$Altitud, center = TRUE, scale = TRUE)
#' #library(ggeffects)
#' #dat<- ggeffect(m, c("Altitud", "Monta?a"))
#' #luego se retransforman los valores predichos por el modelo a su escala
#' #original usando la variable escalada.
#' #dat$x <- unscale.coef(dat$x, alt.sc)
#' #plot(dat)
#' @encoding UTF-8
unscale.coef <- function(coef, scaled_covariate){
# collect mean and standard deviation from scaled covariate
mean_sd <- unlist(attributes(scaled_covariate)[-1])
# reverse the z-transformation
answer <- (coef * mean_sd[2]) + mean_sd[1]
# this value will have a name, remove it
names(answer) <- NULL
# return unscaled coef
return(answer)
}
|
\name{XLView}
\alias{XLView}
\alias{XLKill}
\alias{ToXL}
\alias{ToXL.data.frame}
\alias{ToXL.matrix}
\alias{ToXL.default}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Use MS-Excel as Viewer for a Data.Frame
%% ~~function to do ... ~~
}
\description{
\code{XLView} can be used to view and edit a data.frame directly in MS-Excel, resp. to create a new data.frame in MS-Excel.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
XLView(x, col.names = TRUE, row.names = FALSE, na = "", preserveStrings = FALSE)
ToXL(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{data.frame}(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{matrix}(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{default}(x, at, byrow = FALSE, ..., xl=DescToolsOptions("lastXL"))
XLKill()
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ is a data.frame to be transferred to MS-Excel. If data is missing a new file will be created.
%% ~~Describe \code{data} here~~
}
\item{row.names}{ either a logical value indicating whether the row names of x are to be written along with x, or
a character vector of row names to be written.}
\item{col.names}{either a logical value indicating whether the column names of x are to be written
along with x, or a character vector of column names to be written.
See the section on 'CSV files' \code{\link{write.table}} for the meaning of \code{col.names = NA}. }
\item{na}{the string to use for missing values in the data.}
\item{preserveStrings}{logical, will preserve strings from being converted to numerics when imported in MS-Excel. See details. Default is \code{FALSE}.}
\item{at}{can be a range adress as character (e.g. \code{"A1"}), a vector of 2 integers (e.g \code{c(1,1)}) or a cell object as it is returned by \code{xl$Cells(1,1)}, denominating the left upper cell, where the data.frame will be placed in the MS-Excel sheet.}
\item{byrow}{logical, defines if the vector should be inserted by row or by column (default).}
\item{xl}{the pointer to a MS-Excel instance. An new instance can be created with \code{GetNewXL()}, returning the appropriate handle. A handle to an already running instance is returned by \code{GetCurrXL()}.
Default is the last created pointer stored in \code{DescToolsOptions("lastXL")}.}
\item{\dots}{further arguments are not used.}
}
\details{
The data.frame will be exported in CSV format and then imported in MS-Excel. When importing data, MS-Excel will potentially change characters to numeric values. If this seems undesirable (maybe we're losing leading zeros) then you should enclose the text in quotes and prefix it with a =.
x <- \code{gettextf('="\%s"', x)} would do the trick. \cr\cr
Take care: Changes to the data made in MS-Excel will NOT automatically be updated in the original data.frame.
The user will have to read the csv-file into R again.
See examples how to get this done.\cr
\code{ToXL()} is used to export data frames or vectors directly to MS-Excel, without export the data to a csv-file and import it on the XL side. So it it possible to export several data.frames into one Workbook and edit the tables after ones needs.
\code{XLKill} will kill a running XL instance (which might be invisible). Background is the fact, that the simple XL$quit() command
would not terminate a running XL task, but only set it invisible (observe the TaskManager). This ghost version may sometimes confuse XLView and hinder to create a new instance. In such cases you have to do the garbage collection...
}
\value{the name/path of the temporary file edited in MS-Excel.
}
\author{
Andri Signorell <andri@signorell.net>, \code{ToXL()} is based on code of Duncan Temple Lang <duncan@r-project.org>
}
\note{The function works only in Windows and requires \bold{RDCOMClient} to be installed (see: Additional_repositories in DESCRIPTION of the package).
%% ~~further notes~~
}
\seealso{\code{\link{GetNewXL}}, \code{\link{XLGetRange}}, \code{\link{XLGetWorkbook}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
# Windows-specific example
XLView(d.diamonds)
# edit an existing data.frame in MS-Excel, make changes and save there, return the filename
fn <- XLView(d.diamonds)
# read the changed file and store in new data.frame
d.frm <- read.table(fn, header=TRUE, quote="", sep=";")
# Create a new file, edit it in MS-Excel...
fn <- XLView()
# ... and read it into a data.frame when in R again
d.set <- read.table(fn, header=TRUE, quote="", sep=";")
# Export a ftable object, quite elegant...
XLView(format(ftable(Titanic), quote=FALSE), row.names = FALSE, col.names = FALSE)
# Export a data.frame directly to XL, combined with subsequent formatting
xl <- GetNewXL()
owb <- xl[["Workbooks"]]$Add()
sheet <- xl$Sheets()$Add()
sheet[["name"]] <- "pizza"
ToXL(d.pizza[1:10, 1:10], xl$Cells(1,1))
obj <- xl$Cells()$CurrentRegion()
obj[["VerticalAlignment"]] <- xlConst$xlTop
row <- xl$Cells()$CurrentRegion()$rows(1)
# does not work: row$font()[["bold"]] <- TRUE
# works:
obj <- row$font()
obj[["bold"]] <- TRUE
obj <- row$borders(xlConst$xlEdgeBottom)
obj[["linestyle"]] <- xlConst$xlContinuous
cols <- xl$Cells()$CurrentRegion()$columns(1)
cols[["HorizontalAlignment"]] <- xlConst$xlLeft
xl$Cells()$CurrentRegion()[["EntireColumn"]]$AutoFit()
cols <- xl$Cells()$CurrentRegion()$columns(4)
cols[["WrapText"]] <- TRUE
cols[["ColumnWidth"]] <- 80
xl$Cells()$CurrentRegion()[["EntireRow"]]$AutoFit()
sheet <- xl$Sheets()$Add()
sheet[["name"]] <- "whisky"
ToXL(d.whisky[1:10, 1:10], xl$Cells(1,1))}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ manip }
\keyword{MS-Office}
| /man/XLView.Rd | no_license | mainwaringb/DescTools | R | false | false | 5,742 | rd | \name{XLView}
\alias{XLView}
\alias{XLKill}
\alias{ToXL}
\alias{ToXL.data.frame}
\alias{ToXL.matrix}
\alias{ToXL.default}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Use MS-Excel as Viewer for a Data.Frame
%% ~~function to do ... ~~
}
\description{
\code{XLView} can be used to view and edit a data.frame directly in MS-Excel, resp. to create a new data.frame in MS-Excel.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
XLView(x, col.names = TRUE, row.names = FALSE, na = "", preserveStrings = FALSE)
ToXL(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{data.frame}(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{matrix}(x, at, ..., xl=DescToolsOptions("lastXL"))
\method{ToXL}{default}(x, at, byrow = FALSE, ..., xl=DescToolsOptions("lastXL"))
XLKill()
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ is a data.frame to be transferred to MS-Excel. If data is missing a new file will be created.
%% ~~Describe \code{data} here~~
}
\item{row.names}{ either a logical value indicating whether the row names of x are to be written along with x, or
a character vector of row names to be written.}
\item{col.names}{either a logical value indicating whether the column names of x are to be written
along with x, or a character vector of column names to be written.
See the section on 'CSV files' \code{\link{write.table}} for the meaning of \code{col.names = NA}. }
\item{na}{the string to use for missing values in the data.}
\item{preserveStrings}{logical, will preserve strings from being converted to numerics when imported in MS-Excel. See details. Default is \code{FALSE}.}
\item{at}{can be a range adress as character (e.g. \code{"A1"}), a vector of 2 integers (e.g \code{c(1,1)}) or a cell object as it is returned by \code{xl$Cells(1,1)}, denominating the left upper cell, where the data.frame will be placed in the MS-Excel sheet.}
\item{byrow}{logical, defines if the vector should be inserted by row or by column (default).}
\item{xl}{the pointer to a MS-Excel instance. A new instance can be created with \code{GetNewXL()}, returning the appropriate handle. A handle to an already running instance is returned by \code{GetCurrXL()}.
Default is the last created pointer stored in \code{DescToolsOptions("lastXL")}.}
\item{\dots}{further arguments are not used.}
}
\details{
The data.frame will be exported in CSV format and then imported in MS-Excel. When importing data, MS-Excel will potentially change characters to numeric values. If this seems undesirable (maybe we're losing leading zeros) then you should enclose the text in quotes and preset a =.
x <- \code{gettextf('="\%s"', x)} would do the trick. \cr\cr
Take care: Changes to the data made in MS-Excel will NOT automatically be updated in the original data.frame.
The user will have to read the csv-file into R again.
See examples how to get this done.\cr
\code{ToXL()} is used to export data frames or vectors directly to MS-Excel, without exporting the data to a csv-file and importing it on the XL side. So it is possible to export several data.frames into one Workbook and edit the tables after one's needs.
\code{XLKill} will kill a running XL instance (which might be invisible). Background is the fact, that the simple XL$quit() command
would not terminate a running XL task, but only set it invisible (observe the TaskManager). This ghost version may sometimes confuse XLView and hinder to create a new instance. In such cases you have to do the garbage collection...
}
\value{the name/path of the temporary file edited in MS-Excel.
}
\author{
Andri Signorell <andri@signorell.net>, \code{ToXL()} is based on code of Duncan Temple Lang <duncan@r-project.org>
}
\note{The function works only in Windows and requires \bold{RDCOMClient} to be installed (see: Additional_repositories in DESCRIPTION of the package).
%% ~~further notes~~
}
\seealso{\code{\link{GetNewXL}}, \code{\link{XLGetRange}}, \code{\link{XLGetWorkbook}}
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
# Windows-specific example
XLView(d.diamonds)
# edit an existing data.frame in MS-Excel, make changes and save there, return the filename
fn <- XLView(d.diamonds)
# read the changed file and store in new data.frame
d.frm <- read.table(fn, header=TRUE, quote="", sep=";")
# Create a new file, edit it in MS-Excel...
fn <- XLView()
# ... and read it into a data.frame when in R again
d.set <- read.table(fn, header=TRUE, quote="", sep=";")
# Export a ftable object, quite elegant...
XLView(format(ftable(Titanic), quote=FALSE), row.names = FALSE, col.names = FALSE)
# Export a data.frame directly to XL, combined with subsequent formatting
xl <- GetNewXL()
owb <- xl[["Workbooks"]]$Add()
sheet <- xl$Sheets()$Add()
sheet[["name"]] <- "pizza"
ToXL(d.pizza[1:10, 1:10], xl$Cells(1,1))
obj <- xl$Cells()$CurrentRegion()
obj[["VerticalAlignment"]] <- xlConst$xlTop
row <- xl$Cells()$CurrentRegion()$rows(1)
# does not work: row$font()[["bold"]] <- TRUE
# works:
obj <- row$font()
obj[["bold"]] <- TRUE
obj <- row$borders(xlConst$xlEdgeBottom)
obj[["linestyle"]] <- xlConst$xlContinuous
cols <- xl$Cells()$CurrentRegion()$columns(1)
cols[["HorizontalAlignment"]] <- xlConst$xlLeft
xl$Cells()$CurrentRegion()[["EntireColumn"]]$AutoFit()
cols <- xl$Cells()$CurrentRegion()$columns(4)
cols[["WrapText"]] <- TRUE
cols[["ColumnWidth"]] <- 80
xl$Cells()$CurrentRegion()[["EntireRow"]]$AutoFit()
sheet <- xl$Sheets()$Add()
sheet[["name"]] <- "whisky"
ToXL(d.whisky[1:10, 1:10], xl$Cells(1,1))}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ manip }
\keyword{MS-Office}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{biasFactor.hyfo-class}
\alias{biasFactor.hyfo-class}
\title{An S4 class, representing the biasFactor of hyfo file.}
\description{
An S4 class, representing the biasFactor of hyfo file.
}
\section{Slots}{
\describe{
\item{\code{lonLatDim}}{lists of biasFactor}
}}
| /man/biasFactor.hyfo-class.Rd | no_license | Yuanchao-Xu/hyfo | R | false | true | 375 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\docType{class}
\name{biasFactor.hyfo-class}
\alias{biasFactor.hyfo-class}
\title{An S4 class, representing the biasFactor of hyfo file.}
\description{
An S4 class, representing the biasFactor of hyfo file.
}
\section{Slots}{
\describe{
\item{\code{lonLatDim}}{lists of biasFactor}
}}
|
# Figure 18.11
xy <- scan('surgery.dat',what=list(x=0,y=0))
x<- xy$x
y<- xy$y
# glm
x0 <- x-mean(x)
xglm <- glm(y~x,family=binomial)
lx<-x
npar<- 21
xx<- seq(min(lx)-.0001,max(lx),len=npar)
cutx<- cut(lx,breaks=xx)
midx<- xx[-1] - (xx[2]-xx[1])/2
# bin statistics
n <- as.numeric(table(cutx))
ysum<- as.numeric(tapply(y,cutx,sum))
ysum[is.na(ysum)]<- 0.
delta<- matrix(0,ncol=(npar-1),nrow=(npar-2))
for (i in 1:(npar-2)){
delta[i,i]<- -1
delta[i,i+1]<- 1
}
#R1 <- t(delta) %*% delta
delta2<- delta[-1,-1]%*% delta
R1 <- t(delta2) %*% delta2
# IRLS working quantities for the binomial mixed model.
# Given the fixed intercept `beta` and random effects `b` (one per bin),
# returns the working response Y, the IRLS weights wt, the linear
# predictor eta and the fitted probabilities p.  Reads the globals
# `ysum` (per-bin event counts) and `n` (per-bin totals) defined
# earlier in this script.
Yfun <- function(beta, b){
  eta <- beta + b
  p <- exp(eta)/(1 + exp(eta))
  # the small constant guards against division by zero when p is 0 or 1
  err <- (ysum - n*p)/(n*p*(1 - p) + .000001)
  Y <- eta + err
  wt <- n*p*(1 - p)
  # Bug fix: multi-argument return() is not permitted in R, so the
  # original `return(Y=c(Y), wt=c(wt), ...)` raised an error at run
  # time.  Return a named list instead; callers already index the
  # result with a$Y, a$wt, a$eta and a$p.
  list(Y=c(Y), wt=c(wt), eta=c(eta), p=c(p))
}
# .......... starting values
beta<- log(mean(y))- log((1-mean(y)))
b<-0
s2b<- .01
lambda<- 1/s2b
olds2<- 1000
par(mfrow=c(2,2))
plot(x,y,xlab='Age',ylab='Death rate',type='n',ylim=c(0,1))
points(x,y,cex=.6)
lines(x,xglm$fit,lty='dotted',lwd=1.5)
iter<-0
while(abs(olds2-s2b)/s2b >.0001){
iter<- iter+1
olds2 <- s2b
# step 1
a<- Yfun(beta,b); Y<- a$Y; wt<- a$wt
b <- solve((diag(wt) + lambda*R1),wt*(Y-beta))
# step 2
beta<- sum(wt*(Y - b))/sum(wt)
# step 3
smat <- solve(diag(wt) + lambda*R1)
df <- sum(diag(smat)*wt)
brb <- c(b%*%R1 %*%b)
mat <- smat * R1
s2b <- 1/(npar-1-2)*(brb + sum(mat)) # sum(mat) = trace(smat %*% R1)
# since R1 is symmetric
cat('iter= ', iter,'s2b= ',s2b,'df= ', df,'\n')
lambda<- 1/s2b
} # end iter
a<- Yfun(beta,b);
lines(midx,a$p)
title(expression(paste('(a) ',hat(sigma)[b]^2,'= 0.017')))
# prediction band
plot(x,y,xlab='Age',ylab='Death rate',type='n',ylim=c(0,1))
points(x,y,cex=.6)
lines(midx,a$p)
eta<- a$eta+ 1.96* sqrt(diag(smat))
upper<- exp(eta)/(1+ exp(eta))
lines(midx,upper,lwd=.4)
eta<- a$eta- 1.96* sqrt(diag(smat))
lower<- exp(eta)/(1+ exp(eta))
lines(midx,lower,lwd=.4)
title(expression('(b) Prediction band'))
| /R/LKPACK/FIG18-11.R | permissive | yanliangs/in-all-likelihood | R | false | false | 2,072 | r | # Figure 18.11
xy <- scan('surgery.dat',what=list(x=0,y=0))
x<- xy$x
y<- xy$y
# glm
x0 <- x-mean(x)
xglm <- glm(y~x,family=binomial)
lx<-x
npar<- 21
xx<- seq(min(lx)-.0001,max(lx),len=npar)
cutx<- cut(lx,breaks=xx)
midx<- xx[-1] - (xx[2]-xx[1])/2
# bin statistics
n <- as.numeric(table(cutx))
ysum<- as.numeric(tapply(y,cutx,sum))
ysum[is.na(ysum)]<- 0.
delta<- matrix(0,ncol=(npar-1),nrow=(npar-2))
for (i in 1:(npar-2)){
delta[i,i]<- -1
delta[i,i+1]<- 1
}
#R1 <- t(delta) %*% delta
delta2<- delta[-1,-1]%*% delta
R1 <- t(delta2) %*% delta2
# IRLS working quantities for the binomial mixed model.
# Given the fixed intercept `beta` and random effects `b` (one per bin),
# returns the working response Y, the IRLS weights wt, the linear
# predictor eta and the fitted probabilities p.  Reads the globals
# `ysum` (per-bin event counts) and `n` (per-bin totals) defined
# earlier in this script.
Yfun <- function(beta, b){
  eta <- beta + b
  p <- exp(eta)/(1 + exp(eta))
  # the small constant guards against division by zero when p is 0 or 1
  err <- (ysum - n*p)/(n*p*(1 - p) + .000001)
  Y <- eta + err
  wt <- n*p*(1 - p)
  # Bug fix: multi-argument return() is not permitted in R, so the
  # original `return(Y=c(Y), wt=c(wt), ...)` raised an error at run
  # time.  Return a named list instead; callers already index the
  # result with a$Y, a$wt, a$eta and a$p.
  list(Y=c(Y), wt=c(wt), eta=c(eta), p=c(p))
}
# .......... starting values
beta<- log(mean(y))- log((1-mean(y)))
b<-0
s2b<- .01
lambda<- 1/s2b
olds2<- 1000
par(mfrow=c(2,2))
plot(x,y,xlab='Age',ylab='Death rate',type='n',ylim=c(0,1))
points(x,y,cex=.6)
lines(x,xglm$fit,lty='dotted',lwd=1.5)
iter<-0
while(abs(olds2-s2b)/s2b >.0001){
iter<- iter+1
olds2 <- s2b
# step 1
a<- Yfun(beta,b); Y<- a$Y; wt<- a$wt
b <- solve((diag(wt) + lambda*R1),wt*(Y-beta))
# step 2
beta<- sum(wt*(Y - b))/sum(wt)
# step 3
smat <- solve(diag(wt) + lambda*R1)
df <- sum(diag(smat)*wt)
brb <- c(b%*%R1 %*%b)
mat <- smat * R1
s2b <- 1/(npar-1-2)*(brb + sum(mat)) # sum(mat) = trace(smat %*% R1)
# since R1 is symmetric
cat('iter= ', iter,'s2b= ',s2b,'df= ', df,'\n')
lambda<- 1/s2b
} # end iter
a<- Yfun(beta,b);
lines(midx,a$p)
title(expression(paste('(a) ',hat(sigma)[b]^2,'= 0.017')))
# prediction band
plot(x,y,xlab='Age',ylab='Death rate',type='n',ylim=c(0,1))
points(x,y,cex=.6)
lines(midx,a$p)
eta<- a$eta+ 1.96* sqrt(diag(smat))
upper<- exp(eta)/(1+ exp(eta))
lines(midx,upper,lwd=.4)
eta<- a$eta- 1.96* sqrt(diag(smat))
lower<- exp(eta)/(1+ exp(eta))
lines(midx,lower,lwd=.4)
title(expression('(b) Prediction band'))
|
## Validity check for BatchModel objects: every parameter container
## must agree with the number of mixture components k, and k must match
## the hyperparameters.  Returns TRUE when valid, otherwise a message.
setValidity("BatchModel", function(object){
  n_comp <- k(object)
  if (length(p(object)) != n_comp) {
    return("Mixture probability vector must be the same length as k")
  }
  if (ncol(theta(object)) != n_comp) {
    return("theta matrix must have k columns")
  }
  if (ncol(sigma(object)) != n_comp) {
    return("sigma matrix must have k columns")
  }
  if (length(mu(object)) != n_comp) {
    return("mu vector must be length k ")
  }
  if (length(tau(object)) != n_comp) {
    return("tau vector must be length k ")
  }
  if (n_comp != k(hyperParams(object))) {
    return("k must be the same in the hyperparameters and in the model object")
  }
  TRUE
})
#' Constructor for list of batch models
#'
#' An object of class BatchModel is constructed for each k, creating a list of
#' BatchModels.
#'
#' @param data numeric vector of average log R ratios
#' @param batch vector of batch labels
#' @param mcmc.params a \code{McmcParams} object
#' @param k numeric vector indicating the number of mixture components for each model
#' @param ... additional arguments to \code{HyperparametersBatch}
#' @return a list. Each element of the list is a \code{BatchModel}
#' @seealso \code{\link{BatchModel}}. For single-batch data, use \code{\link{MarginalModelList}}.
#' @examples
#' mlist <- BatchModelList(data=y(BatchModelExample), k=1:4, batch=batch(BatchModelExample))
#' mcmcParams(mlist) <- McmcParams(iter=1, burnin=1, nStarts=0)
#' mlist2 <- posteriorSimulation(mlist)
#' @export
BatchModelList <- function(data=numeric(),
                           k=numeric(),
                           batch,
                           mcmc.params=McmcParams(),
                           ...){
  ## Deprecated entry point; kept for backward compatibility.
  ## New code should call MultiBatchModelList() instead.
  .Deprecated("See MultiBatchModelList")
  ## Preallocate one slot per candidate number of components.
  model.list <- vector("list", length(k))
  for(i in seq_along(k)){
    ## Fresh hyperparameters for each candidate k; extra arguments
    ## are forwarded to HyperparametersBatch().
    hypp <- HyperparametersBatch(k=k[i], ...)
    model.list[[i]] <- BatchModel(data=data, k=k[i], batch=batch,
                                  mcmc.params=mcmc.params,
                                  hypp=hypp)
  }
  model.list
}
#' Constructor for list of batch models
#'
#' An object of class MultiBatchModel is constructed for each k, creating a list of
#' MultiBatchModels.
#'
#' @param data numeric vector of average log R ratios
#' @param batch vector of batch labels
#' @param mcmc.params a \code{McmcParams} object
#' @param k numeric vector indicating the number of mixture components for each model
#' @param ... additional arguments to \code{HyperparametersBatch}
#' @return a list. Each element of the list is a \code{MultiBatchModel}
#' @seealso \code{\link{BatchModel}}. For single-batch data, use \code{\link{MarginalModelList}}.
#' @examples
#' mlist <- MultiBatchModelList(data=y(MultiBatchModelExample), k=1:4,
#'                              batch=batch(MultiBatchModelExample))
#' mcmcParams(mlist) <- McmcParams(iter=1, burnin=1, nStarts=0)
#' mlist2 <- posteriorSimulation(mlist)
#' @export
MultiBatchModelList <- function(data=numeric(),
                                k=numeric(),
                                batch,
                                mcmc.params=McmcParams(),
                                ...){
  ## Preallocate one slot per candidate number of components.
  model.list <- vector("list", length(k))
  for(i in seq_along(k)){
    ## Fresh hyperparameters for each candidate k; extra arguments
    ## are forwarded to HyperparametersMultiBatch().
    hypp <- HyperparametersMultiBatch(k=k[i], ...)
    model.list[[i]] <- MultiBatchModel(data=data, k=k[i], batch=batch,
                                       mcmc.params=mcmc.params,
                                       hypp=hypp)
  }
  model.list
}
#' Create an object for running hierarchical MCMC simulations.
#' @examples
#' model <- BatchModel(rnorm(10), k=1, batch=rep(1:2, each=5))
#' @param data the data for the simulation.
#' @param k An integer value specifying the number of latent classes.
#' @param batch a vector of the different batch numbers (must be sorted)
#' @param hypp An object of class `Hyperparameters` used to specify the hyperparameters of the model.
#' @param mcmc.params An object of class 'McmcParams'
#' @return An object of class `BatchModel`
#' @export
BatchModel <- function(data=numeric(),
                       k=3,
                       batch,
                       hypp,
                       mcmc.params){
  ## Defaults: a single batch labelled "a", 1000 iterations after a
  ## 100-iteration burnin, and default batch hyperparameters.
  if(missing(batch)) batch <- as.integer(factor(rep("a", length(data))))
  if(missing(mcmc.params)) mcmc.params <- McmcParams(iter=1000, burnin=100)
  if(missing(hypp)) hypp <- HyperparametersBatch(k=k)
  ## When only hyperparameters are given, take k from them.
  if(missing(k) & !missing(hypp)){
    k <- k(hypp)
  }
  mcmc.chains <- McmcChains()
  ## Recode batch labels to consecutive integers, remembering the
  ## original labels in `nbatch`'s names.
  bf <- factor(batch)
  batch <- as.integer(bf)
  ub <- unique(batch)
  ##ix <- order(batch)
  ix <- seq_along(batch)
  nbatch <- setNames(as.integer(table(batch)), levels(bf))
  B <- length(ub)
  ## Degenerate case 1: a single batch -- delegate to the
  ## single-batch constructor.  NOTE(review): the missing(hypp)
  ## branches below can never fire because hypp was defaulted above.
  if(B==1 && length(data) > 0){
    if(missing(hypp)) hypp <- HyperparametersMarginal(k=k)
    zz <- as.integer(factor(numeric(k)))
    zfreq <- as.integer(table(zz))
    obj <- SingleBatchModel(data, k, hypp, mcmc.params)
    return(obj)
  }
  ## Degenerate case 2: a single component -- delegate to the
  ## univariate batch constructor.
  if(k == 1) {
    if(missing(hypp)) hypp <- HyperparametersBatch(k=1)
    obj <- UnivariateBatchModel(data, k, batch, hypp, mcmc.params)
    return(obj)
  }
  if(missing(hypp)) hypp <- HyperparametersBatch(k=k)
  ## Latent class assignments start empty; startingValues() fills them.
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  if(length(data) != length(batch)) {
    stop("batch vector must be the same length as data")
  }
  ## Parameter slots are placeholders (NA / zero) until
  ## startingValues() initializes them.
  obj <- new("BatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, k),
             sigma2=matrix(NA, B, k),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             data=data[ix],
             data.mean=matrix(NA, B, k),
             data.prec=matrix(NA, B, k),
             z=zz,
             zfreq=zfreq,
             probz=matrix(0, length(data), k),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4,
             .internal.counter=0L)
  obj <- startingValues(obj)
  obj
}
## Build a zero-observation BatchModel skeleton from hyperparameters.
##
## Used by constructors when no data are supplied: all data-dependent
## slots are length-zero vectors / zero-row matrices, and the chains
## are sized from the (empty) model.
##
## @param hp hyperparameters object (k(hp) gives the component count)
## @param mp an object of class McmcParams.  Bug fix: the original body
##   referenced `mp` although only `hp` was a parameter, so the
##   function failed with "object 'mp' not found"; `mp` is now an
##   explicit argument with a default, which keeps the existing
##   single-argument call sites working.
## @return an empty BatchModel
.empty_batch_model <- function(hp, mp=McmcParams()){
  K <- k(hp)
  B <- 0
  N <- 0
  obj <- new("BatchModel",
             k=as.integer(K),
             hyperparams=hp,
             theta=matrix(NA, 0, K),
             sigma2=matrix(NA, 0, K),
             mu=numeric(K),
             tau2=numeric(K),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(K),
             data=numeric(0),
             data.mean=matrix(NA, B, K),
             data.prec=matrix(NA, B, K),
             z=integer(0),
             zfreq=integer(K),
             probz=matrix(0, N, K),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=McmcChains(),
             mcmc.params=mp,
             batch=integer(0),
             batchElements=integer(0),
             label_switch=FALSE,
             marginal_lik=as.numeric(NA),
             .internal.constraint=5e-4,
             .internal.counter=0L)
  ## Size the chains container from the empty model.
  chains(obj) <- McmcChains(obj)
  obj
}
#' Create an object for running hierarchical MCMC simulations.
#' @examples
#' model <- MultiBatchModel(rnorm(10), k=1, batch=rep(1:2, each=5))
#' @param data the data for the simulation.
#' @param k An integer value specifying the number of latent classes.
#' @param batch a vector of the different batch numbers (must be sorted)
#' @param hypp An object of class `Hyperparameters` used to specify the hyperparameters of the model.
#' @param mcmc.params An object of class 'McmcParams'
#' @return An object of class `MultiBatchModel`
#' @export
MultiBatchModel <- function(data=numeric(),
                            k=3,
                            batch,
                            hypp,
                            mcmc.params){
  ## Defaults: a single batch labelled "a", 1000 iterations after a
  ## 100-iteration burnin, and default multi-batch hyperparameters.
  if(missing(batch)) batch <- as.integer(factor(rep("a", length(data))))
  if(missing(mcmc.params)) mcmc.params <- McmcParams(iter=1000, burnin=100)
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=k)
  ## When only hyperparameters are given, take k from them.
  if(missing(k) & !missing(hypp)){
    k <- k(hypp)
  }
  mcmc.chains <- McmcChains()
  ## Recode batch labels to consecutive integers, remembering the
  ## original labels in `nbatch`'s names.
  bf <- factor(batch)
  batch <- as.integer(bf)
  ub <- unique(batch)
  ##ix <- order(batch)
  ix <- seq_along(batch)
  nbatch <- setNames(as.integer(table(batch)), levels(bf))
  B <- length(ub)
  ## Degenerate case 1: a single batch -- delegate to the
  ## single-batch constructor.  NOTE(review): the missing(hypp)
  ## branches below can never fire because hypp was defaulted above.
  if(B==1 && length(data) > 0){
    if(missing(hypp)) hypp <- HyperparametersMarginal(k=k)
    zz <- as.integer(factor(numeric(k)))
    zfreq <- as.integer(table(zz))
    obj <- SingleBatchModel(data, k, hypp, mcmc.params)
    return(obj)
  }
  ## Degenerate case 2: a single component -- delegate to the
  ## univariate batch constructor.
  if(k == 1) {
    if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=1)
    obj <- UnivariateBatchModel(data, k, batch, hypp, mcmc.params)
    return(obj)
  }
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=k)
  ## Latent class assignments start empty; startingValues() fills them.
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  if(length(data) != length(batch)) {
    stop("batch vector must be the same length as data")
  }
  ## Parameter slots are placeholders (NA / zero) until
  ## startingValues() initializes them.
  obj <- new("MultiBatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, k),
             sigma2=matrix(NA, B, k),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             data=data[ix],
             data.mean=matrix(NA, B, k),
             data.prec=matrix(NA, B, k),
             z=zz,
             zfreq=zfreq,
             probz=matrix(0, length(data), k),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4,
             .internal.counter=0L)
  obj <- startingValues(obj)
  obj
}
## Alternative MultiBatchModel constructor that draws starting values
## directly from the prior (rather than via startingValues()).
##
## NOTE(review): this function instantiates class "BatchModel", not
## "MultiBatchModel", despite its name -- confirm this is intended.
## NOTE(review): the empty-data branch calls .empty_batch_model(hp)
## without forwarding `mp` -- confirm the supplied McmcParams should be
## dropped in that case.
## Relies on the magrittr pipe `%>%` and purrr::map2 being attached.
MultiBatchModel2 <- function(dat=numeric(),
                             hp=HyperparametersMultiBatch(),
                             mp=McmcParams(),
                             batches=integer()){
  if(length(dat) == 0){
    return(.empty_batch_model(hp))
  }
  ub <- unique(batches)
  nbatch <- setNames(as.integer(table(batches)), ub)
  B <- length(ub)
  N <- length(dat)
  ## move to setValidity
  if(length(dat) != length(batches)) {
    stop("batch vector must be the same length as data")
  }
  K <- k(hp)
  ## mu_k is the average across batches of the thetas for component k
  ## tau_k is the sd of the batch means for component k
  mu <- sort(rnorm(k(hp), mu.0(hp), sqrt(tau2.0(hp))))
  tau2 <- 1/rgamma(k(hp), 1/2*eta.0(hp), 1/2*eta.0(hp) * m2.0(hp))
  ## mixture probabilities drawn from the Dirichlet prior
  p <- rdirichlet(1, alpha(hp))[1, ]
  ## one sorted draw of batch-level means per component
  sim_theta <- function(mu, tau, B) sort(rnorm(B, mu, tau))
  ##library(magrittr)
  ## B x K matrix of batch/component means, rows sorted so components
  ## stay ordered within each batch
  thetas <- map2(mu, sqrt(tau2), sim_theta, B) %>%
    do.call(cbind, .) %>%
    apply(., 1, sort) %>%
    t
  ## apply() transposes its result, so undo that for the K == 1 case
  if(K == 1) thetas <- t(thetas)
  nu.0 <- 3.5
  sigma2.0 <- 0.25
  ## batch/component variances drawn from the scaled inverse-gamma prior
  sigma2s <- 1/rgamma(k(hp) * B, 0.5 * nu.0, 0.5 * nu.0 * sigma2.0) %>%
    matrix(B, k(hp))
  obj <- new("BatchModel",
             k=as.integer(K),
             hyperparams=hp,
             theta=thetas,
             sigma2=sigma2s,
             mu=mu,
             tau2=tau2,
             nu.0=nu.0,
             sigma2.0=sigma2.0,
             pi=p,
             data=dat,
             data.mean=matrix(NA, B, K),
             data.prec=matrix(NA, B, K),
             z=integer(N),
             zfreq=integer(K),
             probz=matrix(0, N, K),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=McmcChains(),
             mcmc.params=mp,
             batch=batches,
             batchElements=nbatch,
             label_switch=FALSE,
             marginal_lik=as.numeric(NA),
             .internal.constraint=5e-4,
             .internal.counter=0L)
  ## size the chains container from the model
  chains(obj) <- McmcChains(obj)
  obj
}
## Relabel observations so that every batch has more than one
## observation assigned to every mixture component.  Within each
## under-populated batch, observations are moved at random from the
## most frequent component to the components with <= 1 member.
## Mutates the z slot of `obj` and returns the modified object.
## NOTE(review): assumes batch labels are the integers 1..B so that
## `batch(obj) == j` matches row j of the table -- confirm.
ensureAllComponentsObserved <- function(obj){
  zz <- table(batch(obj), z(obj))
  K <- seq_len(k(obj))
  if(any(zz<=1)){
    ## batches containing at least one component with <= 1 observation
    index <- which(rowSums(zz<=1) > 0)
    for(i in seq_along(index)){
      j <- index[i]
      zup <- z(obj)[batch(obj) == j]
      zfact <- factor(zup, levels=K)
      ## components with <= 1 member in this batch
      minz <- as.integer(names(table(zfact))[table(zfact) <= 1])
      ##missingk <- K[!K %in% unique(zup)]
      ## the most populated component donates observations
      maxk <- names(table(zfact))[which.max(table(zfact))]
      ## two replacements per under-populated component; the recycled
      ## `minz` spreads them across those components
      nreplace <- length(minz)*2
      zup[sample(which(zup == maxk), nreplace)] <- as.integer(minz)
      obj@z[batch(obj) == j] <- as.integer(zup)
    }
  }
  obj
}
##
## Multiple batches, but only 1 component
##
## Constructor for the degenerate one-component case.  Unlike
## BatchModel(), the data are reordered by batch (ix <- order(bf)),
## and every observation is assigned to the single component with
## posterior probability 1.
UnivariateBatchModel <- function(data, k=1, batch, hypp, mcmc.params){
  mcmc.chains <- McmcChains()
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  ## recode batch labels to consecutive integers
  bf <- factor(batch)
  batch <- as.integer(bf)
  ix <- order(bf)
  ##B <- length(levels(batch))
  nbatch <- elementNROWS(split(batch, batch))
  B <- length(nbatch)
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=1)
  ## Parameter slots are placeholders until startingValues() runs.
  obj <- new("UnivariateBatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, 1),
             sigma2=matrix(NA, B, 1),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             ##pi=matrix(NA, B, 1),
             data=data[ix],
             data.mean=matrix(NA, B, 1),
             data.prec=matrix(NA, B, 1),
             z=integer(length(data)),
             zfreq=zfreq,
             probz=matrix(0, length(data), 1),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4)
  obj <- startingValues(obj)
  ## With one component, membership is certain.
  if(!is.null(obj)){
    obj@probz[, 1] <- 1
  }
  obj
}
##
## This is the problem with calling new("BatchModel", ) with placeholders for values
## setValidity("BatchModel", function(object){
## msg <- TRUE
## ztab <- table(batch(object), z(object))
## ## if(any(ztab < 1)){
## ## msg <- "All components in each batch must have 1 or more observations"
## ## return(msg)
## ## }
## ## if(ncol(ztab) != nrow(ztab)){
## ## msg <- "All batches much have at least one observation from each component"
## ## return(msg)
## ## }
## ## A valid batch model should have data ordered by batch
## ## deltas <- diff(batch(object))
## ## if(!all(deltas > 0)){
## ## msg <- "Constructor for BatchModel should return data and batch assignment in batch-order"
## ## return(msg)
## ## }
## ## if(length(y(object)) > 0){
## ## pz <- probz(object)
## ## maxprob <- max(pz)
## ## if(maxprob > 1 ){
## ## msg <- "Posterior probabilities exceed 1"
## ## return(msg)
## ## }
## ## }
## msg
## })
#' extract data, latent variable, and batch for given observation
#' @name extract
#' @param x An object of class BatchModel, McmcChains, or McmcParams
#' @param i An element of the instance to be extracted.
#' @param j Not used.
#' @param ... Not used.
#' @param drop Not used.
#' @return An object of class 'BatchModel'
#' @aliases [,BatchModel-method [,BatchModel,ANY-method [,BatchModel,ANY,ANY-method [,BatchModel,ANY,ANY,ANY-method
#' @docType methods
#' @rdname extract-methods
setMethod("[", "BatchModel", function(x, i, j, ..., drop=FALSE){
  ## Guard clause: with no index, return the model unchanged.
  if (missing(i)) {
    return(x)
  }
  ## Subset the observation-level slots in lockstep.
  y(x) <- y(x)[i]
  z(x) <- z(x)[i]
  batch(x) <- batch(x)[i]
  x
})
#' extract data, latent variable, and batch for given observation
#' @name extract
#' @param x An object of class MultiBatchModel, McmcChains, or McmcParams
#' @param i An element of the instance to be extracted.
#' @param j Not used.
#' @param ... Not used.
#' @param drop Not used.
#' @return An object of class 'MultiBatchModel'
#' @aliases [,MultiBatchModel-method [,MultiBatchModel,ANY-method [,MultiBatchModel,ANY,ANY-method [,MultiBatchModel,ANY,ANY,ANY-method
#' @docType methods
#' @rdname extract-methods
setMethod("[", "MultiBatchModel", function(x, i, j, ..., drop=FALSE){
  ## Guard clause: with no index, return the model unchanged.
  if (missing(i)) {
    return(x)
  }
  ## Subset the observation-level slots in lockstep.
  y(x) <- y(x)[i]
  z(x) <- z(x)[i]
  batch(x) <- batch(x)[i]
  x
})
#' @rdname bic-method
#' @aliases bic,BatchModel-method
setMethod("bic", "BatchModel", function(object){
  object <- useModes(object)
  ## K: number of free parameters to be estimated
  ##   - theta, sigma2 are batch- and component-specific: 2 * k * nBatch
  ##   - mixing probabilities: (k-1)
  ##     NOTE(review): an earlier comment said (k-1)*nBatch but the
  ##     expression below counts only (k-1); confirm whether the mixing
  ##     probabilities are shared across batches.
  ##   - component-specific parameters mu, tau2: 2 * k
  ##   - length-one parameters sigma2.0, nu.0: +2
  K <- 2*k(object)*nBatch(object) + (k(object)-1) + 2*k(object) + 2
  n <- length(y(object))
  ## BIC with the prior folded into the likelihood term.
  ## NOTE(review): uses K*(log(n) - log(2*pi)) rather than the standard
  ## K*log(n) -- confirm this penalty is intentional.
  bicstat <- -2*(log_lik(object) + logPrior(object)) + K*(log(n) - log(2*pi))
  bicstat
})
#' @rdname bic-method
#' @aliases bic,MultiBatchModel-method
setMethod("bic", "MultiBatchModel", function(object){
  object <- useModes(object)
  ## Same parameter count and penalty as the BatchModel method above;
  ## see the notes there.
  K <- 2*k(object)*nBatch(object) + (k(object)-1) + 2*k(object) + 2
  n <- length(y(object))
  bicstat <- -2*(log_lik(object) + logPrior(object)) + K*(log(n) - log(2*pi))
  bicstat
})
#' @rdname collapseBatch-method
#' @aliases collapseBatch,BatchModel-method
## Dispatch to the numeric-vector collapseBatch method using the
## model's data and character batch labels.
setMethod("collapseBatch", "BatchModel", function(object){
  collapseBatch(y(object), as.character(batch(object)))
})
#' @rdname collapseBatch-method
#' @aliases collapseBatch,MultiBatchModel-method
setMethod("collapseBatch", "MultiBatchModel", function(object){
  collapseBatch(y(object), as.character(batch(object)))
})
## Weighted normal density: mixture weight `p` times the normal density
## of `x` with the given mean and sd (vectorized over all arguments).
batchLik <- function(x, p, mean, sd) {
  p * dnorm(x, mean = mean, sd = sd)
}
## Thin dispatch wrappers: BatchModel and MultiBatchModel share the
## same internal implementations (compute_*_batch, presumably compiled
## helpers -- defined elsewhere in the package).
setMethod("computeMeans", "BatchModel", function(object){
  compute_means_batch(object)
})
setMethod("computeMeans", "MultiBatchModel", function(object){
  compute_means_batch(object)
})
setMethod("computePrec", "BatchModel", function(object){
  compute_prec_batch(object)
})
setMethod("computePrec", "MultiBatchModel", function(object){
  compute_prec_batch(object)
})
setMethod("computePrior", "BatchModel", function(object){
  compute_logprior_batch(object)
})
setMethod("computePrior", "MultiBatchModel", function(object){
  compute_logprior_batch(object)
})
## Extract the posterior-mode parameter values from the MCMC chains:
## the iteration with the highest (argMax) target value is located and
## each parameter is read from that row of the chains, reshaped back to
## its batch-by-component layout where applicable.
.computeModesBatch <- function(object){
  i <- argMax(object)
  mc <- chains(object)
  B <- nBatch(object)
  K <- k(object)
  ## chains store theta/sigma2 as flat rows; reshape to B x K
  thetamax <- matrix(theta(mc)[i, ], B, K)
  sigma2max <- matrix(sigma2(mc)[i, ], B, K)
  pmax <- p(mc)[i, ]
  mumax <- mu(mc)[i, ]
  tau2max <- tau2(mc)[i,]
  modes <- list(theta=thetamax,
                sigma2=sigma2max,
                mixprob=pmax,
                mu=mumax,
                tau2=tau2max,
                nu0=nu.0(mc)[i],
                sigma2.0=sigma2.0(mc)[i],
                zfreq=zFreq(mc)[i, ],
                loglik=log_lik(mc)[i],
                logprior=logPrior(mc)[i])
  modes
}
## Both batch classes share the same modal-value extraction.
setMethod("computeModes", "BatchModel", function(object){
  .computeModesBatch(object)
})
setMethod("computeModes", "MultiBatchModel", function(object){
  .computeModesBatch(object)
})
# Per-component sample variances: split `y` by the component labels `z`
# and compute var() within each group.  Returns a named numeric vector
# with one element per observed level of `z` (a group of size one
# yields NA, as var() does).
# Improvements over the original: drops the pointless inner assignment
# (`v <- sapply(...)`) and replaces type-unstable sapply() with
# vapply(), which guarantees a numeric vector even for empty input.
componentVariances <- function(y, z) vapply(split(y, z), var, numeric(1))
## Thin dispatch wrappers for the within-batch variance computation
## (compute_vars_batch is defined elsewhere in the package).
setMethod("computeVars", "BatchModel", function(object){
  compute_vars_batch(object)
})
setMethod("computeVars", "MultiBatchModel", function(object){
  compute_vars_batch(object)
})
#' @rdname mu-method
#' @aliases mu,BatchModel-method
setMethod("mu", "BatchModel", function(object) object@mu)
#' @rdname mu-method
#' @aliases mu,MultiBatchModel-method
setMethod("mu", "MultiBatchModel", function(object) object@mu)
## Replacement methods for the mu slot (overall component means).
setReplaceMethod("mu", "BatchModel", function(object, value){
  object@mu <- value
  object
})
setReplaceMethod("mu", "MultiBatchModel", function(object, value){
  object@mu <- value
  object
})
## Number of distinct batches in the model.
nBatch <- function(object) length(uniqueBatch(object))
## Accessor for the per-batch observation counts slot.
batchElements <- function(object) object@batchElements
## Replacement methods for the mixture probabilities (stored in @pi).
setReplaceMethod("p", "BatchModel", function(object, value){
  object@pi <- value
  object
})
setReplaceMethod("p", "MultiBatchModel", function(object, value){
  object@pi <- value
  object
})
## Posterior means of the mixture probabilities, averaged over the
## chain of sampled values (pic()).
setMethod("pMean", "BatchModel", function(object) {
  mns <- colMeans(pic(object))
  mns
})
setMethod("pMean", "MultiBatchModel", function(object) {
  mns <- colMeans(pic(object))
  mns
})
setMethod("showMeans", "BatchModel", function(object){
  ## Render the batch-by-component theta matrix as one display string:
  ## a leading newline + tab, then tab-separated rounded values with a
  ## "\n" terminating each batch row (same output as the original
  ## three-step construction).
  rounded <- round(theta(object), 2)
  body <- paste0(t(cbind(rounded, "\n")), collapse="\t")
  paste0("\n\t", body)
})
setMethod("showSigmas", "BatchModel", function(object){
  ## Render the batch-by-component standard deviations as one display
  ## string: a leading newline + tab, then tab-separated rounded values
  ## with a "\n" terminating each batch row (same output as the
  ## original three-step construction).
  sds <- round(sqrt(sigma2(object)), 2)
  body <- paste0(t(cbind(sds, "\n")), collapse="\t")
  paste0("\n\t", body)
})
setMethod("showSigmas", "MultiBatchModel", function(object){
  ## Identical formatting for the MultiBatchModel class.
  sds <- round(sqrt(sigma2(object)), 2)
  body <- paste0(t(cbind(sds, "\n")), collapse="\t")
  paste0("\n\t", body)
})
## Replacement methods for the batch-by-component variance matrix;
## rows are labelled with the batch identifiers.
setReplaceMethod("sigma2", "BatchModel", function(object, value){
  rownames(value) <- uniqueBatch(object)
  object@sigma2 <- value
  object
})
setReplaceMethod("sigma2", "MultiBatchModel", function(object, value){
  rownames(value) <- uniqueBatch(object)
  object@sigma2 <- value
  object
})
#' @rdname sigma2-method
#' @aliases sigma2,BatchModel-method
setMethod("sigma2", "BatchModel", function(object) {
  s2 <- object@sigma2
  ##s2 <- matrix(s2, nBatch(object), k(object))
  rownames(s2) <- uniqueBatch(object)
  s2
})
#' @rdname sigma2-method
#' @aliases sigma2,MultiBatchModel-method
setMethod("sigma2", "MultiBatchModel", function(object) {
  s2 <- object@sigma2
  ##s2 <- matrix(s2, nBatch(object), k(object))
  rownames(s2) <- uniqueBatch(object)
  s2
})
## Cross-tabulation of batch by latent class, with rows ordered by the
## model's batch labels.
## NOTE(review): tablez,BatchModel is re-defined later in this file;
## the later definition wins at load time -- confirm intended.
setMethod("tablez", "BatchModel", function(object){
  tab <- table(batch(object), z(object))
  tab[uniqueBatch(object), , drop=FALSE]
})
setMethod("tablez", "MultiBatchModel", function(object){
  tab <- table(batch(object), z(object))
  tab[uniqueBatch(object), , drop=FALSE]
})
## Posterior means of the standard deviations, reshaped from the flat
## chain columns back to batch-by-component layout.
setMethod("sigmaMean", "BatchModel", function(object) {
  mns <- colMeans(sigmac(object))
  mns <- matrix(mns, nBatch(object), k(object))
  rownames(mns) <- uniqueBatch(object)
  mns
})
setMethod("sigmaMean", "MultiBatchModel", function(object) {
  mns <- colMeans(sigmac(object))
  mns <- matrix(mns, nBatch(object), k(object))
  rownames(mns) <- uniqueBatch(object)
  mns
})
#' @rdname tau2-method
#' @aliases tau2,BatchModel-method
setMethod("tau2", "BatchModel", function(object) object@tau2)
#' @rdname tau2-method
#' @aliases tau2,MultiBatchModel-method
setMethod("tau2", "MultiBatchModel", function(object) object@tau2)
## Replacement methods for the tau2 slot (between-batch variances of
## the component means).
setReplaceMethod("tau2", "BatchModel", function(object, value){
  object@tau2 <- value
  object
})
setReplaceMethod("tau2", "MultiBatchModel", function(object, value){
  object@tau2 <- value
  object
})
#' @rdname theta-method
#' @aliases theta,BatchModel-method
setMethod("theta", "BatchModel", function(object) {
b <- object@theta
##b <- matrix(b, nBatch(object), k(object))
rownames(b) <- uniqueBatch(object)
b
})
#' @rdname theta-method
#' @aliases theta,MultiBatchModel-method
setMethod("theta", "MultiBatchModel", function(object) {
b <- object@theta
##b <- matrix(b, nBatch(object), k(object))
rownames(b) <- uniqueBatch(object)
b
})
setReplaceMethod("theta", "BatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@theta <- value
object
})
setReplaceMethod("theta", "MultiBatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@theta <- value
object
})
setMethod("thetaMean", "BatchModel", function(object) {
mns <- colMeans(thetac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
setMethod("thetaMean", "MultiBatchModel", function(object) {
mns <- colMeans(thetac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
setMethod("show", "BatchModel", function(object){
##callNextMethod()
cls <- class(object)
cat(paste0("An object of class ", cls), "\n")
cat(" n. obs :", length(y(object)), "\n")
cat(" n. batches :", nBatch(object), "\n")
cat(" k :", k(object), "\n")
cat(" nobs/batch :", table(batch(object)), "\n")
cat(" loglik (s) :", round(log_lik(object), 1), "\n")
cat(" logprior (s):", round(logPrior(object), 1), "\n")
})
setMethod("show", "MultiBatchModel", function(object){
##callNextMethod()
cls <- class(object)
cat(paste0("An object of class ", cls), "\n")
cat(" n. obs :", length(y(object)), "\n")
cat(" n. batches :", nBatch(object), "\n")
cat(" k :", k(object), "\n")
cat(" nobs/batch :", table(batch(object)), "\n")
cat(" loglik (s) :", round(log_lik(object), 1), "\n")
cat(" logprior (s):", round(logPrior(object), 1), "\n")
})
setMethod("tablez", "BatchModel", function(object){
tab <- table(batch(object), z(object))
tab <- tab[uniqueBatch(object), , drop=FALSE]
tab
})
setMethod("tablez", "MultiBatchModel", function(object){
tab <- table(batch(object), z(object))
tab <- tab[uniqueBatch(object), , drop=FALSE]
tab
})
uniqueBatch <- function(object) unique(batch(object))
#' Create a data.frame of the component densities for each batch
#'
#' @param object an object of class \code{BatchModel}
#' @return a \code{{data.frame}}
#' @export
#' @examples
#' nbatch <- 3
#' k <- 3
#' means <- matrix(c(-2.1, -2, -1.95, -0.41, -0.4, -0.395, -0.1,
#' 0, 0.05), nbatch, k, byrow = FALSE)
#' sds <- matrix(0.15, nbatch, k)
#' sds[, 1] <- 0.3
#' N <- 1000
#' truth <- simulateBatchData(N = N, batch = rep(letters[1:3],
#' length.out = N),
#' p = c(1/10, 1/5, 1 - 0.1 - 0.2), theta = means,
#' sds = sds)
#' mcmcp <- McmcParams(iter = 1000, burnin = 500, thin = 1,
#' nStarts = 10)
#'
#' ## this parameter setting for m2.0 allows a lot of varation of the thetas
#' ## between batch
#' hypp <- CNPBayes:::HyperparametersMultiBatch(m2.0 = 1/60, eta.0 = 1800,
#' k = 3, a = 1/6, b = 180)
#' model <- BatchModel(data = y(truth), batch = batch(truth),
#' k = 3, mcmc.params = mcmcp, hypp = hypp)
#' model <- posteriorSimulation(model)
#' df <- multiBatchDensities(model)
#' df.observed <- data.frame(y=observed(model), batch=batch(model))
#' library(ggplot2)
#' ggplot(df, aes(x, d)) +
#' geom_histogram(data=df.observed,
#' aes(y, ..density..),
#' bins=300, inherit.aes=FALSE) +
#' geom_area(stat="identity", aes(color=name, fill=name),
#' alpha=0.4) +
#' xlab("quantiles") + ylab("density") +
#' scale_color_manual(values=colors) +
#' scale_fill_manual(values=colors) +
#' guides(fill=guide_legend(""), color=guide_legend("")) +
#' facet_wrap(~batch, nrow=2)
multiBatchDensities <- function(object){
  ## Build a long data.frame of component densities per batch plus an
  ## "overall" (batch-summed) density, suitable for faceted ggplot2 plots.
  probs <- p(object)
  thetas <- theta(object)
  sigmas <- sigma(object)
  ## Recycle the component probabilities across batches (one row per batch).
  P <- matrix(probs, nrow(thetas), ncol(thetas), byrow=TRUE)
  rownames(P) <- uniqueBatch(object)
  avglrrs <- observed(object)
  ## Evaluate densities on a 500-point grid spanning the observed data.
  quantiles <- seq(min(avglrrs), max(avglrrs), length.out=500)
  batchPr <- table(batch(object))/length(y(object))
  dens.list <- batchDensities(quantiles, uniqueBatch(object),
                              thetas, sigmas, P, batchPr)
  ## Order components by their means in the first batch.
  ix <- order(thetas[1, ])
  d <- do.call(rbind, dens.list[ix])
  K <- ncol(thetas)
  NB <- nBatch(object)
  ## Marginal density: elementwise sum of the batch-specific densities.
  over <- Reduce("+", dens.list)
  ## NOTE(review): the overall curve is replicated for exactly 2 panels
  ## irrespective of nBatch(object) -- confirm intent for >2 batches.
  batches.overall <- rep(1:2, each=nrow(over))
  quantile.overall <- rep(quantiles, 2)
  overall <- as.numeric(over)
  ## Fix: as.numeric() takes a single object; the stray second argument in
  ## the original call (`as.numeric(d, overall)`) was silently ignored.
  d.vec <- as.numeric(d)
  d.vec <- c(d.vec, overall)
  batches <- c(rep(uniqueBatch(object), each=nrow(d)),
               batches.overall)
  ## K is re-used here as the component index vector 1..K.
  K <- seq_len(ncol(thetas))
  name <- paste0("cn", K-1)
  name <- rep(rep(name, elementNROWS(dens.list)), 2)
  name <- c(name, rep("overall", length(overall)))
  x <- rep(rep(quantiles, length(dens.list)), 2)
  x <- c(x, quantile.overall)
  df <- data.frame(x=x, d=d.vec, name=name, batch=batches)
  df$batch <- factor(df$batch, uniqueBatch(object))
  df$name <- factor(df$name, levels=c("overall", paste0("cn", K-1)))
  df
}
| /R/methods-MultiBatchModel.R | no_license | muschellij2/CNPBayes | R | false | false | 29,010 | r | setValidity("BatchModel", function(object){
msg <- TRUE
if(length(p(object)) != k(object)){
msg <- "Mixture probability vector must be the same length as k"
return(msg)
}
if(ncol(theta(object)) != k(object)){
msg <- "theta matrix must have k columns"
return(msg)
}
if(ncol(sigma(object)) != k(object)){
msg <- "sigma matrix must have k columns"
return(msg)
}
if(length(mu(object)) != k(object)){
msg <- "mu vector must be length k "
return(msg)
}
if(length(tau(object)) != k(object)){
msg <- "tau vector must be length k "
return(msg)
}
if(k(object) != k(hyperParams(object))){
msg <- "k must be the same in the hyperparameters and in the model object"
return(msg)
}
msg
})
#' Constructor for list of batch models
#'
#' An object of class BatchModel is constructed for each k, creating a list of
#' BatchModels.
#'
#' @param data numeric vector of average log R ratios
#' @param batch vector of batch labels
#' @param mcmc.params a \code{McmcParams} object
#' @param k numeric vector indicating the number of mixture components for each model
#' @param ... additional arguments to \code{HyperparametersBatch}
#' @return a list. Each element of the list is a \code{BatchModel}
#' @seealso \code{\link{BatchModel}}. For single-batch data, use \code{\link{MarginalModelList}}.
#' @examples
#' mlist <- BatchModelList(data=y(BatchModelExample), k=1:4, batch=batch(BatchModelExample))
#' mcmcParams(mlist) <- McmcParams(iter=1, burnin=1, nStarts=0)
#' mlist2 <- posteriorSimulation(mlist)
#' @export
BatchModelList <- function(data=numeric(),
                           k=numeric(),
                           batch,
                           mcmc.params=McmcParams(),
                           ...){
  ## Deprecated constructor: one BatchModel per requested component count.
  .Deprecated("See MultiBatchModelList")
  lapply(k, function(kk){
    hypp <- HyperparametersBatch(k=kk, ...)
    BatchModel(data=data, k=kk, batch=batch,
               mcmc.params=mcmc.params,
               hypp=hypp)
  })
}
#' Constructor for list of batch models
#'
#' An object of class MultiBatchModel is constructed for each k, creating a list of
#' BatchModels.
#'
#' @param data numeric vector of average log R ratios
#' @param batch vector of batch labels
#' @param mcmc.params a \code{McmcParams} object
#' @param k numeric vector indicating the number of mixture components for each model
#' @param ... additional arguments to \code{HyperparametersBatch}
#' @return a list. Each element of the list is a \code{BatchModel}
#' @seealso \code{\link{BatchModel}}. For single-batch data, use \code{\link{MarginalModelList}}.
#' @examples
#' mlist <- BatchModelList(data=y(MultiBatchModelExample), k=1:4, batch=batch(MultiBatchModelExample))
#' mcmcParams(mlist) <- McmcParams(iter=1, burnin=1, nStarts=0)
#' mlist2 <- posteriorSimulation(mlist)
#' @export
MultiBatchModelList <- function(data=numeric(),
                                k=numeric(),
                                batch,
                                mcmc.params=McmcParams(),
                                ...){
  ## One MultiBatchModel per requested number of mixture components.
  lapply(k, function(kk){
    hypp <- HyperparametersMultiBatch(k=kk, ...)
    MultiBatchModel(data=data, k=kk, batch=batch,
                    mcmc.params=mcmc.params,
                    hypp=hypp)
  })
}
#' Create an object for running hierarchical MCMC simulations.
#' @examples
#' model <- BatchModel(rnorm(10), k=1, batch=rep(1:2, each=5))
#' @param data the data for the simulation.
#' @param k An integer value specifying the number of latent classes.
#' @param batch a vector of the different batch numbers (must be sorted)
#' @param hypp An object of class `Hyperparameters` used to specify the hyperparameters of the model.
#' @param mcmc.params An object of class 'McmcParams'
#' @return An object of class `BatchModel`
#' @export
## Constructor for a BatchModel.  Dispatches to SingleBatchModel when only
## one batch is present and to UnivariateBatchModel when k == 1; otherwise
## builds a BatchModel whose parameter slots are placeholders filled by
## startingValues().
BatchModel <- function(data=numeric(),
                       k=3,
                       batch,
                       hypp,
                       mcmc.params){
  ## Defaults: a single batch "a", 1000 iterations after 100 burnin, and
  ## hyperparameters appropriate for k components.
  if(missing(batch)) batch <- as.integer(factor(rep("a", length(data))))
  if(missing(mcmc.params)) mcmc.params <- McmcParams(iter=1000, burnin=100)
  if(missing(hypp)) hypp <- HyperparametersBatch(k=k)
  if(missing(k) & !missing(hypp)){
    k <- k(hypp)
  }
  mcmc.chains <- McmcChains()
  ## Recode batch labels as consecutive integers, keeping per-level counts.
  bf <- factor(batch)
  batch <- as.integer(bf)
  ub <- unique(batch)
  ##ix <- order(batch)
  ix <- seq_along(batch)
  nbatch <- setNames(as.integer(table(batch)), levels(bf))
  B <- length(ub)
  if(B==1 && length(data) > 0){
    ## Single batch: fall back to the marginal (single-batch) model.
    ## NOTE(review): zz/zfreq computed here are never used before the
    ## early return -- confirm they can be removed.
    if(missing(hypp)) hypp <- HyperparametersMarginal(k=k)
    zz <- as.integer(factor(numeric(k)))
    zfreq <- as.integer(table(zz))
    obj <- SingleBatchModel(data, k, hypp, mcmc.params)
    return(obj)
  }
  if(k == 1) {
    ## One mixture component: one univariate model per batch.
    if(missing(hypp)) hypp <- HyperparametersBatch(k=1)
    obj <- UnivariateBatchModel(data, k, batch, hypp, mcmc.params)
    return(obj)
  }
  if(missing(hypp)) hypp <- HyperparametersBatch(k=k)
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  if(length(data) != length(batch)) {
    stop("batch vector must be the same length as data")
  }
  ## Parameter slots (theta, sigma2, ...) hold NA/zero placeholders;
  ## startingValues() below draws the actual starting values.
  obj <- new("BatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, k),
             sigma2=matrix(NA, B, k),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             data=data[ix],
             data.mean=matrix(NA, B, k),
             data.prec=matrix(NA, B, k),
             z=zz,
             zfreq=zfreq,
             probz=matrix(0, length(data), k),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4,
             .internal.counter=0L)
  obj <- startingValues(obj)
  obj
}
## Construct a data-free BatchModel skeleton (zero observations, zero
## batches, NA/zero parameter placeholders).  Used for zero-length input.
##
## Fix: the original body referenced an undefined variable `mp` for the
## mcmc.params slot; `mp` is now an explicit argument with a default,
## which is backward compatible for existing single-argument callers.
.empty_batch_model <- function(hp, mp=McmcParams()){
  K <- k(hp)
  B <- 0
  N <- 0
  obj <- new("BatchModel",
             k=as.integer(K),
             hyperparams=hp,
             theta=matrix(NA, 0, K),
             sigma2=matrix(NA, 0, K),
             mu=numeric(K),
             tau2=numeric(K),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(K),
             data=numeric(0),
             data.mean=matrix(NA, B, K),
             data.prec=matrix(NA, B, K),
             z=integer(0),
             zfreq=integer(K),
             probz=matrix(0, N, K),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=McmcChains(),
             mcmc.params=mp,
             batch=integer(0),
             batchElements=integer(0),
             label_switch=FALSE,
             marginal_lik=as.numeric(NA),
             .internal.constraint=5e-4,
             .internal.counter=0L)
  ## Size the chains to match the (empty) model dimensions.
  chains(obj) <- McmcChains(obj)
  obj
}
#' Create an object for running hierarchical MCMC simulations.
#' @examples
#' model <- MultiBatchModel(rnorm(10), k=1, batch=rep(1:2, each=5))
#' @param data the data for the simulation.
#' @param k An integer value specifying the number of latent classes.
#' @param batch a vector of the different batch numbers (must be sorted)
#' @param hypp An object of class `Hyperparameters` used to specify the hyperparameters of the model.
#' @param mcmc.params An object of class 'McmcParams'
#' @return An object of class `MultiBatchModel`
#' @export
## Constructor for a MultiBatchModel.  Mirrors BatchModel(): dispatches to
## SingleBatchModel for a single batch and to UnivariateBatchModel for
## k == 1; otherwise builds placeholder slots filled by startingValues().
MultiBatchModel <- function(data=numeric(),
                            k=3,
                            batch,
                            hypp,
                            mcmc.params){
  if(missing(batch)) batch <- as.integer(factor(rep("a", length(data))))
  if(missing(mcmc.params)) mcmc.params <- McmcParams(iter=1000, burnin=100)
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=k)
  if(missing(k) & !missing(hypp)){
    k <- k(hypp)
  }
  mcmc.chains <- McmcChains()
  ## Recode batch labels as consecutive integers, keeping per-level counts.
  bf <- factor(batch)
  batch <- as.integer(bf)
  ub <- unique(batch)
  ##ix <- order(batch)
  ix <- seq_along(batch)
  nbatch <- setNames(as.integer(table(batch)), levels(bf))
  B <- length(ub)
  if(B==1 && length(data) > 0){
    ## Single batch: fall back to the marginal model.
    ## NOTE(review): zz/zfreq are unused before the early return.
    if(missing(hypp)) hypp <- HyperparametersMarginal(k=k)
    zz <- as.integer(factor(numeric(k)))
    zfreq <- as.integer(table(zz))
    obj <- SingleBatchModel(data, k, hypp, mcmc.params)
    return(obj)
  }
  if(k == 1) {
    if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=1)
    obj <- UnivariateBatchModel(data, k, batch, hypp, mcmc.params)
    return(obj)
  }
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=k)
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  if(length(data) != length(batch)) {
    stop("batch vector must be the same length as data")
  }
  ## Placeholder slots; startingValues() draws the starting values.
  obj <- new("MultiBatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, k),
             sigma2=matrix(NA, B, k),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             data=data[ix],
             data.mean=matrix(NA, B, k),
             data.prec=matrix(NA, B, k),
             z=zz,
             zfreq=zfreq,
             probz=matrix(0, length(data), k),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4,
             .internal.counter=0L)
  obj <- startingValues(obj)
  obj
}
## Alternative constructor that draws the starting values for all model
## parameters directly from the hyperparameters (hp) rather than via
## startingValues().  Uses random draws: callers should set the RNG seed
## for reproducibility.  Requires magrittr (`%>%`) and purrr (`map2`).
MultiBatchModel2 <- function(dat=numeric(),
                             hp=HyperparametersMultiBatch(),
                             mp=McmcParams(),
                             batches=integer()){
  if(length(dat) == 0){
    ## Degenerate case: no data.
    ## NOTE(review): .empty_batch_model is called without the MCMC
    ## params -- verify it supplies a default for its mcmc.params slot.
    return(.empty_batch_model(hp))
  }
  ub <- unique(batches)
  nbatch <- setNames(as.integer(table(batches)), ub)
  B <- length(ub)
  N <- length(dat)
  ## move to setValidity
  if(length(dat) != length(batches)) {
    stop("batch vector must be the same length as data")
  }
  K <- k(hp)
  ## mu_k is the average across batches of the thetas for component k
  ## tau_k is the sd of the batch means for component k
  mu <- sort(rnorm(k(hp), mu.0(hp), sqrt(tau2.0(hp))))
  tau2 <- 1/rgamma(k(hp), 1/2*eta.0(hp), 1/2*eta.0(hp) * m2.0(hp))
  p <- rdirichlet(1, alpha(hp))[1, ]
  ## Draw B batch-level means around each component mean, then sort within
  ## batch (rows) so components stay ordered.
  sim_theta <- function(mu, tau, B) sort(rnorm(B, mu, tau))
  ##library(magrittr)
  thetas <- map2(mu, sqrt(tau2), sim_theta, B) %>%
    do.call(cbind, .) %>%
    apply(., 1, sort) %>%
    t
  ## apply() drops to a vector for K == 1; restore the B x 1 matrix.
  if(K == 1) thetas <- t(thetas)
  ## Fixed starting values for the variance hyperparameters.
  nu.0 <- 3.5
  sigma2.0 <- 0.25
  sigma2s <- 1/rgamma(k(hp) * B, 0.5 * nu.0, 0.5 * nu.0 * sigma2.0) %>%
    matrix(B, k(hp))
  ## NOTE(review): constructs class "BatchModel" although this is the
  ## MultiBatchModel constructor -- confirm the intended class name.
  obj <- new("BatchModel",
             k=as.integer(K),
             hyperparams=hp,
             theta=thetas,
             sigma2=sigma2s,
             mu=mu,
             tau2=tau2,
             nu.0=nu.0,
             sigma2.0=sigma2.0,
             pi=p,
             data=dat,
             data.mean=matrix(NA, B, K),
             data.prec=matrix(NA, B, K),
             z=integer(N),
             zfreq=integer(K),
             probz=matrix(0, N, K),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=McmcChains(),
             mcmc.params=mp,
             batch=batches,
             batchElements=nbatch,
             label_switch=FALSE,
             marginal_lik=as.numeric(NA),
             .internal.constraint=5e-4,
             .internal.counter=0L)
  chains(obj) <- McmcChains(obj)
  obj
}
## Relabel observations so every mixture component has more than one
## observation in every batch: in each offending batch, move observations
## from the batch's largest component into the under-represented one(s).
## Assumes batch labels are the integers 1..B (row order of the z table)
## -- TODO confirm.
ensureAllComponentsObserved <- function(obj){
  zz <- table(batch(obj), z(obj))
  K <- seq_len(k(obj))
  if(any(zz<=1)){
    ## Rows (batches) with at least one component having <= 1 observation.
    index <- which(rowSums(zz<=1) > 0)
    for(i in seq_along(index)){
      j <- index[i]
      zup <- z(obj)[batch(obj) == j]
      zfact <- factor(zup, levels=K)
      ## Components with 0 or 1 observations in batch j.
      minz <- as.integer(names(table(zfact))[table(zfact) <= 1])
      ##missingk <- K[!K %in% unique(zup)]
      maxk <- names(table(zfact))[which.max(table(zfact))]
      ## Reassign two observations per deficient component; recycling of
      ## minz over the sampled positions spreads them across components.
      ## NOTE(review): sample() errors if the largest component has fewer
      ## than nreplace members -- assumes a dominant component.
      nreplace <- length(minz)*2
      zup[sample(which(zup == maxk), nreplace)] <- as.integer(minz)
      obj@z[batch(obj) == j] <- as.integer(zup)
    }
  }
  obj
}
##
## Multiple batches, but only 1 component
##
## Constructor for the multiple-batch, single-component (k = 1) model.
## NOTE(review): data are reordered by batch here (ix <- order(bf)),
## unlike BatchModel/MultiBatchModel which keep the original order --
## confirm this inconsistency is intended.
UnivariateBatchModel <- function(data, k=1, batch, hypp, mcmc.params){
  mcmc.chains <- McmcChains()
  zz <- integer(length(data))
  zfreq <- as.integer(table(zz))
  bf <- factor(batch)
  batch <- as.integer(bf)
  ix <- order(bf)
  ##B <- length(levels(batch))
  nbatch <- elementNROWS(split(batch, batch))
  B <- length(nbatch)
  if(missing(hypp)) hypp <- HyperparametersMultiBatch(k=1)
  obj <- new("UnivariateBatchModel",
             k=as.integer(k),
             hyperparams=hypp,
             theta=matrix(NA, B, 1),
             sigma2=matrix(NA, B, 1),
             mu=numeric(k),
             tau2=numeric(k),
             nu.0=numeric(1),
             sigma2.0=numeric(1),
             pi=numeric(k),
             ##pi=matrix(NA, B, 1),
             data=data[ix],
             data.mean=matrix(NA, B, 1),
             data.prec=matrix(NA, B, 1),
             z=integer(length(data)),
             zfreq=zfreq,
             probz=matrix(0, length(data), 1),
             logprior=numeric(1),
             loglik=numeric(1),
             mcmc.chains=mcmc.chains,
             mcmc.params=mcmc.params,
             batch=batch[ix],
             batchElements=nbatch,
             label_switch=FALSE,
             .internal.constraint=5e-4)
  obj <- startingValues(obj)
  ## With a single component, class membership is certain.
  if(!is.null(obj)){
    obj@probz[, 1] <- 1
  }
  obj
}
##
## This is the problem with calling new("BatchModel", ) with placeholders for values
## setValidity("BatchModel", function(object){
## msg <- TRUE
## ztab <- table(batch(object), z(object))
## ## if(any(ztab < 1)){
## ## msg <- "All components in each batch must have 1 or more observations"
## ## return(msg)
## ## }
## ## if(ncol(ztab) != nrow(ztab)){
## ## msg <- "All batches much have at least one observation from each component"
## ## return(msg)
## ## }
## ## A valid batch model should have data ordered by batch
## ## deltas <- diff(batch(object))
## ## if(!all(deltas > 0)){
## ## msg <- "Constructor for BatchModel should return data and batch assignment in batch-order"
## ## return(msg)
## ## }
## ## if(length(y(object)) > 0){
## ## pz <- probz(object)
## ## maxprob <- max(pz)
## ## if(maxprob > 1 ){
## ## msg <- "Posterior probabilities exceed 1"
## ## return(msg)
## ## }
## ## }
## msg
## })
#' extract data, latent variable, and batch for given observation
#' @name extract
#' @param x An object of class BatchModel, McmcChains, or McmcParams
#' @param i An element of the instance to be extracted.
#' @param j Not used.
#' @param ... Not used.
#' @param drop Not used.
#' @return An object of class 'BatchModel'
#' @aliases [,BatchModel-method [,BatchModel,ANY-method [,BatchModel,ANY,ANY-method [,BatchModel,ANY,ANY,ANY-method
#' @docType methods
#' @rdname extract-methods
setMethod("[", "BatchModel", function(x, i, j, ..., drop=FALSE){
  ## Subset the observation-level slots (data, latent class, batch) in
  ## lockstep; parameter slots are untouched.
  ## NOTE(review): the probz matrix is NOT subset alongside -- confirm
  ## downstream code tolerates the size mismatch.
  if(!missing(i)){
    y(x) <- y(x)[i]
    z(x) <- z(x)[i]
    batch(x) <- batch(x)[i]
  }
  x
})
#' extract data, latent variable, and batch for given observation
#' @name extract
#' @param x An object of class MultiBatchModel, McmcChains, or McmcParams
#' @param i An element of the instance to be extracted.
#' @param j Not used.
#' @param ... Not used.
#' @param drop Not used.
#' @return An object of class 'MultiBatchModel'
#' @aliases [,MultiBatchModel-method [,MultiBatchModel,ANY-method [,MultiBatchModel,ANY,ANY-method [,MultiBatchModel,ANY,ANY,ANY-method
#' @docType methods
#' @rdname extract-methods
setMethod("[", "MultiBatchModel", function(x, i, j, ..., drop=FALSE){
  ## Same observation-level subsetting for MultiBatchModel.
  if(!missing(i)){
    y(x) <- y(x)[i]
    z(x) <- z(x)[i]
    batch(x) <- batch(x)[i]
  }
  x
})
#' @rdname bic-method
#' @aliases bic,BatchModel-method
setMethod("bic", "BatchModel", function(object){
  ## Evaluate at the posterior modes before computing the criterion.
  object <- useModes(object)
  ## K: number of free parameters to be estimated
  ##   - component and batch-specific parameters: theta, sigma2  ( k(model) * nBatch(model))
  ##   - mixing probabilities: (k-1)*nBatch
  ##   - component-specific parameters: mu, tau2  2 x k(model)
  ##   - length-one parameters: sigma2.0, nu.0  +2
  ## NOTE(review): the comment above says (k-1)*nBatch for the mixing
  ## probabilities but the code adds only (k-1); also the extra
  ## -log(2*pi) term is nonstandard for BIC -- confirm both.
  K <- 2*k(object)*nBatch(object) + (k(object)-1) + 2*k(object) + 2
  n <- length(y(object))
  bicstat <- -2*(log_lik(object) + logPrior(object)) + K*(log(n) - log(2*pi))
  bicstat
})
#' @rdname bic-method
#' @aliases bic,MultiBatchModel-method
setMethod("bic", "MultiBatchModel", function(object){
  object <- useModes(object)
  ## K: number of free parameters to be estimated
  ##   - component and batch-specific parameters: theta, sigma2  ( k(model) * nBatch(model))
  ##   - mixing probabilities: (k-1)*nBatch
  ##   - component-specific parameters: mu, tau2  2 x k(model)
  ##   - length-one parameters: sigma2.0, nu.0  +2
  ## NOTE(review): same comment/code mismatch as the BatchModel method.
  K <- 2*k(object)*nBatch(object) + (k(object)-1) + 2*k(object) + 2
  n <- length(y(object))
  bicstat <- -2*(log_lik(object) + logPrior(object)) + K*(log(n) - log(2*pi))
  bicstat
})
#' @rdname collapseBatch-method
#' @aliases collapseBatch,BatchModel-method
setMethod("collapseBatch", "BatchModel", function(object){
  ## Delegate to the data-level collapseBatch method on (y, batch labels).
  collapseBatch(y(object), as.character(batch(object)))
})
#' @rdname collapseBatch-method
#' @aliases collapseBatch,MultiBatchModel-method
setMethod("collapseBatch", "MultiBatchModel", function(object){
  ## Same delegation for MultiBatchModel.
  collapseBatch(y(object), as.character(batch(object)))
})
## Weighted normal density: mixture weight `p` times dnorm(x, mean, sd).
batchLik <- function(x, p, mean, sd) {
  p * dnorm(x, mean, sd)
}
## Thin wrappers delegating the batch-wise summaries to the
## compute_*_batch helpers (defined elsewhere in the package).
setMethod("computeMeans", "BatchModel", function(object){
  compute_means_batch(object)
})
setMethod("computeMeans", "MultiBatchModel", function(object){
  compute_means_batch(object)
})
setMethod("computePrec", "BatchModel", function(object){
  compute_prec_batch(object)
})
setMethod("computePrec", "MultiBatchModel", function(object){
  compute_prec_batch(object)
})
setMethod("computePrior", "BatchModel", function(object){
  compute_logprior_batch(object)
})
setMethod("computePrior", "MultiBatchModel", function(object){
  compute_logprior_batch(object)
})
## Collect the parameter values at the chain index argMax(object),
## reshaping the flattened theta/sigma2 chains back into batch x component
## matrices.  Returns a named list of modal values.
.computeModesBatch <- function(object){
  i <- argMax(object)
  mc <- chains(object)
  B <- nBatch(object)
  K <- k(object)
  ## Chains store theta/sigma2 row-wise; restore B x K matrices.
  thetamax <- matrix(theta(mc)[i, ], B, K)
  sigma2max <- matrix(sigma2(mc)[i, ], B, K)
  pmax <- p(mc)[i, ]
  mumax <- mu(mc)[i, ]
  tau2max <- tau2(mc)[i,]
  modes <- list(theta=thetamax,
                sigma2=sigma2max,
                mixprob=pmax,
                mu=mumax,
                tau2=tau2max,
                nu0=nu.0(mc)[i],
                sigma2.0=sigma2.0(mc)[i],
                zfreq=zFreq(mc)[i, ],
                loglik=log_lik(mc)[i],
                logprior=logPrior(mc)[i])
  modes
}
## Both classes share the batch implementation (values of the chain at
## argMax(object)).
setMethod("computeModes", "BatchModel", function(object){
  .computeModesBatch(object)
})
setMethod("computeModes", "MultiBatchModel", function(object){
  .computeModesBatch(object)
})
## Within-component variances: variance of y within each level of z.
## Fixes: drop the pointless local assignment (which also made the return
## value invisible) and use vapply() for a type-stable numeric result.
componentVariances <- function(y, z) vapply(split(y, z), var, numeric(1))
## Delegate the batch-wise variance computation to compute_vars_batch.
setMethod("computeVars", "BatchModel", function(object){
  compute_vars_batch(object)
})
setMethod("computeVars", "MultiBatchModel", function(object){
  compute_vars_batch(object)
})
#' @rdname mu-method
#' @aliases mu,BatchModel-method
setMethod("mu", "BatchModel", function(object) object@mu)
#' @rdname mu-method
#' @aliases mu,MultiBatchModel-method
setMethod("mu", "MultiBatchModel", function(object) object@mu)
## Replacement methods for the component-level mean hyperparameter mu.
setReplaceMethod("mu", "BatchModel", function(object, value){
  object@mu <- value
  object
})
setReplaceMethod("mu", "MultiBatchModel", function(object, value){
  object@mu <- value
  object
})
## Number of unique batches in the model.
nBatch <- function(object) length(uniqueBatch(object))
## Per-batch observation counts stored on the object.
batchElements <- function(object) object@batchElements
## Replacement methods for the mixing probabilities (stored in slot `pi`).
setReplaceMethod("p", "BatchModel", function(object, value){
  object@pi <- value
  object
})
setReplaceMethod("p", "MultiBatchModel", function(object, value){
  object@pi <- value
  object
})
## Column means of the mixing-probability chain (pic).
setMethod("pMean", "BatchModel", function(object) {
  mns <- colMeans(pic(object))
  mns
})
setMethod("pMean", "MultiBatchModel", function(object) {
  mns <- colMeans(pic(object))
  mns
})
setMethod("showMeans", "BatchModel", function(object){
thetas <- round(theta(object), 2)
mns <- c("\n", paste0(t(cbind(thetas, "\n")), collapse="\t"))
mns <- paste0("\t", mns[2])
mns <- paste0("\n", mns[1])
mns
})
setMethod("showMeans", "MultiBatchModel", function(object){
thetas <- round(theta(object), 2)
mns <- c("\n", paste0(t(cbind(thetas, "\n")), collapse="\t"))
mns <- paste0("\t", mns[2])
mns <- paste0("\n", mns[1])
mns
})
setMethod("showSigmas", "BatchModel", function(object){
sigmas <- round(sqrt(sigma2(object)), 2)
sigmas <- c("\n", paste0(t(cbind(sigmas, "\n")), collapse="\t"))
sigmas <- paste0("\t", sigmas[2])
sigmas <- paste0("\n", sigmas[1])
sigmas
})
setMethod("showSigmas", "MultiBatchModel", function(object){
sigmas <- round(sqrt(sigma2(object)), 2)
sigmas <- c("\n", paste0(t(cbind(sigmas, "\n")), collapse="\t"))
sigmas <- paste0("\t", sigmas[2])
sigmas <- paste0("\n", sigmas[1])
sigmas
})
setReplaceMethod("sigma2", "BatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@sigma2 <- value
object
})
setReplaceMethod("sigma2", "MultiBatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@sigma2 <- value
object
})
#' @rdname sigma2-method
#' @aliases sigma2,BatchModel-method
setMethod("sigma2", "BatchModel", function(object) {
s2 <- object@sigma2
##s2 <- matrix(s2, nBatch(object), k(object))
rownames(s2) <- uniqueBatch(object)
s2
})
#' @rdname sigma2-method
#' @aliases sigma2,MultiBatchModel-method
setMethod("sigma2", "MultiBatchModel", function(object) {
s2 <- object@sigma2
##s2 <- matrix(s2, nBatch(object), k(object))
rownames(s2) <- uniqueBatch(object)
s2
})
setMethod("tablez", "BatchModel", function(object){
tab <- table(batch(object), z(object))
tab[uniqueBatch(object), , drop=FALSE]
})
setMethod("tablez", "MultiBatchModel", function(object){
tab <- table(batch(object), z(object))
tab[uniqueBatch(object), , drop=FALSE]
})
setMethod("sigmaMean", "BatchModel", function(object) {
mns <- colMeans(sigmac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
setMethod("sigmaMean", "MultiBatchModel", function(object) {
mns <- colMeans(sigmac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
#' @rdname tau2-method
#' @aliases tau2,BatchModel-method
setMethod("tau2", "BatchModel", function(object) object@tau2)
#' @rdname tau2-method
#' @aliases tau2,BatchModel-method
setMethod("tau2", "MultiBatchModel", function(object) object@tau2)
setReplaceMethod("tau2", "BatchModel", function(object, value){
object@tau2 <- value
object
})
setReplaceMethod("tau2", "MultiBatchModel", function(object, value){
object@tau2 <- value
object
})
#' @rdname theta-method
#' @aliases theta,BatchModel-method
setMethod("theta", "BatchModel", function(object) {
b <- object@theta
##b <- matrix(b, nBatch(object), k(object))
rownames(b) <- uniqueBatch(object)
b
})
#' @rdname theta-method
#' @aliases theta,MultiBatchModel-method
setMethod("theta", "MultiBatchModel", function(object) {
b <- object@theta
##b <- matrix(b, nBatch(object), k(object))
rownames(b) <- uniqueBatch(object)
b
})
setReplaceMethod("theta", "BatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@theta <- value
object
})
setReplaceMethod("theta", "MultiBatchModel", function(object, value){
rownames(value) <- uniqueBatch(object)
object@theta <- value
object
})
setMethod("thetaMean", "BatchModel", function(object) {
mns <- colMeans(thetac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
setMethod("thetaMean", "MultiBatchModel", function(object) {
mns <- colMeans(thetac(object))
mns <- matrix(mns, nBatch(object), k(object))
rownames(mns) <- uniqueBatch(object)
mns
})
setMethod("show", "BatchModel", function(object){
##callNextMethod()
cls <- class(object)
cat(paste0("An object of class ", cls), "\n")
cat(" n. obs :", length(y(object)), "\n")
cat(" n. batches :", nBatch(object), "\n")
cat(" k :", k(object), "\n")
cat(" nobs/batch :", table(batch(object)), "\n")
cat(" loglik (s) :", round(log_lik(object), 1), "\n")
cat(" logprior (s):", round(logPrior(object), 1), "\n")
})
setMethod("show", "MultiBatchModel", function(object){
##callNextMethod()
cls <- class(object)
cat(paste0("An object of class ", cls), "\n")
cat(" n. obs :", length(y(object)), "\n")
cat(" n. batches :", nBatch(object), "\n")
cat(" k :", k(object), "\n")
cat(" nobs/batch :", table(batch(object)), "\n")
cat(" loglik (s) :", round(log_lik(object), 1), "\n")
cat(" logprior (s):", round(logPrior(object), 1), "\n")
})
## Cross-tabulate batch by latent class, with rows ordered by uniqueBatch.
## NOTE(review): these registrations are byte-identical to tablez methods
## defined earlier in this file; the later setMethod() call silently
## overwrites the earlier one -- one of the two copies should be removed.
setMethod("tablez", "BatchModel", function(object){
  tab <- table(batch(object), z(object))
  tab <- tab[uniqueBatch(object), , drop=FALSE]
  tab
})
setMethod("tablez", "MultiBatchModel", function(object){
  tab <- table(batch(object), z(object))
  tab <- tab[uniqueBatch(object), , drop=FALSE]
  tab
})
uniqueBatch <- function(object) unique(batch(object))
#' Create a data.frame of the component densities for each batch
#'
#' @param object an object of class \code{BatchModel}
#' @return a \code{{data.frame}}
#' @export
#' @examples
#' nbatch <- 3
#' k <- 3
#' means <- matrix(c(-2.1, -2, -1.95, -0.41, -0.4, -0.395, -0.1,
#' 0, 0.05), nbatch, k, byrow = FALSE)
#' sds <- matrix(0.15, nbatch, k)
#' sds[, 1] <- 0.3
#' N <- 1000
#' truth <- simulateBatchData(N = N, batch = rep(letters[1:3],
#' length.out = N),
#' p = c(1/10, 1/5, 1 - 0.1 - 0.2), theta = means,
#' sds = sds)
#' mcmcp <- McmcParams(iter = 1000, burnin = 500, thin = 1,
#' nStarts = 10)
#'
#' ## this parameter setting for m2.0 allows a lot of varation of the thetas
#' ## between batch
#' hypp <- CNPBayes:::HyperparametersMultiBatch(m2.0 = 1/60, eta.0 = 1800,
#' k = 3, a = 1/6, b = 180)
#' model <- BatchModel(data = y(truth), batch = batch(truth),
#' k = 3, mcmc.params = mcmcp, hypp = hypp)
#' model <- posteriorSimulation(model)
#' df <- multiBatchDensities(model)
#' df.observed <- data.frame(y=observed(model), batch=batch(model))
#' library(ggplot2)
#' ggplot(df, aes(x, d)) +
#' geom_histogram(data=df.observed,
#' aes(y, ..density..),
#' bins=300, inherit.aes=FALSE) +
#' geom_area(stat="identity", aes(color=name, fill=name),
#' alpha=0.4) +
#' xlab("quantiles") + ylab("density") +
#' scale_color_manual(values=colors) +
#' scale_fill_manual(values=colors) +
#' guides(fill=guide_legend(""), color=guide_legend("")) +
#' facet_wrap(~batch, nrow=2)
multiBatchDensities <- function(object){
  ## Build a long data.frame of component densities per batch plus an
  ## "overall" (batch-summed) density, suitable for faceted ggplot2 plots.
  probs <- p(object)
  thetas <- theta(object)
  sigmas <- sigma(object)
  ## Recycle the component probabilities across batches (one row per batch).
  P <- matrix(probs, nrow(thetas), ncol(thetas), byrow=TRUE)
  rownames(P) <- uniqueBatch(object)
  avglrrs <- observed(object)
  ## Evaluate densities on a 500-point grid spanning the observed data.
  quantiles <- seq(min(avglrrs), max(avglrrs), length.out=500)
  batchPr <- table(batch(object))/length(y(object))
  dens.list <- batchDensities(quantiles, uniqueBatch(object),
                              thetas, sigmas, P, batchPr)
  ## Order components by their means in the first batch.
  ix <- order(thetas[1, ])
  d <- do.call(rbind, dens.list[ix])
  K <- ncol(thetas)
  NB <- nBatch(object)
  ## Marginal density: elementwise sum of the batch-specific densities.
  over <- Reduce("+", dens.list)
  ## NOTE(review): the overall curve is replicated for exactly 2 panels
  ## irrespective of nBatch(object) -- confirm intent for >2 batches.
  batches.overall <- rep(1:2, each=nrow(over))
  quantile.overall <- rep(quantiles, 2)
  overall <- as.numeric(over)
  ## Fix: as.numeric() takes a single object; the stray second argument in
  ## the original call (`as.numeric(d, overall)`) was silently ignored.
  d.vec <- as.numeric(d)
  d.vec <- c(d.vec, overall)
  batches <- c(rep(uniqueBatch(object), each=nrow(d)),
               batches.overall)
  ## K is re-used here as the component index vector 1..K.
  K <- seq_len(ncol(thetas))
  name <- paste0("cn", K-1)
  name <- rep(rep(name, elementNROWS(dens.list)), 2)
  name <- c(name, rep("overall", length(overall)))
  x <- rep(rep(quantiles, length(dens.list)), 2)
  x <- c(x, quantile.overall)
  df <- data.frame(x=x, d=d.vec, name=name, batch=batches)
  df$batch <- factor(df$batch, uniqueBatch(object))
  df$name <- factor(df$name, levels=c("overall", paste0("cn", K-1)))
  df
}
|
#==============================================================================#
# ---- LIBRARIES ----
#==============================================================================#
suppressPackageStartupMessages({
  # Package conflicts
  library("conflicted")
  # File paths
  library("fs")
  library("here")
  # Presentation
  library("knitr")
  library("patchwork")
  library("gt")
  library("glue")
  # Tidyverse
  library("tidyverse")
})
#==============================================================================#
# ---- CONFLICTS ----
#==============================================================================#
suppressMessages({
  conflict_prefer("filter", "dplyr")
})
#==============================================================================#
# ---- KNITR ----
#==============================================================================#
# Name of the document being rendered; used to give each document its own
# figure directory under figures/.
DOCNAME <- knitr::current_input()
# Compress PNG figures with pngquant via the knitr hook.
knitr::knit_hooks$set(pngquant = knitr::hook_pngquant)
knitr::opts_chunk$set(
  autodep        = TRUE,
  cache          = FALSE,
  cache.comments = FALSE,
  echo           = FALSE,
  error          = FALSE,
  dev            = "ragg_png",
  fig.path       = paste0("figures/", DOCNAME, "/"),
  fig.align      = "center",
  fig.width      = 10,
  fig.height     = 8,
  dpi            = 120,
  message        = FALSE,
  warning        = FALSE,
  pngquant       = "--speed=1 --quality=0-50"
)
#==============================================================================#
# ---- ENVIRONMENT VARIABLES ----
#==============================================================================#
#==============================================================================#
# ---- FUNCTIONS ----
#==============================================================================#
#==============================================================================#
# ---- THEME ----
#==============================================================================#
theme_set(
  theme_minimal() +
    theme(
      axis.text = element_text(size = 10)
    )
)
#==============================================================================#
# ---- PATHS ----
#==============================================================================#
# Named file paths used by the analysis -- placeholder list here;
# presumably populated per document (TODO confirm).
PATHS <- list(
)
| /website/R/document_setup.R | permissive | DaneseAnna/scib-reproducibility | R | false | false | 2,324 | r | #==============================================================================#
# ---- LIBRARIES ----
#==============================================================================#
suppressPackageStartupMessages({
# Package conflicts
library("conflicted")
# File paths
library("fs")
library("here")
# Presentation
library("knitr")
library("patchwork")
library("gt")
library("glue")
# Tidyverse
library("tidyverse")
})
#==============================================================================#
# ---- CONFLICTS ----
#==============================================================================#
suppressMessages({
conflict_prefer("filter", "dplyr")
})
#==============================================================================#
# ---- KNITR ----
#==============================================================================#
DOCNAME <- knitr::current_input()
knitr::knit_hooks$set(pngquant = knitr::hook_pngquant)
knitr::opts_chunk$set(
autodep = TRUE,
cache = FALSE,
cache.comments = FALSE,
echo = FALSE,
error = FALSE,
dev = "ragg_png",
fig.path = paste0("figures/", DOCNAME, "/"),
fig.align = "center",
fig.width = 10,
fig.height = 8,
dpi = 120,
message = FALSE,
warning = FALSE,
pngquant = "--speed=1 --quality=0-50"
)
#==============================================================================#
# ---- ENVIRONMENT VARIABLES ----
#==============================================================================#
#==============================================================================#
# ---- FUNCTIONS ----
#==============================================================================#
#==============================================================================#
# ---- THEME ----
#==============================================================================#
theme_set(
theme_minimal() +
theme(
axis.text = element_text(size = 10)
)
)
#==============================================================================#
# ---- PATHS ----
#==============================================================================#
PATHS <- list(
)
|
# Getting & Cleaning Data course project: builds a tidy data set from the raw
# UCI HAR (smartphone accelerometer) files and writes per-subject /
# per-activity averages to 'final.txt'.
# NOTE: the setwd() paths below are machine-specific; adjust before running.

#Setting work directory
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset')

#Preparing feature variable names
features=read.table("features.txt",h=F)
features=as.character(features[,2])
#Some variable names erroneously repeat the word 'Body' twice
#(e.g. "fBodyBodyAccJerkMag"). Collapse the repetition.
#FIX: the original indexed features[grep('BodyBody',features)][i] with i
#already being an absolute index into `features`, which yielded NA and
#corrupted the affected names to "NABodyNA".
for(i in grep('BodyBody',features)){
  a=unlist(strsplit(features[i],'Body'))
  features[i]=paste(a[1],'Body',a[3],sep='')
}

#Preparing activity labels (id -> activity name lookup)
labels=read.table("activity_labels.txt",h=F)
labels[,2]=as.character(labels[,2])

#Reading the test dataset: measurements plus subject and activity ids
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset/test')
test=read.table("X_test.txt",h=F)
tind=read.table("subject_test.txt",h=F)
tact=read.table("y_test.txt",h=F)
test$subject=tind[,1];test$activity=tact[,1]

#Reading the train dataset
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset/train')
train=read.table("X_train.txt",h=F)
trind=read.table("subject_train.txt",h=F)
tract=read.table("y_train.txt",h=F)
train$subject=trind[,1];train$activity=tract[,1]

#Merging 'test' and 'train' datasets in 'tidy' dataset
tidy=rbind(test,train)

#Labelling the data set with descriptive variable names.
names(tidy)[1:length(features)]=features

#Subsetting 'tidy' dataset extracting only the measurements on
#the mean and standard deviation for each measurement.
#(subject and activity become the two leading columns; note '()' in the
#regex matches the empty string, so the patterns behave like 'mean'/'std';
#the meanFreq columns picked up here are dropped by the 'Freq' filter below)
tidy=tidy[,c((ncol(tidy)-1),ncol(tidy),grep('mean()',names(tidy)),grep('std()',names(tidy)))]
tidy=tidy[,grep('Freq',names(tidy),invert=T)]

#Preparing descriptive variable names (strip the '..' left by make.names)
for(i in 3:ncol(tidy)){
  names(tidy)[i]=sub('..','',make.names(names(tidy)[i]),fixed=T)
}

#Labelling 'activity' variable with activity labels
activity=rep(NA,nrow(tidy))
for(i in 1:nrow(labels)){
  activity[as.numeric(row.names(tidy[tidy$activity==i,]))]=labels[i,2]
}
tidy$activity=activity

#Generating a 'final' dataset with the average of each variable for each
#activity and each subject: one row per (subject, activity) pair.
#FIX: row indexing now uses nrow(labels) instead of the hard-coded 6 the
#original relied on (6 happens to be the activity count in this dataset).
n_act=nrow(labels)
final=matrix(ncol=ncol(tidy),nrow=length(table(tidy$subject))*n_act,dimnames=list(1:(length(table(tidy$subject))*n_act),names(tidy)))
for(j in 1:length(table(tidy$subject))){
  for(i in 1:n_act){
    final[n_act*(j-1)+i,3:ncol(final)]=colMeans(tidy[tidy$subject==j&tidy$activity==labels[i,2],-(1:2)])
    final[n_act*(j-1)+i,1]=as.numeric(names(table(tidy$subject)))[j]
    final[n_act*(j-1)+i,2]=labels[i,2]
  }
}

#Saving the 'final' dataset in a .txt file
write.table(final,'final.txt',row.names=F)

##Please only run the code below if you want to see the tidy data into R into their readMe#################
####################################################################################################
address="https://s3.amazonaws.com/coursera-uploads/user-71404fce0a73801a55083cb2/975115/asst-3/aa0b9a40417e11e59260092beb391fd2.txt"
address=sub("^https", "http", address)
data=read.table(url(address), header = TRUE)
View(data)
####################################################################################################
| /run_analysis.R | no_license | msy89/Getting_and_Cleaning_Data | R | false | false | 2,949 | r | #Setting work directory
# NOTE(review): duplicated copy of the run_analysis script above.
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset')
#Preparing feature variable names
features=read.table("features.txt",h=F)
features=as.character(features[,2])
#You must note that some variable names have the error
#that the word 'Body' repeats 2 times. We correct that
#with the following:
# NOTE(review): `i` is an absolute index into `features`, so indexing the
# grep() subset by `i` again yields NA; the affected names become "NABodyNA".
# The intended expression is strsplit(features[i], 'Body').
for(i in grep('BodyBody',features)){
a=unlist(strsplit(features[grep('BodyBody',features)][i],'Body'))
features[i]=paste(a[1],'Body',a[3],sep='')
}
#Preparing activity labels
labels=read.table("activity_labels.txt",h=F)
labels[,2]=as.character(labels[,2])
#Reading the test dataset
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset/test')
test=read.table("X_test.txt",h=F)
tind=read.table("subject_test.txt",h=F)
tact=read.table("y_test.txt",h=F)
test$subject=tind[,1];test$activity=tact[,1]
#Reading the train dataset
setwd('C:/Users/Martin/Downloads/UCI HAR Dataset/train')
train=read.table("X_train.txt",h=F)
trind=read.table("subject_train.txt",h=F)
tract=read.table("y_train.txt",h=F)
train$subject=trind[,1];train$activity=tract[,1]
#Merging 'test' and 'train' datasets in 'tidy' dataset
tidy=rbind(test,train)
#Labelling the data set with descriptive variable names.
names(tidy)[1:length(features)]=features
#Subsetting 'tidy' dataset extracting only the measurements on
#the mean and standard deviation for each measurement.
tidy=tidy[,c((ncol(tidy)-1),ncol(tidy),grep('mean()',names(tidy)),grep('std()',names(tidy)))]
tidy=tidy[,grep('Freq',names(tidy),invert=T)]
#Preparing descriptive variable names
for(i in 3:ncol(tidy)){
names(tidy)[i]=sub('..','',make.names(names(tidy)[i]),fixed=T)
}
#Labelling 'activity' variable with activity labels
activity=rep(NA,nrow(tidy))
for(i in 1:nrow(labels)){
activity[as.numeric(row.names(tidy[tidy$activity==i,]))]=labels[i,2]
}
tidy$activity=activity
#Generating a 'final' dataset with the average of each variable for each activity and each subject.
# NOTE(review): the row index below hard-codes 6 (the activity count of this
# particular dataset) instead of nrow(labels).
final=matrix(ncol=ncol(tidy),nrow=length(table(tidy$subject))*nrow(labels),dimnames=list(1:(length(table(tidy$subject))*nrow(labels)),names(tidy)))
for(j in 1:length(table(tidy$subject))){
for(i in 1:nrow(labels)){
final[6*(j-1)+i,3:ncol(final)]=colMeans(tidy[tidy$subject==j&tidy$activity==labels[i,2],-(1:2)])
final[6*(j-1)+i,1]=as.numeric(names(table(tidy$subject)))[j]
final[6*(j-1)+i,2]=labels[i,2]
}
}
#Saving the 'final' dataset in a .txt file
write.table(final,'final.txt',row.names=F)
##Please only run the code below if you want to see the tidy data into R into their readMe#################
####################################################################################################
address="https://s3.amazonaws.com/coursera-uploads/user-71404fce0a73801a55083cb2/975115/asst-3/aa0b9a40417e11e59260092beb391fd2.txt"
address=sub("^https", "http", address)
data=read.table(url(address), header = TRUE)
View(data)
####################################################################################################
|
## Set Working Directory (machine-specific path; adjust before running)
setwd("/Users/gregwalsh/Github/DataScience/DSSA-5201-MACHINE-LEARNING-FUNDAMENTALS/KMeans")
# Dependencies. library() errors immediately on a missing package, unlike
# require(), which only returns FALSE and lets the script fail later.
# FIX: MASS was never loaded, yet mvrnorm() below comes from MASS. It is
# loaded first so dplyr::select still wins the masking order.
library(MASS)
library(dplyr)
library(ggplot2)
library(tidyverse)
# Simulate five bivariate-normal clusters (300 points each) as a visual
# sanity check for the clustering code below.
set.seed(1234)
set1=mvrnorm(n = 300, c(-4,10), matrix(c(1.5,1,1,1.5),2))
set2=mvrnorm(n = 300, c(5,7), matrix(c(1,2,2,6),2))
set3=mvrnorm(n = 300, c(-1,1), matrix(c(4,0,0,4),2))
set4=mvrnorm(n = 300, c(10,-10), matrix(c(4,0,0,4),2))
set5=mvrnorm(n = 300, c(3,-3), matrix(c(4,0,0,4),2))
DF=data.frame(rbind(set1,set2,set3,set4,set5),cluster=as.factor(c(rep(1:5,each=300))))
ggplot(DF,aes(x=X1,y=X2,color=cluster))+geom_point()
## Import Data
KMeansData_Group1 <- read_csv("KMeansData_Group1.csv")
# K-means clustering via Lloyd's algorithm.
# NOTE: this definition masks stats::kmeans for the rest of the session.
#
# Args:
#   data:      data.frame (or tibble) of numeric columns, one row per point.
#   K:         number of clusters.
#   stop_crit: convergence threshold on the mean squared centroid movement.
# Returns:
#   list(data = input with an appended `cluster` column,
#        centroids = K-row data.frame of final centroid coordinates)
#
# Fixes from review:
#   * the stop criterion was computed with lapply(), which returns a list
#     that cannot be compared against a numeric in the while() condition;
#   * print(current_stop_crit, stop_crit) passed stop_crit as print()'s
#     `digits` argument and errored;
#   * the redundant `converged` flag duplicated the loop condition;
#   * Inf replaces the magic constants 1000 / 10000000000.
kmeans=function(data,K=4,stop_crit=10e-5)
{
  # Initialise centroids as K distinct rows sampled from the data.
  centroids = data[sample.int(nrow(data),K),]
  cluster = rep(0,nrow(data))
  current_stop_crit = Inf
  while (current_stop_crit >= stop_crit)
  {
    old_centroids = centroids
    ## Assignment step: attach each point to its nearest centroid
    ## (squared Euclidean distance).
    for (i in seq_len(nrow(data)))
    {
      min_dist = Inf
      for (centroid in seq_len(nrow(centroids)))
      {
        distance_to_centroid = sum((centroids[centroid,]-data[i,])^2)
        if (distance_to_centroid <= min_dist)
        {
          cluster[i] = centroid
          min_dist = distance_to_centroid
        }
      }
    }
    ## Update step: move each centroid to the mean of its assigned points.
    for (i in seq_len(nrow(centroids)))
    {
      centroids[i,] = apply(data[cluster==i,],2,mean)
    }
    # Scalar convergence measure: mean squared displacement of the centroids.
    current_stop_crit = mean(unlist((old_centroids-centroids)^2))
  }
  return(list(data=data.frame(data,cluster),centroids=centroids))
}
# Cluster the first two columns of the imported data into five groups.
res=kmeans(KMeansData_Group1[1:2],K=5)
#res=kmeans(DF[1:2],K=5)
# Tag centroids with cluster ids and flag them so they can be drawn larger
# and opaque on top of the member points.
res$centroids$cluster=1:5
res$data$isCentroid=F
res$centroids$isCentroid=T
data_plot=rbind(res$centroids,res$data)
# Assumes the two clustered columns are named X1/X2 — TODO confirm against
# the CSV headers.
ggplot(data_plot,aes(x=X1,y=X2,color=as.factor(cluster),size=isCentroid,alpha=isCentroid))+geom_point()
| /DSSA-5201-MACHINE-LEARNING-FUNDAMENTALS/KMeans/test.R | no_license | walshg3/DataScience | R | false | false | 2,240 | r | ## Set Working Directory
# NOTE(review): duplicated copy of the KMeans script above.
setwd("/Users/gregwalsh/Github/DataScience/DSSA-5201-MACHINE-LEARNING-FUNDAMENTALS/KMeans")
# NOTE(review): mvrnorm() below needs library(MASS), which is never loaded.
require(dplyr)
require(ggplot2)
library(tidyverse)
set.seed(1234)
set1=mvrnorm(n = 300, c(-4,10), matrix(c(1.5,1,1,1.5),2))
set2=mvrnorm(n = 300, c(5,7), matrix(c(1,2,2,6),2))
set3=mvrnorm(n = 300, c(-1,1), matrix(c(4,0,0,4),2))
set4=mvrnorm(n = 300, c(10,-10), matrix(c(4,0,0,4),2))
set5=mvrnorm(n = 300, c(3,-3), matrix(c(4,0,0,4),2))
DF=data.frame(rbind(set1,set2,set3,set4,set5),cluster=as.factor(c(rep(1:5,each=300))))
ggplot(DF,aes(x=X1,y=X2,color=cluster))+geom_point()
## Import Data
KMeansData_Group1 <- read_csv("KMeansData_Group1.csv")
# Lloyd's-algorithm k-means; masks stats::kmeans.
kmeans=function(data,K=4,stop_crit=10e-5)
{
#Initialisation of clusters
centroids = data[sample.int(nrow(data),K),]
#print(centroids)
current_stop_crit = 1000
cluster = rep(0,nrow(data))
converged = FALSE
it = 1
while(current_stop_crit>=stop_crit && converged == FALSE)
{
it=it+1
#ifelse(condition, do_if_true, do_if_false)
#sapply(current_stop_crit, function(x) if x <= stop_crit })
# NOTE(review): print() receives stop_crit as its `digits` argument here,
# and current_stop_crit becomes a list below, so this loop cannot run as
# written; see the corrected copy of this function earlier in the file.
print(current_stop_crit, stop_crit)
if (current_stop_crit <= stop_crit)
{
converged = TRUE
}
old_centroids=centroids
#print(old_centroids)
##Assigning each point to a centroid
for (i in 1:nrow(data))
{
min_dist=10000000000
for (centroid in 1:nrow(centroids))
{
distance_to_centroid=sum((centroids[centroid,]-data[i,])^2)
#print(distance_to_centroid)
if (distance_to_centroid<=min_dist)
{
cluster[i] = centroid
min_dist = distance_to_centroid
}
}
}
##Assigning each point to a centroid
for (i in 1:nrow(centroids))
{
centroids[i,]=apply(data[cluster==i,],2,mean)
}
# NOTE(review): lapply() returns a list, which the while() condition above
# cannot compare against a numeric threshold.
current_stop_crit = lapply((old_centroids-centroids)^2, mean, na.rm = TRUE)
}
return(list(data=data.frame(data,cluster),centroids=centroids))
}
res=kmeans(KMeansData_Group1[1:2],K=5)
#res=kmeans(DF[1:2],K=5)
res$centroids$cluster=1:5
res$data$isCentroid=F
res$centroids$isCentroid=T
data_plot=rbind(res$centroids,res$data)
ggplot(data_plot,aes(x=X1,y=X2,color=as.factor(cluster),size=isCentroid,alpha=isCentroid))+geom_point()
|
# Cox proportional-hazards models of mortality against blood-pressure
# measures (raw SBP/DBP, pulse pressure, hypertension type, and two cluster
# assignments), each with and without clinical covariate adjustment.
# Depends on the project helper get_auc() sourced below.
library(survival)
library(dplyr)
library(tidyr)
library(lattice)
library(splines)
library(risksetROC)
source('/mnt/workspace/DCRI/Progs/get_auc.R')
hyp=read.csv("/mnt/workspace/DCRI/Data/analysis_ds_clusters.csv")
hyp$cluster1=factor(hyp$cluster1, levels=1:4)
hyp$cluster2=factor(hyp$cluster2, levels=1:5)
hyp$type_hyp=relevel(hyp$type_hyp, ref='Controlled')
#pulse pressure computation
hyp$pp = hyp$BP.s - hyp$BP.d
# Pre-assigned train/test split flags in the analysis dataset.
train=hyp[which(hyp$train==1),]
test=hyp[which(hyp$test==1),]
# Model 1: SBP, DBP and their interaction.
cox1=coxph(Surv(tdeath, death)~BP.s + BP.d + BP.s*BP.d, data=train)
cox1_test = test[, c('tdeath','death','BP.d','BP.s')]
#quantile(train2$BP.s, c(0.01, 0.99), na.rm=T)
#quantile(train2$BP.d, c(0.01, 0.99), na.rm=T)
# Prediction grid over the (roughly 1st-99th percentile) SBP x DBP plane.
BP.s=seq(105, 180, 1)
BP.d=seq(55, 100, 1)
ND = merge(BP.s, BP.d, all=T)
colnames(ND)=c('BP.s', 'BP.d')
surv_probs = survfit(cox1, ND)
# 5-year (1825-day) failure probability for every grid point.
summ=summary(surv_probs, times=1825)
out=t(summ$surv)
colnames(out)='Surv'
Fail=1-out[,1]
out=cbind(out, Fail)
final=cbind(ND, out)
final=final[,-3]
# Reshape to a DBP x SBP matrix for the level plot.
# NOTE(review): the hard-coded 2:77 column range is tied to the 76 SBP grid
# values above; update both together.
wide = final %>% spread(BP.s, Fail)
mat=as.matrix(wide[, 2:77])
row.names(mat) = t(wide[,1])
palette <- colorRampPalette(c("red", "blue"))( 100 )
my.at.1 <- seq(0, 0.25, length.out=99)
# NOTE(review): in the scales list below, labels=c('55',...) sits outside
# y=list(...) — the closing parenthesis appears misplaced, so these labels
# are applied at the scales level rather than to the y axis only.
p0=levelplot(t(mat), at=my.at.1,
col.regions=palette, xlab='Systolic BP', cex.lab=0.8,
ylab='Diastolic BP', main=list("Risk by SBP and DBP", cex=0.8),
scales=list(x=list(at=c(1, 26, 51, 76), labels=c('105', '130', '155', '180')),
y=list(at=c(1, 11, 21, 31, 41)), labels=c('55', '65', '75', '85','95')))
#p0
# Test-set concordance for model 1 (1 - c because higher risk implies
# shorter survival in this formulation).
c1a=concordance(Surv(tdeath, death)~ predict(cox1, newdata=cox1_test, type='risk'), data=data.frame(cox1_test))
c1af=1-c1a$concordance
cox1_test=cox1_test[which(complete.cases(cox1_test)),]
cox1auc1=get_auc(cox=cox1, xtest=cox1_test, time=cox1_test$tdeath, status=cox1_test$death, t_eval=365 )
cox1auc3=get_auc(cox=cox1, xtest=cox1_test, time=cox1_test$tdeath, status=cox1_test$death, t_eval=1095 )
c1a_final=list(c1af=c1af, cox1auc1=cox1auc1, cox1auc3=cox1auc3)
#
# Model 1b: same BP terms plus clinical covariates.
cox1b=coxph(Surv(tdeath, death)~BP.s + BP.d + BP.s*BP.d + age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox1b_test=test[,c('tdeath','death','BP.s','BP.d', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' )]
cox1b_test=cox1b_test[which(complete.cases(cox1b_test)),]
c1b=concordance(Surv(tdeath, death)~ predict(cox1b, newdata=cox1b_test, type='risk'), data=data.frame(cox1b_test))
c1b=1-c1b$concordance
cox1bauc1=get_auc(cox=cox1b, xtest=cox1b_test, time=cox1b_test$tdeath, status=cox1b_test$death, t_eval=365 )
cox1bauc3=get_auc(cox=cox1b, xtest=cox1b_test, time=cox1b_test$tdeath, status=cox1b_test$death, t_eval=1095 )
c1b_final=list(c1b=c1b, cox1bauc1=cox1bauc1, cox1bauc3=cox1bauc3)
# Remaining predictor sets, unadjusted (a) and covariate-adjusted (b).
cox2a=coxph(Surv(tdeath, death)~pp, data=train)
cox2b=coxph(Surv(tdeath, death)~pp+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox3a=coxph(Surv(tdeath, death)~relevel(type_hyp, ref='Controlled'), data=train)
cox3b=coxph(Surv(tdeath, death)~relevel(type_hyp, ref='Controlled') + age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox4a=coxph(Surv(tdeath, death)~cluster1, data=train)
cox4b=coxph(Surv(tdeath, death)~cluster1+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox5a=coxph(Surv(tdeath, death)~cluster2, data=train)
cox5b=coxph(Surv(tdeath, death)~cluster2+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
# Fit a Cox model on the given columns of the global `train` data, then
# report test-set concordance and 1-/3-year AUC via get_auc().
# xvars must include 'tdeath' and 'death'; the '.' formula expands to the
# remaining columns. Reads globals `train` and `test`.
model=function(xvars){
xtrain=train[,xvars]
xtest=test[, xvars]
xtest=xtest[which(complete.cases(xtest)),]
cox=coxph(Surv(tdeath, death)~., data=xtrain)
c1=concordance(Surv(tdeath, death)~ predict(cox, newdata=xtest, type='risk'), data=data.frame(xtest))
finalc=1-c1$concordance
auc1=get_auc(cox=cox, xtest=xtest, time=xtest$tdeath, status=xtest$death, t_eval=365 )
auc3=get_auc(cox=cox, xtest=xtest, time=xtest$tdeath, status=xtest$death, t_eval=1095 )
return(list(finalc=finalc, auc1=auc1, auc3=auc3))
}
c2a=model(xvars=c('tdeath','death','pp'))
c2b=model(xvars=c('tdeath','death','pp', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c3a=model(xvars=c('tdeath','death','type_hyp'))
c3b=model(xvars=c('tdeath','death','type_hyp', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c4a=model(xvars=c('tdeath','death','cluster1'))
c4b=model(xvars=c('tdeath','death','cluster1', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c5a=model(xvars=c('tdeath','death','cluster2'))
c5b=model(xvars=c('tdeath','death','cluster2', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
| /DCRI/Progs/04_cox_BP.R | permissive | Kao-PMP/Pilot_Project | R | false | false | 4,583 | r | library(survival)
# NOTE(review): duplicated copy of the 04_cox_BP.R script above (its first
# library(survival) line is fused into the preceding metadata row).
library(dplyr)
library(tidyr)
library(lattice)
library(splines)
library(risksetROC)
source('/mnt/workspace/DCRI/Progs/get_auc.R')
hyp=read.csv("/mnt/workspace/DCRI/Data/analysis_ds_clusters.csv")
hyp$cluster1=factor(hyp$cluster1, levels=1:4)
hyp$cluster2=factor(hyp$cluster2, levels=1:5)
hyp$type_hyp=relevel(hyp$type_hyp, ref='Controlled')
#pulse pressure computation
hyp$pp = hyp$BP.s - hyp$BP.d
train=hyp[which(hyp$train==1),]
test=hyp[which(hyp$test==1),]
cox1=coxph(Surv(tdeath, death)~BP.s + BP.d + BP.s*BP.d, data=train)
cox1_test = test[, c('tdeath','death','BP.d','BP.s')]
#quantile(train2$BP.s, c(0.01, 0.99), na.rm=T)
#quantile(train2$BP.d, c(0.01, 0.99), na.rm=T)
BP.s=seq(105, 180, 1)
BP.d=seq(55, 100, 1)
ND = merge(BP.s, BP.d, all=T)
colnames(ND)=c('BP.s', 'BP.d')
surv_probs = survfit(cox1, ND)
summ=summary(surv_probs, times=1825)
out=t(summ$surv)
colnames(out)='Surv'
Fail=1-out[,1]
out=cbind(out, Fail)
final=cbind(ND, out)
final=final[,-3]
wide = final %>% spread(BP.s, Fail)
mat=as.matrix(wide[, 2:77])
row.names(mat) = t(wide[,1])
palette <- colorRampPalette(c("red", "blue"))( 100 )
my.at.1 <- seq(0, 0.25, length.out=99)
# NOTE(review): labels=c('55',...) below sits outside y=list(...) — the
# closing parenthesis appears misplaced.
p0=levelplot(t(mat), at=my.at.1,
col.regions=palette, xlab='Systolic BP', cex.lab=0.8,
ylab='Diastolic BP', main=list("Risk by SBP and DBP", cex=0.8),
scales=list(x=list(at=c(1, 26, 51, 76), labels=c('105', '130', '155', '180')),
y=list(at=c(1, 11, 21, 31, 41)), labels=c('55', '65', '75', '85','95')))
#p0
c1a=concordance(Surv(tdeath, death)~ predict(cox1, newdata=cox1_test, type='risk'), data=data.frame(cox1_test))
c1af=1-c1a$concordance
cox1_test=cox1_test[which(complete.cases(cox1_test)),]
cox1auc1=get_auc(cox=cox1, xtest=cox1_test, time=cox1_test$tdeath, status=cox1_test$death, t_eval=365 )
cox1auc3=get_auc(cox=cox1, xtest=cox1_test, time=cox1_test$tdeath, status=cox1_test$death, t_eval=1095 )
c1a_final=list(c1af=c1af, cox1auc1=cox1auc1, cox1auc3=cox1auc3)
#
cox1b=coxph(Surv(tdeath, death)~BP.s + BP.d + BP.s*BP.d + age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox1b_test=test[,c('tdeath','death','BP.s','BP.d', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' )]
cox1b_test=cox1b_test[which(complete.cases(cox1b_test)),]
c1b=concordance(Surv(tdeath, death)~ predict(cox1b, newdata=cox1b_test, type='risk'), data=data.frame(cox1b_test))
c1b=1-c1b$concordance
cox1bauc1=get_auc(cox=cox1b, xtest=cox1b_test, time=cox1b_test$tdeath, status=cox1b_test$death, t_eval=365 )
cox1bauc3=get_auc(cox=cox1b, xtest=cox1b_test, time=cox1b_test$tdeath, status=cox1b_test$death, t_eval=1095 )
c1b_final=list(c1b=c1b, cox1bauc1=cox1bauc1, cox1bauc3=cox1bauc3)
cox2a=coxph(Surv(tdeath, death)~pp, data=train)
cox2b=coxph(Surv(tdeath, death)~pp+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox3a=coxph(Surv(tdeath, death)~relevel(type_hyp, ref='Controlled'), data=train)
cox3b=coxph(Surv(tdeath, death)~relevel(type_hyp, ref='Controlled') + age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox4a=coxph(Surv(tdeath, death)~cluster1, data=train)
cox4b=coxph(Surv(tdeath, death)~cluster1+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
cox5a=coxph(Surv(tdeath, death)~cluster2, data=train)
cox5b=coxph(Surv(tdeath, death)~cluster2+ age +Sex + Race2 +HxMIStr+HxDM+study.1+Toba+HMG, data=train)
# Fits a Cox model on the given columns and reports test-set concordance
# plus 1-/3-year AUC; reads globals `train` and `test`.
model=function(xvars){
xtrain=train[,xvars]
xtest=test[, xvars]
xtest=xtest[which(complete.cases(xtest)),]
cox=coxph(Surv(tdeath, death)~., data=xtrain)
c1=concordance(Surv(tdeath, death)~ predict(cox, newdata=xtest, type='risk'), data=data.frame(xtest))
finalc=1-c1$concordance
auc1=get_auc(cox=cox, xtest=xtest, time=xtest$tdeath, status=xtest$death, t_eval=365 )
auc3=get_auc(cox=cox, xtest=xtest, time=xtest$tdeath, status=xtest$death, t_eval=1095 )
return(list(finalc=finalc, auc1=auc1, auc3=auc3))
}
c2a=model(xvars=c('tdeath','death','pp'))
c2b=model(xvars=c('tdeath','death','pp', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c3a=model(xvars=c('tdeath','death','type_hyp'))
c3b=model(xvars=c('tdeath','death','type_hyp', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c4a=model(xvars=c('tdeath','death','cluster1'))
c4b=model(xvars=c('tdeath','death','cluster1', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
c5a=model(xvars=c('tdeath','death','cluster2'))
c5b=model(xvars=c('tdeath','death','cluster2', 'age', 'Sex', 'Race2', 'HxMIStr', 'HxDM', 'study.1', 'Toba', 'HMG' ))
|
# Exploratory plots of waterbird (watervogels) observation counts.
# NOTE: the input path is machine-specific; adjust before running.
library(ggplot2)
waarnemingen <- read.csv('/home/gehau/git/watervogels/data/occurrence.csv', sep = ';')
summary(waarnemingen)
# First 10,000 rows only, to keep the plots fast.
waarnemingen <- head(waarnemingen,10000)
#View(waarnemingen)
# Scatter of counts over time with a linear trend line.
ggplot(waarnemingen, aes(x=eventDate, y=individualCount)) +
    geom_point(shape=1) + # Use hollow circles
    geom_smooth(method=lm)
# One line per species (vernacularName).
ggplot(data=waarnemingen, aes(x=eventDate, y=individualCount, group=vernacularName)) +
  geom_line()+
  geom_point()
# Total count per date, one coloured line per species.
ggplot(data=waarnemingen, aes(x=eventDate, y=individualCount, group=vernacularName, fill=vernacularName, color=vernacularName)) + stat_summary(fun.y = sum, na.rm=TRUE, geom='line')
# Counts over time for the first two species in the data.
# FIX: the original plot still referenced Date/Home.Value/State columns from
# an unrelated housing tutorial and filtered numeric counts against state
# abbreviations, so it could not run against this dataset.
ggplot(subset(waarnemingen, vernacularName %in% unique(vernacularName)[1:2]),
       aes(x=eventDate,
           y=individualCount,
           color=vernacularName))+
  geom_point()
# NOTE(review): duplicated copy of the watervogels script above.
waarnemingen <- read.csv('/home/gehau/git/watervogels/data/occurrence.csv', sep = ';')
summary(waarnemingen)
# First rows only, for speed (comment said 1000 but the code takes 10000).
waarnemingen <- head(waarnemingen,10000)
#View(waarnemingen)
ggplot(waarnemingen, aes(x=eventDate, y=individualCount)) +
  geom_point(shape=1) + # Use hollow circles
  geom_smooth(method=lm)
ggplot(data=waarnemingen, aes(x=eventDate, y=individualCount, group=vernacularName)) +
  geom_line()+
  geom_point()
ggplot(data=waarnemingen, aes(x=eventDate, y=individualCount, group=vernacularName, fill=vernacularName, color=vernacularName)) + stat_summary(fun.y = sum, na.rm=TRUE, geom='line')
# NOTE(review): the plot below references Date/Home.Value/State columns from
# an unrelated housing tutorial and filters counts against state
# abbreviations; it cannot run against this dataset as written.
ggplot(subset(waarnemingen, individualCount %in% c("MA", "TX")),
       aes(x=Date,
           y=Home.Value,
           color=State))+
  geom_point()
#' @title
#' Does a field exist?
#'
#' @description
#' Logical check for the presence of a field in a table. The `field`
#' argument is lowercased before it is compared against the table's
#' field listing.
#'
#' @inheritParams base_args
#' @param field Character string to check for in the given table.
#'
#' @rdname field_exists
#' @export
#' @family logical functions
field_exists <- function(conn, schema, table, field) {
        table_fields <- ls_fields(
                conn = conn,
                schema = schema,
                table = table,
                verbose = FALSE,
                render_sql = FALSE
        )
        # %in% already yields a single TRUE/FALSE, so no if/else is needed.
        tolower(field) %in% table_fields
}
#' @title
#' Does a schema exist?
#'
#' @description
#' Logical check for the presence of a schema in the database. The
#' `schema` argument is lowercased before it is compared against the
#' schemas present in the database.
#'
#' @inheritParams base_args
#'
#' @rdname schema_exists
#' @export
#' @family logical functions
schema_exists <- function(conn, schema) {
        all_schemas <- ls_schema(
                conn = conn,
                verbose = FALSE,
                render_sql = FALSE
        )
        # %in% already yields a single TRUE/FALSE, so no if/else is needed.
        tolower(schema) %in% all_schemas
}
#' @title
#' Does a table exist?
#'
#' @inheritParams base_args
#'
#' @rdname table_exists
#'
#' @export
#' @family logical functions
table_exists <- function(conn, schema, table_name) {
        schema_tables <- ls_tables(
                conn = conn,
                schema = schema,
                verbose = FALSE,
                render_sql = FALSE
        )
        # Unlike the sibling helpers, the probe is uppercased here —
        # presumably ls_tables() returns uppercase names; confirm there.
        toupper(table_name) %in% schema_tables
}
#' @title
#' Does a database exist?
#'
#' @inheritParams base_args
#'
#' @rdname db_exists
#'
#' @export
#' @family logical functions
db_exists <- function(conn, db_name) {
        all_dbs <- ls_db(
                conn = conn,
                verbose = FALSE,
                render_sql = FALSE
        )
        # %in% already yields a single TRUE/FALSE, so no if/else is needed.
        tolower(db_name) %in% all_dbs
}
#' @title
#' SQL reserved words
#'
#' @description
#' Returns the full list of SQL reserved words, in uppercase, used by
#' `is_reserved()` to flag identifiers that need quoting.
#'
#' @return Character vector of uppercase reserved words.
#' @export
reserved_words <-
        function() {
                c("ADD", "ALL", "ALTER", "AND", "ANY", "AS", "ASC", "AUTHORIZATION", "BACKUP", "BEGIN", "BETWEEN", "BREAK", "BROWSE", "BULK", "BY", "CASCADE", "CASE", "CHECK", "CHECKPOINT", "CLOSE", "CLUSTERED", "COALESCE", "COLLATE", "COLUMN", "COMMIT", "COMPUTE", "CONSTRAINT", "CONTAINS", "CONTAINSTABLE", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "DATABASE", "DBCC", "DEALLOCATE", "DECLARE", "DEFAULT", "DELETE", "DENY", "DESC", "DISK", "DISTINCT", "DISTRIBUTED", "DOUBLE", "DROP", "DUMP", "ELSE", "END", "ERRLVL", "ESCAPE", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXIT", "ABSOLUTE", "ACTION", "ADA", "ALLOCATE", "ARE", "ASSERTION", "AT", "AVG", "BIT", "BIT_LENGTH", "BOTH", "CASCADED", "CAST", "CATALOG", "CHAR", "CHAR_LENGTH", "CHARACTER", "CHARACTER_LENGTH", "COLLATION", "CONNECT", "CONNECTION", "CONSTRAINTS", "CORRESPONDING", "COUNT", "DATE", "DAY", "DEC", "DECIMAL", "DEFERRABLE", "DEFERRED", "DESCRIBE", "DESCRIPTOR", "DIAGNOSTICS", "DISCONNECT", "DOMAIN", "END-EXEC", "EXCEPTION", "ADMIN", "AFTER", "AGGREGATE", "ALIAS", "ARRAY", "ASENSITIVE", "ASYMMETRIC", "ATOMIC", "BEFORE", "BINARY", "BLOB", "BOOLEAN", "BREADTH", "CALL", "CALLED", "CARDINALITY", "CLASS", "CLOB", "COLLECT", "COMPLETION", "CONDITION", "CONSTRUCTOR", "CORR", "COVAR_POP", "COVAR_SAMP", "CUBE", "CUME_DIST", "CURRENT_CATALOG", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CYCLE", "DATA", "DEPTH", "DEREF", "DESTROY", "DESTRUCTOR", "DETERMINISTIC", "DICTIONARY", "DYNAMIC", "EACH", "ELEMENT", "EQUALS", "EVERY", "FALSE", "FILTER", "FIRST", "FLOAT", "FOUND", "FREE", "FULLTEXTTABLE", "FUSION", "GENERAL", "GET", "GLOBAL", "GO", "GROUPING", "HOLD", "EXTERNAL", "FETCH", "FILE", "FILLFACTOR", "FOR", "FOREIGN", "FREETEXT", "FREETEXTTABLE", "FROM", "FULL", "FUNCTION", "GOTO", "GRANT", "GROUP", "HAVING", "HOLDLOCK", "IDENTITY", "IDENTITY_INSERT", "IDENTITYCOL", "IF", 
"IN", "INDEX", "INNER", "INSERT", "INTERSECT", "INTO", "IS", "JOIN", "KEY", "KILL", "LEFT", "LIKE", "LINENO", "LOAD", "MERGE", "NATIONAL", "NOCHECK", "NONCLUSTERED", "NOT", "NULL", "NULLIF", "OF", "OFF", "OFFSETS", "ON", "OPEN", "OPENDATASOURCE", "OPENQUERY", "OPENROWSET", "OPENXML", "OPTION", "OR", "ORDER", "OUTER", "OVER", "PERCENT", "PIVOT", "PLAN", "PRECISION", "PRIMARY", "PRINT", "PROC", "EXTRACT", "FORTRAN", "HOUR", "IMMEDIATE", "INCLUDE", "INDICATOR", "INITIALLY", "INPUT", "INSENSITIVE", "INT", "INTEGER", "INTERVAL", "ISOLATION", "LANGUAGE", "LAST", "LEADING", "LEVEL", "LOCAL", "LOWER", "MATCH", "MAX", "MIN", "MINUTE", "MODULE", "MONTH", "NAMES", "NATURAL", "NCHAR", "NEXT", "NO", "NONE", "NUMERIC", "OCTET_LENGTH", "ONLY", "OUTPUT", "HOST", "IGNORE", "INITIALIZE", "INOUT", "INTERSECTION", "ITERATE", "LARGE", "LATERAL", "LESS", "LIKE_REGEX", "LIMIT", "LN", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "MAP", "MEMBER", "METHOD", "MOD", "MODIFIES", "MODIFY", "MULTISET", "NCLOB", "NEW", "NORMALIZE", "OBJECT", "OCCURRENCES_REGEX", "OLD", "OPERATION", "ORDINALITY", "OUT", "OVERLAY", "PAD", "PARAMETER", "PARAMETERS", "PARTIAL", "PARTITION", "PATH", "POSTFIX", "PREFIX", "PREORDER", "PREPARE", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION_REGEX", "PRESERVE", "PRIOR", "PRIVILEGES", "RANGE", "READS", "REAL", "RECURSIVE", "REF", "REFERENCING", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "PROCEDURE", "PUBLIC", "RAISERROR", "READ", "READTEXT", "RECONFIGURE", "REFERENCES", "REPLICATION", "RESTORE", "RESTRICT", "RETURN", "REVERT", "REVOKE", "RIGHT", "ROLLBACK", "ROWCOUNT", "ROWGUIDCOL", "RULE", "SAVE", "SCHEMA", "SECURITYAUDIT", "SELECT", "SEMANTICKEYPHRASETABLE", "SEMANTICSIMILARITYDETAILSTABLE", "SEMANTICSIMILARITYTABLE", "SESSION_USER", "SET", "SETUSER", "SHUTDOWN", "SOME", "STATISTICS", "SYSTEM_USER", "TABLE", "TABLESAMPLE", "TEXTSIZE", "THEN", "TO", "TOP", "TRAN", "TRANSACTION", 
"TRIGGER", "TRUNCATE", "TRY_CONVERT", "TSEQUAL", "UNION", "UNIQUE", "UNPIVOT", "UPDATE", "UPDATETEXT", "USE", "USER", "VALUES", "VARYING", "VIEW", "WAITFOR", "WHEN", "WHERE", "WHILE", "WITH", "WITHIN GROUP", "WRITETEXT", "OVERLAPS", "PASCAL", "POSITION", "RELATIVE", "ROWS", "SCROLL", "SECOND", "SECTION", "SESSION", "SIZE", "SMALLINT", "SPACE", "SQL", "SQLCA", "SQLCODE", "SQLERROR", "SQLSTATE", "SQLWARNING", "SUBSTRING", "SUM", "TEMPORARY", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TRAILING", "TRANSLATE", "TRANSLATION", "TRIM", "TRUE", "UNKNOWN", "UPPER", "USAGE", "USING", "VALUE", "VARCHAR", "WHENEVER", "WORK", "WRITE", "YEAR", "ZONE", "RELEASE", "RESULT", "RETURNS", "ROLE", "ROLLUP", "ROUTINE", "ROW", "SAVEPOINT", "SCOPE", "SEARCH", "SENSITIVE", "SEQUENCE", "SETS", "SIMILAR", "SPECIFIC", "SPECIFICTYPE", "SQLEXCEPTION", "START", "STATE", "STATEMENT", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "STRUCTURE", "SUBMULTISET", "SUBSTRING_REGEX", "SYMMETRIC", "SYSTEM", "TERMINATE", "THAN", "TRANSLATE_REGEX", "TREAT", "UESCAPE", "UNDER", "UNNEST", "VAR_POP", "VAR_SAMP", "VARIABLE", "WIDTH_BUCKET", "WITHOUT", "WINDOW", "WITHIN", "XMLAGG", "XMLATTRIBUTES", "XMLBINARY", "XMLCAST", "XMLCOMMENT", "XMLCONCAT", "XMLDOCUMENT", "XMLELEMENT", "XMLEXISTS", "XMLFOREST", "XMLITERATE", "XMLNAMESPACES", "XMLPARSE", "XMLPI", "XMLQUERY", "XMLSERIALIZE", "XMLTABLE", "XMLTEXT", "XMLVALIDATE")
        }
#' @title
#' Is a string a reserve word?
#'
#' @description
#' Case-insensitively checks each input string against the reserved-word
#' list returned by `reserved_words()`.
#'
#' @param ... One or more character strings (or character vectors).
#' @return Named logical vector, one element per input string.
#' @export
is_reserved <-
        function(...) {
                args <- list(...)
                args <- unlist(args)
                args <- toupper(args)
                # Fixes from review: the original body referenced the
                # undefined objects `Args` and `reservedWords()`; the names
                # actually defined in this file are `args` and
                # `reserved_words()`. The word list is hoisted out of the
                # loop, and vapply() replaces sapply() so empty input yields
                # logical(0) rather than an empty list.
                words <- reserved_words()
                vapply(args, function(x) x %in% words, logical(1))
        }
| /R/utils-logical.R | no_license | jimsforks/pg13 | R | false | false | 8,539 | r | #' @title
#' Does a field exist?
#'
#' @description
#' Logical that checks if a field exists in a table. The `field` argument is formatted into lowercase prior to being checked.
#'
#'
#' @inheritParams base_args
#' @param field Character string to check for in the given table.
#'
#' @rdname field_exists
#' @export
#' @family logical functions
# NOTE(review): the definitions below are a duplicated copy of the helpers
# defined earlier in this file; they simply re-bind the same names.
field_exists <-
        function(conn,
                 schema,
                 table,
                 field) {
                # Assumes ls_fields() returns lowercase names — TODO confirm.
                fields <- ls_fields(conn = conn,
                                    schema = schema,
                                    table = table,
                                    verbose = FALSE,
                                    render_sql = FALSE)
                if (tolower(field) %in% fields) {
                        TRUE
                } else {
                        FALSE
                }
        }
#' @title
#' Does a schema exist?
#'
#' @description
#' Logical that checks if a schema exists in the database. The `schema` argument is in formatted in all lowercase prior to checking against what is present in the database.
#'
#'
#' @inheritParams base_args
#'
#' @rdname schema_exists
#' @export
#' @family logical functions
schema_exists <-
        function(conn,
                 schema) {
                schemas <-
                        ls_schema(conn = conn,
                                  verbose = FALSE,
                                  render_sql = FALSE)
                if (tolower(schema) %in% schemas) {
                        TRUE
                } else {
                        FALSE
                }
        }
#' @title
#' Does a table exist?
#'
#' @inheritParams base_args
#'
#' @rdname table_exists
#'
#' @export
#' @family logical functions
table_exists <-
        function(conn,
                 schema,
                 table_name) {
                tables <- ls_tables(conn = conn,
                                    schema = schema,
                                    verbose = FALSE,
                                    render_sql = FALSE)
                # NOTE(review): uses toupper() while the sibling helpers use
                # tolower() — presumably ls_tables() returns uppercase names;
                # confirm against that helper.
                if (toupper(table_name) %in% tables) {
                        TRUE
                } else {
                        FALSE
                }
        }
#' @title
#' Does a database exist?
#'
#' @inheritParams base_args
#'
#' @rdname db_exists
#'
#' @export
#' @family logical functions
db_exists <-
        function(conn,
                 db_name) {
                dbs <- ls_db(conn = conn,
                             verbose = FALSE,
                             render_sql = FALSE)
                if (tolower(db_name) %in% dbs) {
                        TRUE
                } else {
                        FALSE
                }
        }
#' @export
reserved_words <-
function() {
c("ADD", "ALL", "ALTER", "AND", "ANY", "AS", "ASC", "AUTHORIZATION", "BACKUP", "BEGIN", "BETWEEN", "BREAK", "BROWSE", "BULK", "BY", "CASCADE", "CASE", "CHECK", "CHECKPOINT", "CLOSE", "CLUSTERED", "COALESCE", "COLLATE", "COLUMN", "COMMIT", "COMPUTE", "CONSTRAINT", "CONTAINS", "CONTAINSTABLE", "CONTINUE", "CONVERT", "CREATE", "CROSS", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "DATABASE", "DBCC", "DEALLOCATE", "DECLARE", "DEFAULT", "DELETE", "DENY", "DESC", "DISK", "DISTINCT", "DISTRIBUTED", "DOUBLE", "DROP", "DUMP", "ELSE", "END", "ERRLVL", "ESCAPE", "EXCEPT", "EXEC", "EXECUTE", "EXISTS", "EXIT", "ABSOLUTE", "ACTION", "ADA", "ALLOCATE", "ARE", "ASSERTION", "AT", "AVG", "BIT", "BIT_LENGTH", "BOTH", "CASCADED", "CAST", "CATALOG", "CHAR", "CHAR_LENGTH", "CHARACTER", "CHARACTER_LENGTH", "COLLATION", "CONNECT", "CONNECTION", "CONSTRAINTS", "CORRESPONDING", "COUNT", "DATE", "DAY", "DEC", "DECIMAL", "DEFERRABLE", "DEFERRED", "DESCRIBE", "DESCRIPTOR", "DIAGNOSTICS", "DISCONNECT", "DOMAIN", "END-EXEC", "EXCEPTION", "ADMIN", "AFTER", "AGGREGATE", "ALIAS", "ARRAY", "ASENSITIVE", "ASYMMETRIC", "ATOMIC", "BEFORE", "BINARY", "BLOB", "BOOLEAN", "BREADTH", "CALL", "CALLED", "CARDINALITY", "CLASS", "CLOB", "COLLECT", "COMPLETION", "CONDITION", "CONSTRUCTOR", "CORR", "COVAR_POP", "COVAR_SAMP", "CUBE", "CUME_DIST", "CURRENT_CATALOG", "CURRENT_DEFAULT_TRANSFORM_GROUP", "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_SCHEMA", "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CYCLE", "DATA", "DEPTH", "DEREF", "DESTROY", "DESTRUCTOR", "DETERMINISTIC", "DICTIONARY", "DYNAMIC", "EACH", "ELEMENT", "EQUALS", "EVERY", "FALSE", "FILTER", "FIRST", "FLOAT", "FOUND", "FREE", "FULLTEXTTABLE", "FUSION", "GENERAL", "GET", "GLOBAL", "GO", "GROUPING", "HOLD", "EXTERNAL", "FETCH", "FILE", "FILLFACTOR", "FOR", "FOREIGN", "FREETEXT", "FREETEXTTABLE", "FROM", "FULL", "FUNCTION", "GOTO", "GRANT", "GROUP", "HAVING", "HOLDLOCK", "IDENTITY", "IDENTITY_INSERT", "IDENTITYCOL", "IF", 
"IN", "INDEX", "INNER", "INSERT", "INTERSECT", "INTO", "IS", "JOIN", "KEY", "KILL", "LEFT", "LIKE", "LINENO", "LOAD", "MERGE", "NATIONAL", "NOCHECK", "NONCLUSTERED", "NOT", "NULL", "NULLIF", "OF", "OFF", "OFFSETS", "ON", "OPEN", "OPENDATASOURCE", "OPENQUERY", "OPENROWSET", "OPENXML", "OPTION", "OR", "ORDER", "OUTER", "OVER", "PERCENT", "PIVOT", "PLAN", "PRECISION", "PRIMARY", "PRINT", "PROC", "EXTRACT", "FORTRAN", "HOUR", "IMMEDIATE", "INCLUDE", "INDICATOR", "INITIALLY", "INPUT", "INSENSITIVE", "INT", "INTEGER", "INTERVAL", "ISOLATION", "LANGUAGE", "LAST", "LEADING", "LEVEL", "LOCAL", "LOWER", "MATCH", "MAX", "MIN", "MINUTE", "MODULE", "MONTH", "NAMES", "NATURAL", "NCHAR", "NEXT", "NO", "NONE", "NUMERIC", "OCTET_LENGTH", "ONLY", "OUTPUT", "HOST", "IGNORE", "INITIALIZE", "INOUT", "INTERSECTION", "ITERATE", "LARGE", "LATERAL", "LESS", "LIKE_REGEX", "LIMIT", "LN", "LOCALTIME", "LOCALTIMESTAMP", "LOCATOR", "MAP", "MEMBER", "METHOD", "MOD", "MODIFIES", "MODIFY", "MULTISET", "NCLOB", "NEW", "NORMALIZE", "OBJECT", "OCCURRENCES_REGEX", "OLD", "OPERATION", "ORDINALITY", "OUT", "OVERLAY", "PAD", "PARAMETER", "PARAMETERS", "PARTIAL", "PARTITION", "PATH", "POSTFIX", "PREFIX", "PREORDER", "PREPARE", "PERCENT_RANK", "PERCENTILE_CONT", "PERCENTILE_DISC", "POSITION_REGEX", "PRESERVE", "PRIOR", "PRIVILEGES", "RANGE", "READS", "REAL", "RECURSIVE", "REF", "REFERENCING", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2", "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "PROCEDURE", "PUBLIC", "RAISERROR", "READ", "READTEXT", "RECONFIGURE", "REFERENCES", "REPLICATION", "RESTORE", "RESTRICT", "RETURN", "REVERT", "REVOKE", "RIGHT", "ROLLBACK", "ROWCOUNT", "ROWGUIDCOL", "RULE", "SAVE", "SCHEMA", "SECURITYAUDIT", "SELECT", "SEMANTICKEYPHRASETABLE", "SEMANTICSIMILARITYDETAILSTABLE", "SEMANTICSIMILARITYTABLE", "SESSION_USER", "SET", "SETUSER", "SHUTDOWN", "SOME", "STATISTICS", "SYSTEM_USER", "TABLE", "TABLESAMPLE", "TEXTSIZE", "THEN", "TO", "TOP", "TRAN", "TRANSACTION", 
"TRIGGER", "TRUNCATE", "TRY_CONVERT", "TSEQUAL", "UNION", "UNIQUE", "UNPIVOT", "UPDATE", "UPDATETEXT", "USE", "USER", "VALUES", "VARYING", "VIEW", "WAITFOR", "WHEN", "WHERE", "WHILE", "WITH", "WITHIN GROUP", "WRITETEXT", "OVERLAPS", "PASCAL", "POSITION", "RELATIVE", "ROWS", "SCROLL", "SECOND", "SECTION", "SESSION", "SIZE", "SMALLINT", "SPACE", "SQL", "SQLCA", "SQLCODE", "SQLERROR", "SQLSTATE", "SQLWARNING", "SUBSTRING", "SUM", "TEMPORARY", "TIME", "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TRAILING", "TRANSLATE", "TRANSLATION", "TRIM", "TRUE", "UNKNOWN", "UPPER", "USAGE", "USING", "VALUE", "VARCHAR", "WHENEVER", "WORK", "WRITE", "YEAR", "ZONE", "RELEASE", "RESULT", "RETURNS", "ROLE", "ROLLUP", "ROUTINE", "ROW", "SAVEPOINT", "SCOPE", "SEARCH", "SENSITIVE", "SEQUENCE", "SETS", "SIMILAR", "SPECIFIC", "SPECIFICTYPE", "SQLEXCEPTION", "START", "STATE", "STATEMENT", "STATIC", "STDDEV_POP", "STDDEV_SAMP", "STRUCTURE", "SUBMULTISET", "SUBSTRING_REGEX", "SYMMETRIC", "SYSTEM", "TERMINATE", "THAN", "TRANSLATE_REGEX", "TREAT", "UESCAPE", "UNDER", "UNNEST", "VAR_POP", "VAR_SAMP", "VARIABLE", "WIDTH_BUCKET", "WITHOUT", "WINDOW", "WITHIN", "XMLAGG", "XMLATTRIBUTES", "XMLBINARY", "XMLCAST", "XMLCOMMENT", "XMLCONCAT", "XMLDOCUMENT", "XMLELEMENT", "XMLEXISTS", "XMLFOREST", "XMLITERATE", "XMLNAMESPACES", "XMLPARSE", "XMLPI", "XMLQUERY", "XMLSERIALIZE", "XMLTABLE", "XMLTEXT", "XMLVALIDATE")
}
#' @title
#' Is a string a reserve word?
#'
#' @export
is_reserved <-
function(...) {
args <- list(...)
args <- unlist(args)
args <- toupper(args)
sapply(Args, function(x) x %in% reservedWords())
}
|
# Tools to make it run
deparse_all <- function(x) {
deparse2 <- function(x) paste(deparse(x, width.cutoff = 500L), collapse = "")
vapply(x, deparse2, FUN.VALUE = character(1))
}
dt_env <- function(dt, env) {
env <- new.env(parent = env, size = 2L)
env$dt <- dt
env$vars <- deparse_all(dplyr::groups(dt))
env
}
# code for manip.r
#' mutate selected rows
#'
#' change values of columns only in rows that satisfy the .if condition
#' Note: you cannot create new columns with mutate_if but only change
#' values in selected rows of existing columns
#'
#' @param .data the data
#' @param .if a logical condition that selects rows, e.g. a=="B"
#' @param ... the command to mutate existing columns
#' @export
mutate_if = function (.data,.if,...) {
UseMethod("mutate_if")
}
# for tbl-data.frame.R
#' @export
mutate_if.data.frame =function (.data,.if,...)
{
dt = data.table::as.data.table(as.data.frame(.data))
.if.quoted = substitute(.if)
as.data.frame(mutate_if.data.table(.data=dt,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame()))
}
# for manip-df.r
#' @export
mutate_if.tbl_df <- function (.data,.if,...) {
dt = data.table::as.data.table(as.data.frame(.data))
.if.quoted = substitute(.if)
tbl_df(mutate_if.data.table(.data=dt,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame()))
}
#' @export
mutate_if.tbl_dt <- function(.data,.if, ...) {
.if.quoted = substitute(.if)
tbl_dt(
mutate_if.data.table(.data=.data,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame())
)
}
# for manip-dt.r
#' @export
mutate_if.data.table <- function (.data,.if, ..., inplace = FALSE,.if.quoted=NULL, .parent.env=parent.frame())
{
if (is.null(.if.quoted))
.if.quoted = substitute(.if)
if (!inplace)
.data <- copy(.data)
env <- new.env(parent = .parent.env, size = 1L)
env$data <- .data
cols <- dplyr:::named_dots(...)
for (i in seq_along(cols)) {
call <- substitute(data[.if.quoted, `:=`(lhs, rhs)], list(lhs = as.name(names(cols)[[i]]), rhs = cols[[i]], .if.quoted =.if.quoted))
eval(call, env)
}
.data
}
# for manip-grouped-dt.r
#' @export
mutate_if.grouped_dt <- function(.data,.if, ..., inplace = FALSE, .if.quoted=NULL) {
data <- .data
if (is.null(.if.quoted))
.if.quoted = substitute(.if)
if (!inplace) data <- copy(data)
env <- dt_env(data, parent.frame())
cols <- dplyr:::named_dots(...)
# For each new variable, generate a call of the form df[, new := expr]
for(col in names(cols)) {
call <- substitute(dt[.if.quoted, lhs := rhs, by = vars],
list(lhs = as.name(col), rhs = cols[[col]], .if.quoted=.if.quoted))
eval(call, env)
}
grouped_dt(
data = data,
vars = dplyr::groups(.data)
)
}
#' @export
mutate_if.grouped_df <- function(.data,.if, ...) {
# This function is currently extremely unelegant and inefficient
# Problem: when transforming to data.table row order will be changed
# by group_by operation at least in dplyr 0.1.3
# So I manually restore the original row order
if (NROW(.data)==0)
return(.data)
.if.quoted = substitute(.if)
vars = dplyr::groups(.data)
dt = data.table::as.data.table(as.data.frame(.data))
class(dt) = c("data.table","data.frame")
# does not seem to work correctly
#mutate(dt, INDEX.ROW__ = 1:NROW(.data), inplace=TRUE)
dt$INDEX.ROW__ = 1:NROW(.data) # slower but seems to work
gdt = grouped_dt(dt, vars=vars)
gdt = mutate_if.grouped_dt(gdt,.if.quoted=.if.quoted,..., inplace=TRUE)
data = dplyr:::grouped_df(data=as.data.frame(gdt), vars=vars)
# restore original order
data = select(arrange(data, INDEX.ROW__), -INDEX.ROW__)
data
}
examples.mutate_if = function() {
library(microbenchmark)
library(dplyr)
library(data.table)
library(dplyrExtras)
# create a data
set.seed(123456)
n = 10
df = data.frame(a= sample(1:3,n,replace=TRUE),
b= sample(1:100,n,replace=TRUE),
x=rnorm(n))
dt = data.table::as.data.table(df)
# different calls to mutate_if
mutate_if(df,a==3,y=100)
mutate_if(tbl_df(df),a==1,x=200)
mutate_if(as.tbl(df),a==1,x=300,b=400)
mutate_if(dt,a==1 | a==2,x=400)
mutate_if(group_by(dt,a),a==1 | a==2,x=mean(b))
mutate_if(group_by(df,a),a==1 | a==2,x=mean(b))
# if you create a new column rows that don't
# match the if condition have an NA
mutate_if(df,a==3,z=100)
# You can only have one if condition in a mutate_if call
# So multiple changes require nesting or piping
library(magrittr)
df %>% mutate_if(a==3,z=300) %>%
mutate_if(a==2,z=200)
# Small benchmark: compare with mutate + ifelse
n = 1e6
df = data.frame(a= sample(1:3,n,replace=TRUE),
b= sample(1:100,n,replace=TRUE),
x=rnorm(n))
microbenchmark(times = 5L,
mutate(df, x=ifelse(a==2,x+100,x)),
mutate_if(df, a==2, x=x+100),
)
#Unit: milliseconds
# expr min lq median uq max neval
# mutate(df, x = ifelse(a == 2, x + 100, x)) 749.2954 754.4179 815.06681 820.95872 860.79326 5
# mutate_if(df, a == 2, x = x + 100) 72.2886 75.4189 77.47787 83.64689 86.33666 5
}
| /R/mutate_if.r | no_license | kendonB/dplyrExtras | R | false | false | 5,117 | r |
# Tools to make it run
deparse_all <- function(x) {
deparse2 <- function(x) paste(deparse(x, width.cutoff = 500L), collapse = "")
vapply(x, deparse2, FUN.VALUE = character(1))
}
dt_env <- function(dt, env) {
env <- new.env(parent = env, size = 2L)
env$dt <- dt
env$vars <- deparse_all(dplyr::groups(dt))
env
}
# code for manip.r
#' mutate selected rows
#'
#' change values of columns only in rows that satisfy the .if condition
#' Note: you cannot create new columns with mutate_if but only change
#' values in selected rows of existing columns
#'
#' @param .data the data
#' @param .if a logical condition that selects rows, e.g. a=="B"
#' @param ... the command to mutate existing columns
#' @export
mutate_if = function (.data,.if,...) {
UseMethod("mutate_if")
}
# for tbl-data.frame.R
#' @export
mutate_if.data.frame =function (.data,.if,...)
{
dt = data.table::as.data.table(as.data.frame(.data))
.if.quoted = substitute(.if)
as.data.frame(mutate_if.data.table(.data=dt,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame()))
}
# for manip-df.r
#' @export
mutate_if.tbl_df <- function (.data,.if,...) {
dt = data.table::as.data.table(as.data.frame(.data))
.if.quoted = substitute(.if)
tbl_df(mutate_if.data.table(.data=dt,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame()))
}
#' @export
mutate_if.tbl_dt <- function(.data,.if, ...) {
.if.quoted = substitute(.if)
tbl_dt(
mutate_if.data.table(.data=.data,.if.quoted=.if.quoted,...,inplace=TRUE, .parent.env = parent.frame())
)
}
# for manip-dt.r
#' @export
mutate_if.data.table <- function (.data,.if, ..., inplace = FALSE,.if.quoted=NULL, .parent.env=parent.frame())
{
if (is.null(.if.quoted))
.if.quoted = substitute(.if)
if (!inplace)
.data <- copy(.data)
env <- new.env(parent = .parent.env, size = 1L)
env$data <- .data
cols <- dplyr:::named_dots(...)
for (i in seq_along(cols)) {
call <- substitute(data[.if.quoted, `:=`(lhs, rhs)], list(lhs = as.name(names(cols)[[i]]), rhs = cols[[i]], .if.quoted =.if.quoted))
eval(call, env)
}
.data
}
# for manip-grouped-dt.r
#' @export
mutate_if.grouped_dt <- function(.data,.if, ..., inplace = FALSE, .if.quoted=NULL) {
data <- .data
if (is.null(.if.quoted))
.if.quoted = substitute(.if)
if (!inplace) data <- copy(data)
env <- dt_env(data, parent.frame())
cols <- dplyr:::named_dots(...)
# For each new variable, generate a call of the form df[, new := expr]
for(col in names(cols)) {
call <- substitute(dt[.if.quoted, lhs := rhs, by = vars],
list(lhs = as.name(col), rhs = cols[[col]], .if.quoted=.if.quoted))
eval(call, env)
}
grouped_dt(
data = data,
vars = dplyr::groups(.data)
)
}
#' @export
mutate_if.grouped_df <- function(.data,.if, ...) {
# This function is currently extremely unelegant and inefficient
# Problem: when transforming to data.table row order will be changed
# by group_by operation at least in dplyr 0.1.3
# So I manually restore the original row order
if (NROW(.data)==0)
return(.data)
.if.quoted = substitute(.if)
vars = dplyr::groups(.data)
dt = data.table::as.data.table(as.data.frame(.data))
class(dt) = c("data.table","data.frame")
# does not seem to work correctly
#mutate(dt, INDEX.ROW__ = 1:NROW(.data), inplace=TRUE)
dt$INDEX.ROW__ = 1:NROW(.data) # slower but seems to work
gdt = grouped_dt(dt, vars=vars)
gdt = mutate_if.grouped_dt(gdt,.if.quoted=.if.quoted,..., inplace=TRUE)
data = dplyr:::grouped_df(data=as.data.frame(gdt), vars=vars)
# restore original order
data = select(arrange(data, INDEX.ROW__), -INDEX.ROW__)
data
}
examples.mutate_if = function() {
library(microbenchmark)
library(dplyr)
library(data.table)
library(dplyrExtras)
# create a data
set.seed(123456)
n = 10
df = data.frame(a= sample(1:3,n,replace=TRUE),
b= sample(1:100,n,replace=TRUE),
x=rnorm(n))
dt = data.table::as.data.table(df)
# different calls to mutate_if
mutate_if(df,a==3,y=100)
mutate_if(tbl_df(df),a==1,x=200)
mutate_if(as.tbl(df),a==1,x=300,b=400)
mutate_if(dt,a==1 | a==2,x=400)
mutate_if(group_by(dt,a),a==1 | a==2,x=mean(b))
mutate_if(group_by(df,a),a==1 | a==2,x=mean(b))
# if you create a new column rows that don't
# match the if condition have an NA
mutate_if(df,a==3,z=100)
# You can only have one if condition in a mutate_if call
# So multiple changes require nesting or piping
library(magrittr)
df %>% mutate_if(a==3,z=300) %>%
mutate_if(a==2,z=200)
# Small benchmark: compare with mutate + ifelse
n = 1e6
df = data.frame(a= sample(1:3,n,replace=TRUE),
b= sample(1:100,n,replace=TRUE),
x=rnorm(n))
microbenchmark(times = 5L,
mutate(df, x=ifelse(a==2,x+100,x)),
mutate_if(df, a==2, x=x+100),
)
#Unit: milliseconds
# expr min lq median uq max neval
# mutate(df, x = ifelse(a == 2, x + 100, x)) 749.2954 754.4179 815.06681 820.95872 860.79326 5
# mutate_if(df, a == 2, x = x + 100) 72.2886 75.4189 77.47787 83.64689 86.33666 5
}
|
# install.packages("stringi")
# get libraries
library(dplyr)
library(gdata)
library(zipcode)
library(stringi)
getwd()
# get rid of weird formats
clean <- function(data, sortby) {
data[, sortby] <- as.numeric(as.character(data[, sortby]))
return(data)
}
CleanZip <- function(zip) {
zip.length <- stri_length(as.character(zip))
if (zip.length > 5) {
diff <- zip.length - 5
zip <-trunc((zip / (10 ^ diff)), prec = -2)
}
return(zip)
}
# import the '05-'07 dataset, get rid of weird stringsAsFactors stuff
data.05.07 <- read.xls('Oncampuscrime050607.xls', header = TRUE)
write.csv(data.05.07, "data.05.07.csv", row.names = FALSE)
data.05.07<- read.csv("data.05.07.csv", stringsAsFactors = FALSE)
# wrangling data to useful info, getting weird of weird nas
edited.data.05.07 <- data.05.07 %>%
mutate("RAPE5" = FORCIB5 + NONFOR5, "RAPE6" = FORCIB6 + NONFOR6, "MEN5" = men_total,
"WOMEN5" = women_total, "TOTAL5" = total) %>%
select(INSTNM, sector_desc, MEN5, WOMEN5, TOTAL5, RAPE5, RAPE6, Zip)
edited.data.05.07 <- na.omit(edited.data.05.07)
edited.data.05.07$Zip <- as.numeric(edited.data.05.07$Zip)
# import that '07 - '09 dataset, get rid of weird stringsAsFactors stuff
data.07.09 <- read.xls('Oncampuscrime070809.xls', header = TRUE)
write.csv(data.07.09, "data.07.09.csv", row.names = FALSE)
data.07.09 <- read.csv("data.07.09.csv", stringsAsFactors = FALSE)
# wrangling the data to useful info, getting weird of weird nas
edited.data.07.09 <- data.07.09 %>%
mutate("RAPE7" = FORCIB7 + NONFOR7, "RAPE8" = FORCIB8 + NONFOR8, "Zip" = ZIP,
"sector_desc" = Sector_desc, "TOTAL7" = Total, "MEN7" = men_total,
"WOMEN7" = women_total) %>%
select(INSTNM, RAPE7, RAPE8, MEN7, WOMEN7, TOTAL7, Zip, sector_desc)
# getting rid of things to join, fixing nas, etc.
edited.data.07.09$Zip <- as.numeric(edited.data.07.09$Zip)
edited.data.07.09 <- na.omit(edited.data.07.09)
# joining tables -> '05 - '09
new <- full_join(edited.data.05.07, edited.data.07.09, by = c("INSTNM", "Zip", "sector_desc"))
# import the '09 - '11 dataset
data.09.11 <- read.xls('Oncampuscrime091011.xls', header = TRUE)
write.csv(data.09.11, "data.09.11.csv", row.names = FALSE)
data.09.11 <- read.csv("data.09.11.csv", stringsAsFactors = FALSE)
# cleaning weird columns (getting rid of nas, changing odd formatting, etc.)
mod.09.11 <- clean(data.09.11, "FORCIB9")
mod.09.11 <- clean(mod.09.11, "NONFOR9")
combo <- as.vector(mod.09.11$FORCIB9 + mod.09.11$NONFOR9)
# selecting only useful columns, standardizing column names
edited.data.09.11<- data.09.11 %>%
mutate("RAPE9" = combo, "Zip" = ZIP, "TOTAL9" = Total, "WOMEN9" = women_total,
"MEN9" = men_total, "sector_desc" = Sector_desc) %>%
select(INSTNM, Zip, MEN9, WOMEN9, TOTAL9, RAPE9, sector_desc)
# getting table ready to join with full table, getting rid of nas
edited.data.09.11$Zip <- as.numeric(edited.data.09.11$Zip)
edited.data.09.11 <- na.omit(edited.data.09.11)
# making the full table -> '05 - '11
new <- full_join(new, edited.data.09.11, by = c("INSTNM", "Zip", "sector_desc"))
# import the '10 - '12 dataset
data.10.12 <- read.xls('Oncampuscrime101112.xls', header = TRUE)
write.csv(data.10.12, "data.10.12.csv", row.names = FALSE)
data.10.12 <- read.csv("data.10.12.csv", stringsAsFactors = FALSE)
# cleaning weird columns (fixing formatting, removing nas, etc)
mod.10.12 <- clean(data.10.12, "FORCIB10")
mod.10.12 <- clean(mod.10.12, "NONFOR10")
mod.10.12 <- clean(mod.10.12, "FORCIB11")
mod.10.12 <- clean(mod.10.12, "NONFOR11")
mod.10.12 <- clean(mod.10.12, "FORCIB12")
mod.10.12 <- clean(mod.10.12, "NONFOR12")
combo11 <- as.vector(mod.10.12$FORCIB11 + mod.10.12$NONFOR11)
combo12 <- as.vector(mod.10.12$FORCIB12 + mod.10.12$NONFOR12)
# selecting only useful columns / standardizing column names
edited.data.10.12<- data.10.12%>%
mutate("RAPE10" = FORCIB10, "RAPE11" = combo11, "RAPE12" = combo12, "Zip" = ZIP,
"sector_desc" = Sector_desc, "TOTAL10" = Total, "WOMEN10" = women_total, "MEN10" = men_total) %>%
select(INSTNM, Zip, sector_desc, MEN10, WOMEN10, TOTAL10, RAPE10, RAPE11, RAPE12)
# removing nas / fixing formatting to join
edited.data.10.12 <- na.omit(edited.data.10.12)
edited.data.10.12$Zip <- as.numeric(edited.data.10.12$Zip)
# adding new data so that the full table -> '05 - '12
new <- full_join(new, edited.data.10.12, by = c("INSTNM", "Zip", "sector_desc"))
# importing the '13 - '15 dataset
data.13.15 <- read.xls('Oncampuscrime131415.xls', header = TRUE)
write.csv(data.13.15, "data.13.15.csv", row.names = FALSE)
data.13.15 <- read.csv("data.13.15.csv", stringsAsFactors = FALSE)
# selecting only useful columns / standardizing column names
edited.data.13.15 <- data.13.15%>%
mutate("RAPE13" = FORCIB13 + NONFOR13, "Zip" = ZIP, "sector_desc" = Sector_desc,
"TOTAL13" = Total, "WOMEN13" = women_total, "MEN13" = men_total) %>%
select(INSTNM, Zip, sector_desc, MEN13, WOMEN13, TOTAL13, RAPE13, RAPE14, RAPE15)
# removing nas / fixing formatting to join
edited.data.13.15$Zip <- as.numeric(edited.data.13.15$Zip)
edited.data.13.15 <- na.omit(edited.data.13.15)
# joining the new dats such that the full dataset is now complete ('05 - '15)
new <- full_join(new, edited.data.13.15, by = c("INSTNM", "Zip", "sector_desc"))
# write the dataset to a .csv
write.csv(new, "Total.sexual.assaults.05.15.csv", row.names = FALSE)
edited.data.13.15<- data.13.15%>%
mutate("RAPE13" = FORCIB13 + NONFOR13, "Zip" = ZIP, "sector_desc" = Sector_desc, "total" = Total) %>%
select(INSTNM, Address, City, State, Zip, sector_cd, sector_desc, men_total, women_total, total, RAPE13, RAPE14, RAPE15)
# Zip fixes
new$Zip <- lapply(new$Zip, CleanZip)
new$Zip <- as.numeric(new$Zip)
SAVE <- new
colnames(new)
View(new)
# eliminating duplicates
new2 <- new %>% group_by(INSTNM, sector_desc) %>% summarize(RAPE13 = max(RAPE13), RAPE5 = max(RAPE5),
RAPE6 = max(RAPE6), RAPE7 = max(RAPE7),
RAPE8 = max(RAPE8), RAPE9 = max(RAPE9),
RAPE10 = max(RAPE10), RAPE11 = max(RAPE11),
RAPE12 = max(RAPE12), RAPE14 = max(RAPE14),
RAPE15 = max(RAPE15)) %>% unique()
# saving the data in csv form!
write.csv(new2, "Total.sexual.assaults.05.15.csv", row.names = FALSE)
write.csv(x, "Updated.total.05.15.csv", row.names = FALSE)
x <- read.csv("Total.sexual.assaults.05.15.csv", stringsAsFactors = FALSE)
View(x)
| /altheadatawrangling.R | no_license | anuto/info-201-final | R | false | false | 6,650 | r | # install.packages("stringi")
# get libraries
library(dplyr)
library(gdata)
library(zipcode)
library(stringi)
getwd()
# get rid of weird formats
clean <- function(data, sortby) {
data[, sortby] <- as.numeric(as.character(data[, sortby]))
return(data)
}
CleanZip <- function(zip) {
zip.length <- stri_length(as.character(zip))
if (zip.length > 5) {
diff <- zip.length - 5
zip <-trunc((zip / (10 ^ diff)), prec = -2)
}
return(zip)
}
# import the '05-'07 dataset, get rid of weird stringsAsFactors stuff
data.05.07 <- read.xls('Oncampuscrime050607.xls', header = TRUE)
write.csv(data.05.07, "data.05.07.csv", row.names = FALSE)
data.05.07<- read.csv("data.05.07.csv", stringsAsFactors = FALSE)
# wrangling data to useful info, getting weird of weird nas
edited.data.05.07 <- data.05.07 %>%
mutate("RAPE5" = FORCIB5 + NONFOR5, "RAPE6" = FORCIB6 + NONFOR6, "MEN5" = men_total,
"WOMEN5" = women_total, "TOTAL5" = total) %>%
select(INSTNM, sector_desc, MEN5, WOMEN5, TOTAL5, RAPE5, RAPE6, Zip)
edited.data.05.07 <- na.omit(edited.data.05.07)
edited.data.05.07$Zip <- as.numeric(edited.data.05.07$Zip)
# import that '07 - '09 dataset, get rid of weird stringsAsFactors stuff
data.07.09 <- read.xls('Oncampuscrime070809.xls', header = TRUE)
write.csv(data.07.09, "data.07.09.csv", row.names = FALSE)
data.07.09 <- read.csv("data.07.09.csv", stringsAsFactors = FALSE)
# wrangling the data to useful info, getting weird of weird nas
edited.data.07.09 <- data.07.09 %>%
mutate("RAPE7" = FORCIB7 + NONFOR7, "RAPE8" = FORCIB8 + NONFOR8, "Zip" = ZIP,
"sector_desc" = Sector_desc, "TOTAL7" = Total, "MEN7" = men_total,
"WOMEN7" = women_total) %>%
select(INSTNM, RAPE7, RAPE8, MEN7, WOMEN7, TOTAL7, Zip, sector_desc)
# getting rid of things to join, fixing nas, etc.
edited.data.07.09$Zip <- as.numeric(edited.data.07.09$Zip)
edited.data.07.09 <- na.omit(edited.data.07.09)
# joining tables -> '05 - '09
new <- full_join(edited.data.05.07, edited.data.07.09, by = c("INSTNM", "Zip", "sector_desc"))
# import the '09 - '11 dataset
data.09.11 <- read.xls('Oncampuscrime091011.xls', header = TRUE)
write.csv(data.09.11, "data.09.11.csv", row.names = FALSE)
data.09.11 <- read.csv("data.09.11.csv", stringsAsFactors = FALSE)
# cleaning weird columns (getting rid of nas, changing odd formatting, etc.)
mod.09.11 <- clean(data.09.11, "FORCIB9")
mod.09.11 <- clean(mod.09.11, "NONFOR9")
combo <- as.vector(mod.09.11$FORCIB9 + mod.09.11$NONFOR9)
# selecting only useful columns, standardizing column names
edited.data.09.11<- data.09.11 %>%
mutate("RAPE9" = combo, "Zip" = ZIP, "TOTAL9" = Total, "WOMEN9" = women_total,
"MEN9" = men_total, "sector_desc" = Sector_desc) %>%
select(INSTNM, Zip, MEN9, WOMEN9, TOTAL9, RAPE9, sector_desc)
# getting table ready to join with full table, getting rid of nas
edited.data.09.11$Zip <- as.numeric(edited.data.09.11$Zip)
edited.data.09.11 <- na.omit(edited.data.09.11)
# making the full table -> '05 - '11
new <- full_join(new, edited.data.09.11, by = c("INSTNM", "Zip", "sector_desc"))
# import the '10 - '12 dataset
data.10.12 <- read.xls('Oncampuscrime101112.xls', header = TRUE)
write.csv(data.10.12, "data.10.12.csv", row.names = FALSE)
data.10.12 <- read.csv("data.10.12.csv", stringsAsFactors = FALSE)
# cleaning weird columns (fixing formatting, removing nas, etc)
mod.10.12 <- clean(data.10.12, "FORCIB10")
mod.10.12 <- clean(mod.10.12, "NONFOR10")
mod.10.12 <- clean(mod.10.12, "FORCIB11")
mod.10.12 <- clean(mod.10.12, "NONFOR11")
mod.10.12 <- clean(mod.10.12, "FORCIB12")
mod.10.12 <- clean(mod.10.12, "NONFOR12")
combo11 <- as.vector(mod.10.12$FORCIB11 + mod.10.12$NONFOR11)
combo12 <- as.vector(mod.10.12$FORCIB12 + mod.10.12$NONFOR12)
# selecting only useful columns / standardizing column names
edited.data.10.12<- data.10.12%>%
mutate("RAPE10" = FORCIB10, "RAPE11" = combo11, "RAPE12" = combo12, "Zip" = ZIP,
"sector_desc" = Sector_desc, "TOTAL10" = Total, "WOMEN10" = women_total, "MEN10" = men_total) %>%
select(INSTNM, Zip, sector_desc, MEN10, WOMEN10, TOTAL10, RAPE10, RAPE11, RAPE12)
# removing nas / fixing formatting to join
edited.data.10.12 <- na.omit(edited.data.10.12)
edited.data.10.12$Zip <- as.numeric(edited.data.10.12$Zip)
# adding new data so that the full table -> '05 - '12
new <- full_join(new, edited.data.10.12, by = c("INSTNM", "Zip", "sector_desc"))
# importing the '13 - '15 dataset
data.13.15 <- read.xls('Oncampuscrime131415.xls', header = TRUE)
write.csv(data.13.15, "data.13.15.csv", row.names = FALSE)
data.13.15 <- read.csv("data.13.15.csv", stringsAsFactors = FALSE)
# selecting only useful columns / standardizing column names
edited.data.13.15 <- data.13.15%>%
mutate("RAPE13" = FORCIB13 + NONFOR13, "Zip" = ZIP, "sector_desc" = Sector_desc,
"TOTAL13" = Total, "WOMEN13" = women_total, "MEN13" = men_total) %>%
select(INSTNM, Zip, sector_desc, MEN13, WOMEN13, TOTAL13, RAPE13, RAPE14, RAPE15)
# removing nas / fixing formatting to join
edited.data.13.15$Zip <- as.numeric(edited.data.13.15$Zip)
edited.data.13.15 <- na.omit(edited.data.13.15)
# joining the new dats such that the full dataset is now complete ('05 - '15)
new <- full_join(new, edited.data.13.15, by = c("INSTNM", "Zip", "sector_desc"))
# write the dataset to a .csv
write.csv(new, "Total.sexual.assaults.05.15.csv", row.names = FALSE)
edited.data.13.15<- data.13.15%>%
mutate("RAPE13" = FORCIB13 + NONFOR13, "Zip" = ZIP, "sector_desc" = Sector_desc, "total" = Total) %>%
select(INSTNM, Address, City, State, Zip, sector_cd, sector_desc, men_total, women_total, total, RAPE13, RAPE14, RAPE15)
# Zip fixes
new$Zip <- lapply(new$Zip, CleanZip)
new$Zip <- as.numeric(new$Zip)
SAVE <- new
colnames(new)
View(new)
# eliminating duplicates
new2 <- new %>% group_by(INSTNM, sector_desc) %>% summarize(RAPE13 = max(RAPE13), RAPE5 = max(RAPE5),
RAPE6 = max(RAPE6), RAPE7 = max(RAPE7),
RAPE8 = max(RAPE8), RAPE9 = max(RAPE9),
RAPE10 = max(RAPE10), RAPE11 = max(RAPE11),
RAPE12 = max(RAPE12), RAPE14 = max(RAPE14),
RAPE15 = max(RAPE15)) %>% unique()
# saving the data in csv form!
write.csv(new2, "Total.sexual.assaults.05.15.csv", row.names = FALSE)
write.csv(x, "Updated.total.05.15.csv", row.names = FALSE)
x <- read.csv("Total.sexual.assaults.05.15.csv", stringsAsFactors = FALSE)
View(x)
|
# Simpsons paradox, in plots
library(dplyr)
library(ggplot2)
library(MASS)
# Via https://simplystatistics.org/2017/08/08/code-for-my-educational-gifs/
## simulate data
N <- 100
Sigma <- matrix(c(1,0.75,0.75, 1), 2, 2)*1.5
means <- list(c(11,3), c(9,5), c(7,7), c(5,9), c(3,11))
dat <- lapply(means, function(mu) mvrnorm(N, mu, Sigma))
dat <- tbl_df(Reduce(rbind, dat)) %>%
mutate(Z = as.character(rep(seq_along(means), each = N)))
names(dat) <- c("X", "Y", "Z")
## First plot
sim_p1 <- ggplot(dat, aes(X,Y)) +
geom_point(size = 2, alpha = .8) +
geom_smooth(method = lm, color = "red", se = FALSE)
## second plot
means <- tbl_df(Reduce(rbind, means)) %>%
setNames(c("x","y")) %>%
mutate(z = as.character(seq_along(means)))
corrs <- dat %>% group_by(Z) %>% summarize(cor = cor(X,Y)) %>% .$cor
sim_p2 <- ggplot(dat, aes(X, Y, color = Z)) +
geom_point(size = 2, show.legend = FALSE, alpha = 0.8) +
scale_color_brewer(palette = "Set1", guide = FALSE)
## third plot
sim_p3 <- sim_p2 +
geom_smooth(method = lm, se = FALSE) +
annotate("label", x = means$x, y = means$y, alpha = .6,
label = paste("Z=", means$z), cex = 9) +
ggtitle(paste("Pearson's r = ", paste(signif(corrs, 2), collapse = ", ")))
sim_p1
sim_p2
sim_p3
| /simpsons_paradox.R | permissive | tadaadata/didactical_plots | R | false | false | 1,258 | r | # Simpsons paradox, in plots
library(dplyr)
library(ggplot2)
library(MASS)
# Via https://simplystatistics.org/2017/08/08/code-for-my-educational-gifs/
## simulate data
N <- 100
Sigma <- matrix(c(1,0.75,0.75, 1), 2, 2)*1.5
means <- list(c(11,3), c(9,5), c(7,7), c(5,9), c(3,11))
dat <- lapply(means, function(mu) mvrnorm(N, mu, Sigma))
dat <- tbl_df(Reduce(rbind, dat)) %>%
mutate(Z = as.character(rep(seq_along(means), each = N)))
names(dat) <- c("X", "Y", "Z")
## First plot
sim_p1 <- ggplot(dat, aes(X,Y)) +
geom_point(size = 2, alpha = .8) +
geom_smooth(method = lm, color = "red", se = FALSE)
## second plot
means <- tbl_df(Reduce(rbind, means)) %>%
setNames(c("x","y")) %>%
mutate(z = as.character(seq_along(means)))
corrs <- dat %>% group_by(Z) %>% summarize(cor = cor(X,Y)) %>% .$cor
sim_p2 <- ggplot(dat, aes(X, Y, color = Z)) +
geom_point(size = 2, show.legend = FALSE, alpha = 0.8) +
scale_color_brewer(palette = "Set1", guide = FALSE)
## third plot
sim_p3 <- sim_p2 +
geom_smooth(method = lm, se = FALSE) +
annotate("label", x = means$x, y = means$y, alpha = .6,
label = paste("Z=", means$z), cex = 9) +
ggtitle(paste("Pearson's r = ", paste(signif(corrs, 2), collapse = ", ")))
sim_p1
sim_p2
sim_p3
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_update_crawler_schedule}
\alias{glue_update_crawler_schedule}
\title{Updates the schedule of a crawler using a cron expression}
\usage{
glue_update_crawler_schedule(CrawlerName, Schedule = NULL)
}
\arguments{
\item{CrawlerName}{[required] The name of the crawler whose schedule to update.}
\item{Schedule}{The updated \code{cron} expression used to specify the schedule (see
\href{https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html}{Time-Based Schedules for Jobs and Crawlers}.
For example, to run something every day at 12:15 UTC, you would specify:
\verb{cron(15 12 * * ? *)}.}
}
\description{
Updates the schedule of a crawler using a \code{cron} expression.
See \url{https://www.paws-r-sdk.com/docs/glue_update_crawler_schedule/} for full documentation.
}
\keyword{internal}
| /cran/paws.analytics/man/glue_update_crawler_schedule.Rd | permissive | paws-r/paws | R | false | true | 917 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glue_operations.R
\name{glue_update_crawler_schedule}
\alias{glue_update_crawler_schedule}
\title{Updates the schedule of a crawler using a cron expression}
\usage{
glue_update_crawler_schedule(CrawlerName, Schedule = NULL)
}
\arguments{
\item{CrawlerName}{[required] The name of the crawler whose schedule to update.}
\item{Schedule}{The updated \code{cron} expression used to specify the schedule (see
\href{https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html}{Time-Based Schedules for Jobs and Crawlers}.
For example, to run something every day at 12:15 UTC, you would specify:
\verb{cron(15 12 * * ? *)}.}
}
\description{
Updates the schedule of a crawler using a \code{cron} expression.
See \url{https://www.paws-r-sdk.com/docs/glue_update_crawler_schedule/} for full documentation.
}
\keyword{internal}
|
"Grad.box" <-
function(vecxr,ldiss,udiss, P){
N <- nrow(ldiss)
X <- matrix(vecxr[1:(N*P)],nrow=N,ncol=P)
R <- vecxr[(N*P)+1:(N*P)]
igrad <- rep(0,2*N*P)
.C("boxgrad",
arg1=as.double(igrad),
arg2=as.double(X),
arg3=as.double(R),
arg4=as.double(ldiss),
arg5=as.double(udiss),
arg6=as.integer(N),
arg7=as.integer(P)
)$arg1
}
| /smds/R/Grad.box.R | no_license | ingted/R-Examples | R | false | false | 399 | r | "Grad.box" <-
function(vecxr,ldiss,udiss, P){
N <- nrow(ldiss)
X <- matrix(vecxr[1:(N*P)],nrow=N,ncol=P)
R <- vecxr[(N*P)+1:(N*P)]
igrad <- rep(0,2*N*P)
.C("boxgrad",
arg1=as.double(igrad),
arg2=as.double(X),
arg3=as.double(R),
arg4=as.double(ldiss),
arg5=as.double(udiss),
arg6=as.integer(N),
arg7=as.integer(P)
)$arg1
}
|
#' Round, floor and ceiling for date-time objects
#'
#' @description \pkg{timechange} provides rounding to the nearest unit or multiple of a
#' unit with fractional support whenever makes sense. Units can be specified flexibly
#' as strings. All common abbreviations are supported - secs, min, mins, 2 minutes, 3
#' years, 2s, 1d etc.
#'
#' @section Civil Time vs Absolute Time rounding:
#'
#' Rounding in civil time is done on actual clock time (ymdHMS) and is affected
#' by civil time irregularities like DST. One important characteristic of civil
#' time rounding is that floor (ceiling) does not produce civil times that are
#' bigger (smaller) than the rounded civil time.
#'
#' Absolute time rounding (with `aseconds`, `aminutes` and `ahours`) is done on the
#' absolute time (number of seconds since origin), thus, allowing for fractional seconds
#' and arbitrary multi-units. See examples of rounding around DST transition where
#' rounding in civil time does not give the same result as rounding with the
#' corresponding `X aseconds`.
#'
#' Please note that absolute rounding to fractions smaller than 1ms will result
#' to large precision errors due to the floating point representation of the
#' POSIXct objects.
#'
#' @section Note on `time_round()`:
#'
#' For rounding date-times which is exactly halfway between two consecutive units,
#' the convention is to round up. Note that this is in line with the behavior of R's
#' [base::round.POSIXt()] function but does not follow the convention of the base
#' [base::round()] function which "rounds to the even digit" per IEC 60559.
#'
#'
#' @section Ceiling of `Date` objects:
#'
#' By default rounding up `Date` objects follows 3 steps:
#'
#' 1. Convert to an instant representing lower bound of the Date:
#' `2000-01-01` --> `2000-01-01 00:00:00`
#'
#' 2. Round up to the \strong{next} closest rounding unit boundary. For example,
#' if the rounding unit is `month` then next closest boundary of `2000-01-01`
#' is `2000-02-01 00:00:00`.
#'
#' The motivation for this is that the "partial" `2000-01-01` is conceptually
#' an interval (`2000-01-01 00:00:00` -- `2000-01-02 00:00:00`) and the day
#' hasn't started clocking yet at the exact boundary `00:00:00`. Thus, it
#' seems wrong to round up a day to its lower boundary.
#'
#' The behavior on the boundary can be changed by setting
#' `change_on_boundary` to a non-`NULL` value.
#'
#' 3. If rounding unit is smaller than a day, return the instant from step 2
#' (`POSIXct`), otherwise convert to and return a `Date` object.
#'
#' @name time_round
#' @param time a date-time vector (`Date`, `POSIXct` or `POSIXlt`)
#' @param unit a character string specifying a time unit or a multiple of a unit. Valid
#' base periods for civil time rounding are `second`, `minute`, `hour`, `day`, `week`,
#' `month`, `bimonth`, `quarter`, `season`, `halfyear` and `year`. The only units for
#' absolute time rounding are `asecond`, `aminute` and `ahour`. Other absolute units
#' can be achieved with multiples of `asecond` (e.g. "24ah"). See "Details" and
#' examples. Arbitrary unique English abbreviations are allowed. One letter
#' abbreviations follow `strptime` formats "y", "m", "d", "M", "H", "S". Multi-unit
#' rounding of weeks is currently not supported.
#'
#' Rounding for a unit is performed from the parent's unit origin. For example when
#' rounding to seconds origin is start of the minute. When rounding to days, origin is
#' first date of the month. See examples.
#'
#' With fractional sub-unit (unit < 1) rounding with child unit is performed
#' instead. For example 0.5mins == 30secs, .2hours == 12min etc.
#'
#' Please note that for fractions which don't match exactly to integer number of the
#' child units only the integer part is used for computation. For example .7days =
#' 16.8hours will use 16 hours during the computation.
#'
#' @param change_on_boundary If NULL (the default) don't change instants on the boundary
#' (`time_ceiling(ymd_hms('2000-01-01 00:00:00'))` is `2000-01-01 00:00:00`), but
#' round up `Date` objects to the next boundary (`time_ceiling(ymd("2000-01-01"),
#' "month")` is `"2000-02-01"`). When `TRUE`, instants on the boundary are rounded up
#' to the next boundary. When `FALSE`, date-time on the boundary are never rounded up
#'   (this was the default for \pkg{lubridate} prior to `v1.6.0`). See the
#'   section "Ceiling of `Date` objects" above for more details.
#' @param week_start When unit is `weeks`, this is the first day of the week. Defaults
#' to 1 (Monday).
#' @param origin Origin with respect to which to perform the rounding operation. For
#' absolute units only. Can be a vector of the same length as the input `time`
#' vector. Defaults to the Unix origin "1970-01-01 UTC".
#' @return An object of the same class as the input object. When input is a `Date`
#' object and unit is smaller than `day` a `POSIXct` object is returned.
#' @seealso [base::round()]
#' @examples
#'
#' ## print fractional seconds
#' options(digits.secs=6)
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_round(x, ".5 asec")
#' time_round(x, "sec")
#' time_round(x, "second")
#' time_round(x, "asecond")
#' time_round(x, "minute")
#' time_round(x, "5 mins")
#' time_round(x, "5M") # "M" for minute "m" for month
#' time_round(x, "hour")
#' time_round(x, "2 hours")
#' time_round(x, "2H")
#' time_round(x, "day")
#' time_round(x, "week")
#' time_round(x, "month")
#' time_round(x, "bimonth")
#' time_round(x, "quarter") == time_round(x, "3 months")
#' time_round(x, "halfyear")
#' time_round(x, "year")
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_floor(x, ".1 asec")
#' time_floor(x, "second")
#' time_floor(x, "minute")
#' time_floor(x, "M")
#' time_floor(x, "hour")
#' time_floor(x, ".2 ahour")
#' time_floor(x, "day")
#' time_floor(x, "week")
#' time_floor(x, "m")
#' time_floor(x, "month")
#' time_floor(x, "bimonth")
#' time_floor(x, "quarter")
#' time_floor(x, "season")
#' time_floor(x, "halfyear")
#' time_floor(x, "year")
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_ceiling(x, ".1 asec")
#' time_ceiling(x, "second")
#' time_ceiling(x, "minute")
#' time_ceiling(x, "5 mins")
#' time_ceiling(x, "hour")
#' time_ceiling(x, ".2 ahour")
#' time_ceiling(x, "day")
#' time_ceiling(x, "week")
#' time_ceiling(x, "month")
#' time_ceiling(x, "bimonth") == time_ceiling(x, "2 months")
#' time_ceiling(x, "quarter")
#' time_ceiling(x, "season")
#' time_ceiling(x, "halfyear")
#' time_ceiling(x, "year")
#'
#' ## behavior on the boundary
#' x <- as.Date("2000-01-01")
#' time_ceiling(x, "month")
#' time_ceiling(x, "month", change_on_boundary = FALSE)
#'
#' ## As of R 3.4.2 POSIXct printing of fractional numbers is wrong
#' as.POSIXct("2009-08-03 12:01:59.3", tz = "UTC") ## -> "2009-08-03 12:01:59.2 UTC"
#' time_ceiling(x, ".1 asec") ## -> "2009-08-03 12:01:59.2 UTC"
#'
#' ## Civil Time vs Absolute Time Rounding
#'
#' # "2014-11-02 01:59:59.5 EDT" before 1h backroll at 2AM
#' x <- .POSIXct(1414907999.5, tz = "America/New_York")
#' x
#' time_ceiling(x, "hour") # "2014-11-02 02:00:00 EST"
#' time_ceiling(x, "minute")
#' time_ceiling(x, "sec")
#' time_ceiling(x, "1ahour") # "2014-11-02 01:00:00 EST"
#' time_ceiling(x, "1asec")
#'
#' # "2014-11-02 01:00:00.5 EST" .5s after 1h backroll at 2AM
#' x <- .POSIXct(1414908000.5, tz = "America/New_York")
#' x
#' time_floor(x, "hour") # "2014-11-02 01:00:00 EST"
#' time_floor(x, "ahour") # "2014-11-02 01:00:00 EST"
#'
#' ## behavior on the boundary when rounding multi-units
#'
#' x <- as.POSIXct("2009-08-28 22:56:59.23", tz = "UTC")
#' time_ceiling(x, "3.4 secs") # "2009-08-28 22:57:03.4"
#' time_ceiling(x, "50.5 secs") # "2009-08-28 22:57:50.5"
#' time_ceiling(x, "57 min") # "2009-08-28 22:57:00"
#' time_ceiling(x, "56 min") # "2009-08-28 23:56:00"
#' time_ceiling(x, "7h") # "2009-08-29 07:00:00"
#' time_ceiling(x, "7d") # "2009-08-29 00:00:00"
#' time_ceiling(x, "8d") # "2009-09-09 00:00:00"
#' time_ceiling(x, "8m") # "2009-09-01 00:00:00"
#' time_ceiling(x, "6m") # "2010-01-01 00:00:00"
#' time_ceiling(x, "7m") # "2010-08-01 00:00:00"
#'
#' x <- as.POSIXct("2010-11-25 22:56:57", tz = "UTC")
#' time_ceiling(x, "6sec") # "2010-11-25 22:57:00"
#' time_ceiling(x, "60sec") # "2010-11-25 22:57:00"
#' time_ceiling(x, "6min") # "2010-11-25 23:00:00"
#' time_ceiling(x, "60min") # "2010-11-25 23:00:00"
#' time_ceiling(x, "4h") # "2010-11-26 00:00:00"
#' time_ceiling(x, "15d") # "2010-12-01 00:00:00"
#' time_ceiling(x, "15d") # "2010-12-01 00:00:00"
#' time_ceiling(x, "6m") # "2011-01-01 00:00:00"
#'
#'
#' ## custom origin
#' x <- as.POSIXct(c("2010-10-01 01:00:01", "2010-11-02 02:00:01"), tz = "America/New_York")
#' # 50 minutes from the day or month start
#' time_floor(x, "50amin")
#' time_floor(x, "50amin", origin = time_floor(x, "day"))
#' time_floor(x, "50amin", origin = time_floor(x, "month"))
#' time_ceiling(x, "50amin")
#' time_ceiling(x, "50amin", origin = time_floor(x, "day"))
#' time_ceiling(x, "50amin", origin = time_floor(x, "month"))
#'
#' @export
time_round <- function(time, unit = "second",
                       week_start = getOption("timechange.week_start", 1),
                       origin = unix_origin) {
  ## Round `time` to the nearest `unit` boundary, rounding up on exact ties
  ## (see the roxygen "Note on time_round()" section above).
  if (length(time) == 0L)
    return(time)
  nu <- parse_rounding_unit(unit)
  n <- nu$n
  unit <- nu$unit
  ct <- to_posixct(time)
  ## Fast path: single civil units (and absolute seconds measured from the
  ## Unix origin) map directly onto base R's round.POSIXt().
  if (n == 1 && (
    unit %in% c("day", "hour", "minute", "second") ||
    (unit == "asecond" && identical(origin, unix_origin))
  )) {
    out <- round.POSIXt(ct, units = base_units[[unit]])
    return(from_posixlt(out, time, force_date = unit != "hour"))
  }
  ## General path: compute the floor and ceiling candidates on the numeric
  ## (unclassed) representation and pick the nearer one; ties go up.
  ## FIXME: Behavior of this logic is likely slightly different from the above
  ## base rounding around DST. It has to do with hard-coded post-pre values in
  ## ceiling and floor.
  ## CONSISTENCY FIX: pass week_start through as.integer(), as time_floor()
  ## and time_ceiling() already do before calling the C routines.
  above <- unclass(C_time_ceiling(ct, unit, n, as.integer(week_start), TRUE, origin))
  mid <- unclass(ct)
  below <- unclass(C_time_floor(ct, unit, n, as.integer(week_start), origin))
  wabove <- (above - mid) <= (mid - below)
  ## NA distances (e.g. nonexistent civil times) fall back to the floor value.
  wabove <- !is.na(wabove) & wabove
  below[wabove] <- above[wabove]
  from_posixct(.POSIXct(below, tz = tz(time)), time,
               force_date = !unit %in% c("hour", "minute", "second", "asecond"))
}
#' @name time_round
#' @export
time_floor <- function(time, unit = "seconds",
                       week_start = getOption("timechange.week_start", 1),
                       origin = unix_origin) {
  ## Round `time` down to the previous `unit` boundary; empty input is
  ## returned unchanged.
  if (length(time) == 0)
    return(time)
  nunit <- parse_rounding_unit(unit)
  floored <- C_time_floor(to_posixct(time), nunit$unit, nunit$n,
                          as.integer(week_start), origin)
  ## Sub-day units keep POSIXct output even for Date input; for larger units
  ## from_posixct() converts the result back to Date.
  keep_time <- nunit$unit %in% c("asecond", "second", "minute", "hour")
  from_posixct(floored, time, force_date = !keep_time)
}
#' @name time_round
#' @export
time_ceiling <- function(time, unit = "seconds",
                         change_on_boundary = inherits(time, "Date"),
                         week_start = getOption("timechange.week_start", 1),
                         origin = unix_origin) {
  ## Round `time` up to the next `unit` boundary.  `change_on_boundary`
  ## defaults to TRUE for Date input so that e.g. ceiling of "2000-01-01"
  ## to "month" yields "2000-02-01" (see the roxygen section above).
  if (length(time) == 0)
    return(time)
  nu <- parse_rounding_unit(unit)
  ## NOTE(review): unlike time_floor(), "asecond" is absent from the
  ## force_date exclusion list below -- confirm whether that asymmetry
  ## is intentional.
  from_posixct(C_time_ceiling(to_posixct(time), nu$unit, nu$n, as.integer(week_start),
                              as.logical(change_on_boundary), origin),
               time, force_date = !nu$unit %in% c("second", "minute", "hour"))
}
## UTILS

## Map single-unit civil names onto the `units` argument accepted by
## base::round.POSIXt() (used by the fast path in time_round()).
base_units <- list(second = "secs", minute = "mins", hour = "hours", day = "days")

## Maximum multiplier allowed per rounding unit; absolute units (a*) are
## unbounded, civil units are capped at the size of their parent unit
## (60 s/min, 24 h/day, ...).  Enforced by validate_rounding_nunit().
trunc_multi_limits <- c(asecond = Inf, aminute = Inf, ahour = Inf,
                        second = 60, minute = 60, hour = 24, day = 31, year = Inf, week = 1,
                        month = 12, bimonth = 6, quarter = 4, season = 4, halfyear = 2)
parse_rounding_unit <- function(unit) {
  ## Normalise a user-supplied unit spec (e.g. "5 mins") into list(n=, unit=)
  ## via the C parser, then enforce the per-unit multiplier limits.
  if (length(unit) > 1) {
    warning("'unit' argument has length larger than 1. Using first element.")
    unit <- unit[[1]]
  }
  parsed <- .Call(C_parse_unit, as.character(unit))
  validate_rounding_nunit(parsed)
}
# Compat: TODO: remove once lubridate no longer uses .normalize_multi_week_unit
# https://github.com/tidyverse/lubridate/blob/8c67d9ceca5315ef636d4727348d8914aa5552ea/R/round.r#L206
# Backward-compatible alias retained for lubridate's benefit.
parse_units <- parse_rounding_unit
validate_rounding_nunit <- function(nunit) {
  ## Reject multipliers above the per-unit cap (e.g. "61 secs"); absolute
  ## units have limit Inf and always pass.
  ## nunit: list(n = multiplier, unit = canonical unit name), as produced by
  ## the C unit parser.  Returns `nunit` unchanged on success.
  ## Hoist the lookup so the limit is computed once instead of twice.
  limit <- trunc_multi_limits[[nunit$unit]]
  if (nunit$n > limit)
    stop(sprintf("Rounding with %s > %d is not supported. Use aseconds for arbitrary units.",
                 nunit$unit, limit))
  nunit
}
| /R/round.R | no_license | cran/timechange | R | false | false | 12,438 | r | #' Round, floor and ceiling for date-time objects
#'
#' @description \pkg{timechange} provides rounding to the nearest unit or multiple of a
#' unit with fractional support whenever makes sense. Units can be specified flexibly
#' as strings. All common abbreviations are supported - secs, min, mins, 2 minutes, 3
#' years, 2s, 1d etc.
#'
#' @section Civil Time vs Absolute Time rounding:
#'
#' Rounding in civil time is done on actual clock time (ymdHMS) and is affected
#' by civil time irregularities like DST. One important characteristic of civil
#' time rounding is that floor (ceiling) does not produce civil times that are
#' bigger (smaller) than the rounded civil time.
#'
#' Absolute time rounding (with `aseconds`, `aminutes` and `ahours`) is done on the
#' absolute time (number of seconds since origin), thus, allowing for fractional seconds
#' and arbitrary multi-units. See examples of rounding around DST transition where
#' rounding in civil time does not give the same result as rounding with the
#' corresponding `X aseconds`.
#'
#' Please note that absolute rounding to fractions smaller than 1ms will result
#' to large precision errors due to the floating point representation of the
#' POSIXct objects.
#'
#' @section Note on `time_round()`:
#'
#' For rounding date-times which is exactly halfway between two consecutive units,
#' the convention is to round up. Note that this is in line with the behavior of R's
#' [base::round.POSIXt()] function but does not follow the convention of the base
#' [base::round()] function which "rounds to the even digit" per IEC 60559.
#'
#'
#' @section Ceiling of `Date` objects:
#'
#' By default rounding up `Date` objects follows 3 steps:
#'
#' 1. Convert to an instant representing lower bound of the Date:
#' `2000-01-01` --> `2000-01-01 00:00:00`
#'
#' 2. Round up to the \strong{next} closest rounding unit boundary. For example,
#' if the rounding unit is `month` then next closest boundary of `2000-01-01`
#' is `2000-02-01 00:00:00`.
#'
#' The motivation for this is that the "partial" `2000-01-01` is conceptually
#' an interval (`2000-01-01 00:00:00` -- `2000-01-02 00:00:00`) and the day
#' hasn't started clocking yet at the exact boundary `00:00:00`. Thus, it
#' seems wrong to round up a day to its lower boundary.
#'
#' The behavior on the boundary can be changed by setting
#' `change_on_boundary` to a non-`NULL` value.
#'
#' 3. If rounding unit is smaller than a day, return the instant from step 2
#' (`POSIXct`), otherwise convert to and return a `Date` object.
#'
#' @name time_round
#' @param time a date-time vector (`Date`, `POSIXct` or `POSIXlt`)
#' @param unit a character string specifying a time unit or a multiple of a unit. Valid
#' base periods for civil time rounding are `second`, `minute`, `hour`, `day`, `week`,
#' `month`, `bimonth`, `quarter`, `season`, `halfyear` and `year`. The only units for
#' absolute time rounding are `asecond`, `aminute` and `ahour`. Other absolute units
#' can be achieved with multiples of `asecond` (e.g. "24ah"). See "Details" and
#' examples. Arbitrary unique English abbreviations are allowed. One letter
#' abbreviations follow `strptime` formats "y", "m", "d", "M", "H", "S". Multi-unit
#' rounding of weeks is currently not supported.
#'
#' Rounding for a unit is performed from the parent's unit origin. For example when
#' rounding to seconds origin is start of the minute. When rounding to days, origin is
#' first date of the month. See examples.
#'
#' With fractional sub-unit (unit < 1) rounding with child unit is performed
#' instead. For example 0.5mins == 30secs, .2hours == 12min etc.
#'
#' Please note that for fractions which don't match exactly to integer number of the
#' child units only the integer part is used for computation. For example .7days =
#' 16.8hours will use 16 hours during the computation.
#'
#' @param change_on_boundary If NULL (the default) don't change instants on the boundary
#' (`time_ceiling(ymd_hms('2000-01-01 00:00:00'))` is `2000-01-01 00:00:00`), but
#' round up `Date` objects to the next boundary (`time_ceiling(ymd("2000-01-01"),
#' "month")` is `"2000-02-01"`). When `TRUE`, instants on the boundary are rounded up
#' to the next boundary. When `FALSE`, date-time on the boundary are never rounded up
#'   (this was the default for \pkg{lubridate} prior to `v1.6.0`). See the
#'   section "Ceiling of `Date` objects" above for more details.
#' @param week_start When unit is `weeks`, this is the first day of the week. Defaults
#' to 1 (Monday).
#' @param origin Origin with respect to which to perform the rounding operation. For
#' absolute units only. Can be a vector of the same length as the input `time`
#' vector. Defaults to the Unix origin "1970-01-01 UTC".
#' @return An object of the same class as the input object. When input is a `Date`
#' object and unit is smaller than `day` a `POSIXct` object is returned.
#' @seealso [base::round()]
#' @examples
#'
#' ## print fractional seconds
#' options(digits.secs=6)
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_round(x, ".5 asec")
#' time_round(x, "sec")
#' time_round(x, "second")
#' time_round(x, "asecond")
#' time_round(x, "minute")
#' time_round(x, "5 mins")
#' time_round(x, "5M") # "M" for minute "m" for month
#' time_round(x, "hour")
#' time_round(x, "2 hours")
#' time_round(x, "2H")
#' time_round(x, "day")
#' time_round(x, "week")
#' time_round(x, "month")
#' time_round(x, "bimonth")
#' time_round(x, "quarter") == time_round(x, "3 months")
#' time_round(x, "halfyear")
#' time_round(x, "year")
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_floor(x, ".1 asec")
#' time_floor(x, "second")
#' time_floor(x, "minute")
#' time_floor(x, "M")
#' time_floor(x, "hour")
#' time_floor(x, ".2 ahour")
#' time_floor(x, "day")
#' time_floor(x, "week")
#' time_floor(x, "m")
#' time_floor(x, "month")
#' time_floor(x, "bimonth")
#' time_floor(x, "quarter")
#' time_floor(x, "season")
#' time_floor(x, "halfyear")
#' time_floor(x, "year")
#'
#' x <- as.POSIXct("2009-08-03 12:01:59.23")
#' time_ceiling(x, ".1 asec")
#' time_ceiling(x, "second")
#' time_ceiling(x, "minute")
#' time_ceiling(x, "5 mins")
#' time_ceiling(x, "hour")
#' time_ceiling(x, ".2 ahour")
#' time_ceiling(x, "day")
#' time_ceiling(x, "week")
#' time_ceiling(x, "month")
#' time_ceiling(x, "bimonth") == time_ceiling(x, "2 months")
#' time_ceiling(x, "quarter")
#' time_ceiling(x, "season")
#' time_ceiling(x, "halfyear")
#' time_ceiling(x, "year")
#'
#' ## behavior on the boundary
#' x <- as.Date("2000-01-01")
#' time_ceiling(x, "month")
#' time_ceiling(x, "month", change_on_boundary = FALSE)
#'
#' ## As of R 3.4.2 POSIXct printing of fractional numbers is wrong
#' as.POSIXct("2009-08-03 12:01:59.3", tz = "UTC") ## -> "2009-08-03 12:01:59.2 UTC"
#' time_ceiling(x, ".1 asec") ## -> "2009-08-03 12:01:59.2 UTC"
#'
#' ## Civil Time vs Absolute Time Rounding
#'
#' # "2014-11-02 01:59:59.5 EDT" before 1h backroll at 2AM
#' x <- .POSIXct(1414907999.5, tz = "America/New_York")
#' x
#' time_ceiling(x, "hour") # "2014-11-02 02:00:00 EST"
#' time_ceiling(x, "minute")
#' time_ceiling(x, "sec")
#' time_ceiling(x, "1ahour") # "2014-11-02 01:00:00 EST"
#' time_ceiling(x, "1asec")
#'
#' # "2014-11-02 01:00:00.5 EST" .5s after 1h backroll at 2AM
#' x <- .POSIXct(1414908000.5, tz = "America/New_York")
#' x
#' time_floor(x, "hour") # "2014-11-02 01:00:00 EST"
#' time_floor(x, "ahour") # "2014-11-02 01:00:00 EST"
#'
#' ## behavior on the boundary when rounding multi-units
#'
#' x <- as.POSIXct("2009-08-28 22:56:59.23", tz = "UTC")
#' time_ceiling(x, "3.4 secs") # "2009-08-28 22:57:03.4"
#' time_ceiling(x, "50.5 secs") # "2009-08-28 22:57:50.5"
#' time_ceiling(x, "57 min") # "2009-08-28 22:57:00"
#' time_ceiling(x, "56 min") # "2009-08-28 23:56:00"
#' time_ceiling(x, "7h") # "2009-08-29 07:00:00"
#' time_ceiling(x, "7d") # "2009-08-29 00:00:00"
#' time_ceiling(x, "8d") # "2009-09-09 00:00:00"
#' time_ceiling(x, "8m") # "2009-09-01 00:00:00"
#' time_ceiling(x, "6m") # "2010-01-01 00:00:00"
#' time_ceiling(x, "7m") # "2010-08-01 00:00:00"
#'
#' x <- as.POSIXct("2010-11-25 22:56:57", tz = "UTC")
#' time_ceiling(x, "6sec") # "2010-11-25 22:57:00"
#' time_ceiling(x, "60sec") # "2010-11-25 22:57:00"
#' time_ceiling(x, "6min") # "2010-11-25 23:00:00"
#' time_ceiling(x, "60min") # "2010-11-25 23:00:00"
#' time_ceiling(x, "4h") # "2010-11-26 00:00:00"
#' time_ceiling(x, "15d") # "2010-12-01 00:00:00"
#' time_ceiling(x, "15d") # "2010-12-01 00:00:00"
#' time_ceiling(x, "6m") # "2011-01-01 00:00:00"
#'
#'
#' ## custom origin
#' x <- as.POSIXct(c("2010-10-01 01:00:01", "2010-11-02 02:00:01"), tz = "America/New_York")
#' # 50 minutes from the day or month start
#' time_floor(x, "50amin")
#' time_floor(x, "50amin", origin = time_floor(x, "day"))
#' time_floor(x, "50amin", origin = time_floor(x, "month"))
#' time_ceiling(x, "50amin")
#' time_ceiling(x, "50amin", origin = time_floor(x, "day"))
#' time_ceiling(x, "50amin", origin = time_floor(x, "month"))
#'
#' @export
time_round <- function(time, unit = "second",
                       week_start = getOption("timechange.week_start", 1),
                       origin = unix_origin) {
  ## Round `time` to the nearest `unit` boundary, rounding up on exact ties
  ## (see the roxygen "Note on time_round()" section above).
  if (length(time) == 0L)
    return(time)
  nu <- parse_rounding_unit(unit)
  n <- nu$n
  unit <- nu$unit
  ct <- to_posixct(time)
  ## special case for fast absolute time rounding: single civil units (and
  ## absolute seconds from the Unix origin) map onto base round.POSIXt()
  if (n == 1 && (
    unit %in% c("day", "hour", "minute", "second") ||
    (unit == "asecond" && identical(origin, unix_origin))
  )) {
    out <- round.POSIXt(ct, units = base_units[[unit]])
    return(from_posixlt(out, time, force_date = unit != "hour"))
  }
  ## General path: pick the nearer of the floor/ceiling candidates computed
  ## on the numeric (unclassed) representation; ties go to the ceiling.
  ## FIXME: Behavior of this logic is likely slightly different from the above base
  ## rounding around DST. It has to do with hard-coded post-pre values in ceiling and
  ## floor.
  above <- unclass(C_time_ceiling(ct, unit, n, week_start, TRUE, origin))
  mid <- unclass(ct)
  below <- unclass(C_time_floor(ct, unit, n, week_start, origin))
  wabove <- (above - mid) <= (mid - below)
  ## NA distances (e.g. nonexistent civil times) fall back to the floor value.
  wabove <- !is.na(wabove) & wabove
  below[wabove] <- above[wabove]
  from_posixct(.POSIXct(below, tz = tz(time)), time,
               force_date = !unit %in% c("hour", "minute", "second", "asecond"))
}
#' @name time_round
#' @export
time_floor <- function(time, unit = "seconds",
                       week_start = getOption("timechange.week_start", 1),
                       origin = unix_origin) {
  ## Round `time` down to the previous `unit` boundary; empty input is
  ## returned unchanged.  Sub-day units keep POSIXct output even for Date
  ## input (force_date = FALSE); larger units convert back to Date.
  if (length(time) == 0)
    return(time)
  nu <- parse_rounding_unit(unit)
  from_posixct(C_time_floor(to_posixct(time), nu$unit, nu$n, as.integer(week_start), origin),
               time, force_date = !nu$unit %in% c("asecond", "second", "minute", "hour"))
}
#' @name time_round
#' @export
time_ceiling <- function(time, unit = "seconds",
                         change_on_boundary = inherits(time, "Date"),
                         week_start = getOption("timechange.week_start", 1),
                         origin = unix_origin) {
  ## Round `time` up to the next `unit` boundary.  `change_on_boundary`
  ## defaults to TRUE for Date input so that e.g. ceiling of "2000-01-01"
  ## to "month" yields "2000-02-01" (see the roxygen section above).
  if (length(time) == 0)
    return(time)
  nu <- parse_rounding_unit(unit)
  ## NOTE(review): unlike time_floor(), "asecond" is absent from the
  ## force_date exclusion list below -- confirm whether that asymmetry
  ## is intentional.
  from_posixct(C_time_ceiling(to_posixct(time), nu$unit, nu$n, as.integer(week_start),
                              as.logical(change_on_boundary), origin),
               time, force_date = !nu$unit %in% c("second", "minute", "hour"))
}
## UTILS

## Map single-unit civil names onto the `units` argument accepted by
## base::round.POSIXt() (used by the fast path in time_round()).
base_units <- list(second = "secs", minute = "mins", hour = "hours", day = "days")
## Maximum multiplier allowed per rounding unit; absolute units (a*) are
## unbounded, civil units are capped at the size of their parent unit.
## Enforced by validate_rounding_nunit().
trunc_multi_limits <- c(asecond = Inf, aminute = Inf, ahour = Inf,
                        second = 60, minute = 60, hour = 24, day = 31, year = Inf, week = 1,
                        month = 12, bimonth = 6, quarter = 4, season = 4, halfyear = 2)
parse_rounding_unit <- function(unit) {
  ## Parse a user-supplied unit spec (e.g. "5 mins") into list(n=, unit=)
  ## via the C parser, then enforce the per-unit multiplier limits.
  if (length(unit) > 1) {
    warning("'unit' argument has length larger than 1. Using first element.")
    unit <- unit[[1]]
  }
  validate_rounding_nunit(.Call(C_parse_unit, as.character(unit)))
}
# Compat: TODO: remove once lubridate no longer uses .normalize_multi_week_unit
# https://github.com/tidyverse/lubridate/blob/8c67d9ceca5315ef636d4727348d8914aa5552ea/R/round.r#L206
# Backward-compatible alias retained for lubridate's benefit.
parse_units <- parse_rounding_unit
validate_rounding_nunit <- function(nunit) {
  ## Guard against unsupported multipliers (e.g. "61 secs"): each civil unit
  ## is capped by trunc_multi_limits; absolute units (limit Inf) always pass.
  ## Returns `nunit` unchanged on success.
  if (nunit$n > trunc_multi_limits[[nunit$unit]])
    stop(sprintf("Rounding with %s > %d is not supported. Use aseconds for arbitrary units.",
                 nunit$unit, trunc_multi_limits[[nunit$unit]]))
  nunit
}
|
# Author: Yongyan (Carina) Zheng
# Student ID 85424581
# DATA423 Assignment 2#
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {

  ## ---- Shared helpers ------------------------------------------------------

  ## Standardised (optionally centered/scaled) boxplot across the numeric
  ## columns of `data`; shared by the Source and Cleansed EDA tabs.
  ## `multiplier` is the IQR coefficient from the tab's own slider.
  stdBoxplot <- function(data, numericCols, multiplier) {
    standarizedData <- scale(data[ , numericCols, drop = FALSE]
                             , center = input$edaPlotCenter, scale = input$edaPlotScale)
    keyValues <- tidyr::gather(as.data.frame(standarizedData))
    keyValuesDT <- data.table(keyValues)
    ggplot(mapping = aes(x = keyValuesDT$key, y = keyValuesDT$value, fill = keyValuesDT$key)) +
      geom_boxplot(coef = multiplier, outlier.colour = "red") +
      ## BUGFIX: the title previously referenced input$edaPlotMultiplier, an
      ## input id that does not exist; report the multiplier actually applied.
      labs(title = paste("Boxplots at IQR multiplier of", multiplier),
           x = "Standardised variable value", y = "Std Value") +
      coord_flip()
  }

  ## Accuracy comparison barchart shared by the Imputation and Modelling tabs.
  accuracyBarchart <- function(accuracyDT, selectedMethods, xLabel) {
    ggplot(accuracyDT[ method %in% selectedMethods, ], aes(method, weight = accuracy)) +
      geom_bar(fill = "#FF6666") +
      xlab(xLabel) +
      ylab("Accuracy in Percentage") +
      labs(title = "Accuracy visual comparison between selected methods")
  }

  ################## ******* ABOUT PROJECT ******* ##################

  ################## ******* SOURCE DATA ******* ##################
  ####### EDA of Source Data #######
  ## summarytools overview of the raw source data, rendered as HTML.
  output$edaSourceSummary <- renderPrint(
    print(dfSummary(canterSource, graph.magnif = 0.8),
          method = "render",
          headings = TRUE,
          bootstrap.css = FALSE)
  )

  output$edaSourcePlan <- renderPrint({
    paste("Few Characters"
          , "background info etc"
          , sep = "\n")
  })

  ####### Analysis Numeric Variables of Source Data #######
  output$edaSourceBoxplot <- renderPlot({
    stdBoxplot(canterSource, canterSourceColsType$numericList,
               input$edaSourcePlotMultiplier)
  })

  ####### Analysis Factor Variables of Source Data #######
  output$edaSourceBarchart <- renderGvis({
    plotGVisColChart(canterSource, canterSourceColsType)
  })

  ####### Missing Data of Source Data #######
  output$edaSourceMissingData <- renderPlot({ visdat::vis_dat(canterSource) })
  output$edaSourceMissDataPattern <- renderPlot({ naniar::gg_miss_upset(canterSource) })

  ####### Text of Source Data #######
  output$edaSourceBoxDesc <- renderPrint({
    cat(
      "Unpruned.thinnings.m3.ha., Thinnings.m3.ha., Pulplog.thinnings.m3.ha. variables have 0 or 1 level of value."
      , "Without Centering or Scaling data, variables Unpruned.logs.m3.ha., TRV.m3.ha., Pulplogs.m3.ha., Pruned.logs.m3.ha., Age.years. have very different value ranges."
      , "With Centering and Scaling enabled, these variables are more normally distributed, except for Pruned.logs.m3.ha.."
      , "Pruned.logs.m3.ha. has a lot of values outside of maximum boundary. We shall have a further check to confirm if they are true outliers."
      , "At the same time, first quartile and median of Pruned.logs.m3.ha. minimum value are very close"
      , sep = "\n"
    )
  })

  output$edaSourceBarDesc <- renderPrint({
    cat(
      "Wood.Supply.Region and Thinning variables have only one level of value."
      , "Therefore, I believe they can be excluded in further analysis."
      ## Typo fix in the user-facing text ("exsits" -> "exists").
      , "Severe level of Class imbalance exists in all factor variables: Species, Pruning, Planting.coverage, Owner.size."
      , "Class imbalance should be fixed before actual modelling."
      , sep = "\n"
    )
  })

  ################## ******* CLEANSED DATA ******* ##################
  ####### EDA of Cleansed Data #######
  output$edaCleansedSummary <- renderPrint(
    print(dfSummary(canterCleansed, graph.magnif = 0.8),
          method = "render",
          headings = TRUE,
          bootstrap.css = FALSE)
  )

  ####### Analysis Numeric Variables of Cleansed Data #######
  output$edaCleansedBoxplot <- renderPlot({
    stdBoxplot(canterCleansed, canterCleansedColsType$numericList,
               input$edaCleansedPlotMultiplier)
  })

  ####### Analysis Factor Variables of Cleansed Data #######
  output$edaCleansedBarchart <- renderGvis({
    plotGVisColChart(canterCleansed, canterCleansedColsType)
  })

  ####### Missing Data of Cleansed Data #######
  output$edaCleansedMissingData <- renderPlot({ visdat::vis_dat(canterCleansed) })
  ## Rows that still contain any NA after cleansing.
  output$edaCleansedMissDT <- renderDataTable(
    canterCleansed[!complete.cases(canterCleansed), ]
    , options = list(scrollX = TRUE, pageLength = 10)
  )

  ################## ******* IMPUTATION ******* ##################
  ## Re-apply imputation learning when the train/test ratio changes.  The
  ## ratio is exposed to the user as a percentage, hence the division by 100.
  observeEvent(input$imputeTrainRatio, {
    imputeMaster(canterCleansed, input$imputeTrainRatio/100, "Pruning")
    output$imputationResultTable <- renderDataTable({
      masterImputationResultDT[ method %in% input$imputeMethods, ]
    })
    output$imputationAccuracyTable <- renderDataTable({
      imputationAccuracyDT[ method %in% input$imputeMethods, ]
    })
  })

  ## Initial renderings shown before the first ratio-change event fires.
  output$imputationResultTable <- renderDataTable({
    masterImputationResultDT[ method %in% input$imputeMethods, ]
  })
  output$imputationAccuracyTable <- renderDataTable({
    imputationAccuracyDT[ method %in% input$imputeMethods, ]
  })
  output$imputeResultBarchart <- renderPlot({
    accuracyBarchart(imputationAccuracyDT, input$imputeMethods,
                     "Selected Imputation Methods")
  })

  ################## ******* MODELLING ******* ##################
  ## Re-train models when the train/test ratio changes (percentage -> fraction).
  observeEvent(input$modelTrainRatio, {
    modelMaster(input$modelTrainRatio/100)
    output$masterModelResultDT <- renderDataTable({
      masterModelResultDT[ method %in% input$modelMethods, ]
    })
    output$modelAccuracyTable <- renderDataTable({
      modelAccuracyDT[ method %in% input$modelMethods, ]
    })
    output$modelConfMatROSE <- renderPrint({ modelResultConfMatROSE })
    output$modelConfMatWeighted <- renderPrint({ modelResultConfMatWeighted })
    output$modelConfMatRecipe <- renderPrint({ modelResultConfMatRecipe })
    output$modelSimpleTable <- renderPrint({ modelTabSimple })
    output$modelResultBarchart <- renderPlot({
      accuracyBarchart(modelAccuracyDT, input$modelMethods,
                       "Selected Modelling Methods")
    })
  })

  ## Initial renderings shown before the first ratio-change event fires.
  output$masterModelResultDT <- renderDataTable({
    masterModelResultDT[ method %in% input$modelMethods, ]
  })
  output$modelAccuracyTable <- renderDataTable({
    modelAccuracyDT[ method %in% input$modelMethods, ]
  })
  output$modelConfMatROSE <- renderPrint({ modelResultConfMatROSE })
  output$modelConfMatWeighted <- renderPrint({ modelResultConfMatWeighted })
  output$modelConfMatRecipe <- renderPrint({ modelResultConfMatRecipe })
  output$modelSimpleTable <- renderPrint({ modelTabSimple })
  output$modelResultBarchart <- renderPlot({
    accuracyBarchart(modelAccuracyDT, input$modelMethods,
                     "Selected Modelling Methods")
  })

  ################## ******* CLASSIFICATION ******* ##################
  output$treeRPart <- renderPlot({
    plotRPartTree()
  })

  ################## ******* PROJECT END ******* ##################
})
# Student ID 85424581
# DATA423 Assignment 2#
# Define server logic required to draw a histogram
# Shiny server for the DATA423 Assignment 2 app. Registers the render
# functions for the EDA ("Source" / "Cleansed" data), Imputation, Modelling
# and Classification tabs of the UI.
# NOTE(review): canterSource, canterCleansed, canterSourceColsType,
# canterCleansedColsType, imputeMaster(), modelMaster(), plotGVisColChart(),
# plotRPartTree() and the *ResultDT / *AccuracyDT tables are not defined in
# this file — presumably created in global.R at app start-up; confirm.
shinyServer(function(input, output, session) {
################## ******* ABOUT PROJECT ******* ##################
################## ******* SOURCE DATA ******* ##################
####### EDA of Source Data #######
# Full summary of the raw data (summarytools::dfSummary rendered as HTML).
output$edaSourceSummary <- renderPrint(
print(dfSummary(canterSource, graph.magnif = 0.8),
method = "render",
headings = TRUE,
bootstrap.css = FALSE)
)
# Placeholder text block for background information.
output$edaSourcePlan <- renderPrint({
paste("Few Characters"
, "background info etc"
, sep = "\n")
})
####### Analysis Numeric Variables of Source Data #######
# Boxplots of the numeric columns. Centering/scaling and the IQR multiplier
# come from the UI inputs; data is reshaped wide -> long for ggplot.
output$edaSourceBoxplot <- renderPlot({
standarizedData <- scale(canterSource[ , canterSourceColsType$numericList, drop = FALSE]
, center = input$edaPlotCenter, scale = input$edaPlotScale)
keyValues <- tidyr::gather(as.data.frame(standarizedData))
keyValuesDT <- data.table(keyValues)
ggplot(mapping = aes(x = keyValuesDT$key, y = keyValuesDT$value, fill = keyValuesDT$key)) +
geom_boxplot(coef = input$edaSourcePlotMultiplier, outlier.colour = "red") +
labs(title = paste("Boxplots at IQR multiplier of", input$edaPlotMultiplier),
x = "Standardised variable value", y = "Std Value") +
coord_flip()
})
####### Analysis Factor Variables of Source Data #######
# googleVis column charts for the factor variables.
output$edaSourceBarchart <- renderGvis({
visPlotList <- plotGVisColChart(canterSource, canterSourceColsType)
return(visPlotList)
})
####### Missing Data of Source Data #######
# Missingness overview (visdat) and co-missingness pattern (naniar upset plot).
output$edaSourceMissingData <- renderPlot({visdat::vis_dat(canterSource)})
output$edaSourceMissDataPattern <- renderPlot({ naniar::gg_miss_upset(canterSource) })
####### Text of Source Data #######
# Hand-written interpretation of the boxplots above (static text).
output$edaSourceBoxDesc <- renderPrint({
cat(
"Unpruned.thinnings.m3.ha., Thinnings.m3.ha., Pulplog.thinnings.m3.ha. variables have 0 or 1 level of value."
, "Without Centering or Scaling data, variables Unpruned.logs.m3.ha., TRV.m3.ha., Pulplogs.m3.ha., Pruned.logs.m3.ha., Age.years. have very different value ranges."
, "With Centering and Scaling enabled, these variables are more normally distributed, except for Pruned.logs.m3.ha.."
, "Pruned.logs.m3.ha. has a lot of values outside of maximum boundary. We shall have a further check to confirm if they are true outliers."
, "At the same time, first quartile and median of Pruned.logs.m3.ha. minimum value are very close"
, sep = "\n"
)
})
# Hand-written interpretation of the bar charts above (static text).
output$edaSourceBarDesc <- renderPrint({
cat(
"Wood.Supply.Region and Thinning variables have only one level of value."
, "Therefore, I believe they can be excluded in further analysis."
, "Severe level of Class imbalance exsits in all factor variables: Species, Pruning, Planting.coverage, Owner.size."
, "Class imbalance should be fixed before actual modelling."
, sep = "\n"
)
})
################## ******* CLEANSED DATA ******* ##################
####### EDA of Cleansed Data #######
# Same dfSummary view as above, for the cleansed data set.
output$edaCleansedSummary <- renderPrint(
print(dfSummary(canterCleansed, graph.magnif = 0.8),
method = "render",
headings = TRUE,
bootstrap.css = FALSE)
)
####### Analysis Numeric Variables of Cleansed Data #######
# Mirrors the Source-data boxplot block, driven by the cleansed-tab inputs.
output$edaCleansedBoxplot <- renderPlot({
standarizedData <- scale(canterCleansed[ , canterCleansedColsType$numericList, drop = FALSE]
, center = input$edaPlotCenter, scale = input$edaPlotScale)
keyValues <- tidyr::gather(as.data.frame(standarizedData))
keyValuesDT <- data.table(keyValues)
ggplot(mapping = aes(x = keyValuesDT$key, y = keyValuesDT$value, fill = keyValuesDT$key)) +
geom_boxplot(coef = input$edaCleansedPlotMultiplier, outlier.colour = "red") +
labs(title = paste("Boxplots at IQR multiplier of", input$edaPlotMultiplier),
x = "Standardised variable value", y = "Std Value") +
coord_flip()
})
####### Analysis Factor Variables of Cleansed Data #######
output$edaCleansedBarchart <- renderGvis({
visPlotList <- plotGVisColChart(canterCleansed, canterCleansedColsType)
return(visPlotList)
})
####### Missing Data of Cleansed Data #######
# Missingness overview plus a paginated table of the incomplete rows.
output$edaCleansedMissingData <- renderPlot({visdat::vis_dat(canterCleansed)})
output$edaCleansedMissDT <- renderDataTable(
canterCleansed[!complete.cases(canterCleansed), ]
, options = list(scrollX = TRUE, pageLength = 10)
)
################## ******* IMPUTATION ******* ##################
## Re-apply imputation learning when ratio is changed
# The observeEvent re-runs imputeMaster() and re-registers the result tables
# whenever the slider changes; the same outputs are also registered once
# below so the tables render on first page load.
observeEvent( input$imputeTrainRatio, {
## The ratio is set to percentage for the ease of user, therefore, it needs to be divided by 100
## before further calculations
imputeMaster(canterCleansed, input$imputeTrainRatio/100, "Pruning")
output$imputationResultTable <- renderDataTable({
masterImputationResultDT[ method %in% input$imputeMethods, ]
})
output$imputationAccuracyTable <- renderDataTable({
imputationAccuracyDT[ method %in% input$imputeMethods, ]
## TBD - to change var to input?
})
})
# Initial registrations (before the ratio slider is first touched).
output$imputationResultTable <- renderDataTable({
masterImputationResultDT[ method %in% input$imputeMethods, ]
## TBD - to change var to input?
})
output$imputationAccuracyTable <- renderDataTable({
imputationAccuracyDT[ method %in% input$imputeMethods, ]
## TBD - to change var to input?
})
# Accuracy bar chart for the selected imputation methods.
# NOTE(review): "comparsion" in the title string below is a typo in
# user-facing text; left unchanged in this documentation-only pass.
output$imputeResultBarchart <- renderPlot({
ggplot(imputationAccuracyDT[ method %in% input$imputeMethods, ], aes(method, weight = accuracy)) +
geom_bar(fill = "#FF6666") +
xlab("Selected Imputation Methods") +
ylab("Accuracy in Percentage") +
labs(title = "Accuracy visual comparsion between selected methods")
})
################## ******* MODELLING ******* ##################
## The ratio is set to percentage for the ease of user, therefore, it needs to be divided by 100
## before further calculations
## input$modelTrainRatio
## Re-apply modelling learning when ratio is changed
# Same pattern as the imputation tab: re-fit via modelMaster() on slider
# change and re-register the outputs; duplicates below cover first load.
observeEvent( input$modelTrainRatio, {
## The ratio is set to percentage for the ease of user, therefore, it needs to be divided by 100
## before further calculations
modelMaster(input$modelTrainRatio/100)
output$masterModelResultDT <- renderDataTable({
masterModelResultDT[ method %in% input$modelMethods, ]
})
output$modelAccuracyTable <- renderDataTable({
modelAccuracyDT[ method %in% input$modelMethods, ]
## TBD - to change var to input?
})
output$modelConfMatROSE <- renderPrint({modelResultConfMatROSE})
output$modelConfMatWeighted <- renderPrint({modelResultConfMatWeighted})
output$modelConfMatRecipe <- renderPrint({modelResultConfMatRecipe})
output$modelSimpleTable <- renderPrint({modelTabSimple})
output$modelResultBarchart <- renderPlot({
ggplot(modelAccuracyDT[ method %in% input$modelMethods, ], aes(method, weight = accuracy)) +
geom_bar(fill = "#FF6666") +
xlab("Selected Modelling Methods") +
ylab("Accuracy in Percentage") +
labs(title = "Accuracy visual comparsion between selected methods")
})
})
# Initial registrations for the modelling tab (before the slider changes).
output$masterModelResultDT <- renderDataTable({
masterModelResultDT[ method %in% input$modelMethods, ]
## TBD - to change var to input?
})
output$modelAccuracyTable <- renderDataTable({
modelAccuracyDT[ method %in% input$modelMethods, ]
## TBD - to change var to input?
})
# Confusion matrices and simple-model table computed by modelMaster().
output$modelConfMatROSE <- renderPrint({modelResultConfMatROSE})
output$modelConfMatWeighted <- renderPrint({modelResultConfMatWeighted})
output$modelConfMatRecipe <- renderPrint({modelResultConfMatRecipe})
output$modelSimpleTable <- renderPrint({modelTabSimple})
output$modelResultBarchart <- renderPlot({
ggplot(modelAccuracyDT[ method %in% input$modelMethods, ], aes(method, weight = accuracy)) +
geom_bar(fill = "#FF6666") +
xlab("Selected Modelling Methods") +
ylab("Accuracy in Percentage") +
labs(title = "Accuracy visual comparsion between selected methods")
})
################## ******* CLASSIFICATION ******* ##################
# Decision-tree plot produced by the helper defined elsewhere in the app.
output$treeRPart <- renderPlot({
plotRPartTree()
})
################## ******* PROJECT END ******* ##################
}) |
# Demo: histograms and line plots with RevoScaleR on the AirlineDemoSmall
# sample data. Imports the sample XDF to a local copy, then walks through
# rxHistogram() and rxLinePlot() options.
setwd("C:/Users/mhuffer/Documents/GitHub/ML-Server")
library(RevoScaleR)

# Inspect the files shipped in the sample data directory.
list.files(rxGetOption("sampleDataDir"))
data_source <- file.path(rxGetOption("sampleDataDir"), "AirlineDemoSmall.xdf")

# Materialise a local copy of the XDF so the shipped sample stays untouched.
airXdf <- rxImport(data_source, outFile = "C:/Users/mhuffer/Documents/GitHub/ML-Server/airXdf.xdf", overwrite = TRUE)
rxGetInfo(airXdf, getVarInfo = TRUE)

# Use F() to quickly compute bins for each integer level
rxHistogram(~F(CRSDepTime), data = airXdf)
# Specify the number of breaks
rxHistogram(~F(CRSDepTime), numBreaks = 11, data = airXdf)
# Create panels for each of the days of the week
rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf)
# Print the x axis labels at an angle and all panels in a row
rxHistogram(~F(CRSDepTime) | DayOfWeek, scales = list(x = list(rot = 30)), data = airXdf, layout = c(7, 1))

# Show panels for each day on a separate page
numCols <- 1
numRows <- 1
# Set ask to pause between each plot. BUG FIX: the original set
# par(ask = TRUE) and never reset it, leaking the prompt-before-plot state
# into every later plot; save the old value and restore it afterwards.
old_par <- par(ask = TRUE)
rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf, layout = c(numCols, numRows))
par(old_par)  # restore the previous graphics state

# Create a jpeg file for each page, named myplot001.jpeg, etc
# jpeg(file="myplot",
# rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf,
# blocksPerRead=6, layout=c(numCols, numRows)))
# dev.off()

# rxLinePlot
# Simple scatter plot
rxLinePlot(ArrDelay ~ CRSDepTime, data = airXdf, type = "p")

# Aggregate counts by day-of-week x departure hour for the line plot below.
airCube <- rxCube(~DayOfWeek:F(CRSDepTime), data = airXdf)
airResults <- rxResultsDF(airCube)
rxLinePlot(Counts ~ DayOfWeek , groups = DayOfWeek, data = airResults) | /rxHistogram_rxLinePlot.R | no_license | mdhuffer299/ML-Server | R | false | false | 1,507 | r | setwd("C:/Users/mhuffer/Documents/GitHub/ML-Server")
library(RevoScaleR)
list.files(rxGetOption("sampleDataDir"))
data_source <- file.path(rxGetOption("sampleDataDir"), "AirlineDemoSmall.xdf")
airXdf <- rxImport(data_source, outFile = "C:/Users/mhuffer/Documents/GitHub/ML-Server/airXdf.xdf", overwrite = TRUE)
rxGetInfo(airXdf, getVarInfo = TRUE)
# Use F() to quickly compute bins for each integer level
rxHistogram(~F(CRSDepTime), data = airXdf)
# Specify the number of breaks
rxHistogram(~F(CRSDepTime), numBreaks = 11, data = airXdf)
# Create panels for each of the days of the week
rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf)
# print the x axis labels at an angle and all panels in a row
rxHistogram(~F(CRSDepTime) | DayOfWeek, scales = list(x = list(rot = 30)), data = airXdf, layout = c(7, 1))
# Show panels for each day on a separate page
numCols <- 1
numRows <- 1
# Set ask to pause between each plot
par(ask = TRUE)
rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf, layout = c(numCols, numRows))
# Create a jpeg file file for each page, named myplot001.jpeg, etc
# jpeg(file="myplot",
# rxHistogram(~F(CRSDepTime) | DayOfWeek, data = airXdf,
# blocksPerRead=6, layout=c(numCols, numRows)))
# dev.off()
# rxLinePlot
# Simple scatter plot
rxLinePlot(ArrDelay ~ CRSDepTime, data = airXdf, type = "p")
airCube <- rxCube(~DayOfWeek:F(CRSDepTime), data = airXdf)
airResults <- rxResultsDF(airCube)
rxLinePlot(Counts ~ DayOfWeek , groups = DayOfWeek, data = airResults) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STN_.R
\name{STN_DATA_RANGE}
\alias{STN_DATA_RANGE}
\title{STN_DATA_RANGE function}
\source{
HYDAT
}
\usage{
STN_DATA_RANGE(hydat_path = NULL, STATION_NUMBER = NULL,
PROV_TERR_STATE_LOC = NULL)
}
\arguments{
\item{hydat_path}{Directory to the hydat database. Can be set as "Hydat.sqlite3" which will look for Hydat in the working directory.
The hydat path can also be set in the \code{.Renviron} file so that it doesn't have to be specified in every function call. The path should
be set as the variable \code{hydat}. Open the \code{.Renviron} file using this command: \code{file.edit("~/.Renviron")}.}
\item{STATION_NUMBER}{Water Survey of Canada station number. If this argument is omitted, the value of \code{PROV_TERR_STATE_LOC}
is returned.}
\item{PROV_TERR_STATE_LOC}{Province, state or territory. If this argument is omitted, the value of \code{STATION_NUMBER}
is returned. See \code{unique(allstations$PROV_TERR_STATE_LOC)}}
}
\value{
A tibble of STN_DATA_RANGE
}
\description{
STN_DATA_RANGE look-up Table
}
\examples{
\donttest{
STN_DATA_RANGE(STATION_NUMBER = c("02JE013","08MF005"))
}
}
\seealso{
Other HYDAT functions: \code{\link{AGENCY_LIST}},
\code{\link{ANNUAL_INSTANT_PEAKS}},
\code{\link{ANNUAL_STATISTICS}},
\code{\link{DATUM_LIST}}, \code{\link{DLY_FLOWS}},
\code{\link{DLY_LEVELS}}, \code{\link{MONTHLY_FLOWS}},
\code{\link{MONTHLY_LEVELS}},
\code{\link{REGIONAL_OFFICE_LIST}},
\code{\link{SED_DLY_LOADS}},
\code{\link{SED_DLY_SUSCON}},
\code{\link{SED_MONTHLY_LOADS}},
\code{\link{SED_MONTHLY_SUSCON}},
\code{\link{SED_SAMPLES_PSD}}, \code{\link{SED_SAMPLES}},
\code{\link{STATIONS}},
\code{\link{STN_DATA_COLLECTION}},
\code{\link{STN_OPERATION_SCHEDULE}},
\code{\link{STN_REGULATION}}, \code{\link{VERSION}}
}
| /man/STN_DATA_RANGE.Rd | permissive | lawinslow/tidyhydat | R | false | true | 1,839 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/STN_.R
\name{STN_DATA_RANGE}
\alias{STN_DATA_RANGE}
\title{STN_DATA_RANGE function}
\source{
HYDAT
}
\usage{
STN_DATA_RANGE(hydat_path = NULL, STATION_NUMBER = NULL,
PROV_TERR_STATE_LOC = NULL)
}
\arguments{
\item{hydat_path}{Directory to the hydat database. Can be set as "Hydat.sqlite3" which will look for Hydat in the working directory.
The hydat path can also be set in the \code{.Renviron} file so that it doesn't have to specified every function call. The path should
set as the variable \code{hydat}. Open the \code{.Renviron} file using this command: \code{file.edit("~/.Renviron")}.}
\item{STATION_NUMBER}{Water Survey of Canada station number. If this argument is omitted, the value of \code{PROV_TERR_STATE_LOC}
is returned.}
\item{PROV_TERR_STATE_LOC}{Province, state or territory. If this argument is omitted, the value of \code{STATION_NUMBER}
is returned. See \code{unique(allstations$PROV_TERR_STATE_LOC)}}
}
\value{
A tibble of STN_DATA_RANGE
}
\description{
STN_DATA_RANGE look-up Table
}
\examples{
\donttest{
STN_DATA_RANGE(STATION_NUMBER = c("02JE013","08MF005"))
}
}
\seealso{
Other HYDAT functions: \code{\link{AGENCY_LIST}},
\code{\link{ANNUAL_INSTANT_PEAKS}},
\code{\link{ANNUAL_STATISTICS}},
\code{\link{DATUM_LIST}}, \code{\link{DLY_FLOWS}},
\code{\link{DLY_LEVELS}}, \code{\link{MONTHLY_FLOWS}},
\code{\link{MONTHLY_LEVELS}},
\code{\link{REGIONAL_OFFICE_LIST}},
\code{\link{SED_DLY_LOADS}},
\code{\link{SED_DLY_SUSCON}},
\code{\link{SED_MONTHLY_LOADS}},
\code{\link{SED_MONTHLY_SUSCON}},
\code{\link{SED_SAMPLES_PSD}}, \code{\link{SED_SAMPLES}},
\code{\link{STATIONS}},
\code{\link{STN_DATA_COLLECTION}},
\code{\link{STN_OPERATION_SCHEDULE}},
\code{\link{STN_REGULATION}}, \code{\link{VERSION}}
}
|
# maarjAM reference builder: merges the class-level taxonomy exports and
# FASTA files downloaded from the maarjAM database into a single
# "accession -> taxonomy" mapping plus one merged, name-ordered FASTA file.
library("gdata")
library("Biostrings")

# Read taxonomy files downloaded from maarjAM (one workbook per fungal class).
paraglom <- read.xls("/media/FULL_PATH_HERE/export_biogeo_Paraglomeromycetes.xls", sheet = 1, fileEncoding = "latin1", stringsAsFactors = FALSE)
View(paraglom)
archaeo <- read.xls("/media/FULL_PATH_HERE/export_biogeo_Archaeosporomycetes.xls", sheet = 1, fileEncoding = "latin1", stringsAsFactors = FALSE)
glomero <- read.xls("/media/FULL_PATH_HERE/export_biogeo_glomeromycetes.xls", sheet = 1, fileEncoding = "latin1", stringsAsFactors = FALSE)

all <- rbind(paraglom, archaeo, glomero)
dim(all)

# Show, then drop, duplicated accession numbers and the "YYY00000" placeholder.
all[duplicated(all$GenBank.accession.number), ][, 2]
all <- all[!duplicated(all$GenBank.accession.number), ]
dim(all)
all <- all[all$GenBank.accession.number != "YYY00000", ]
dim(all)

# Order by accession so rows line up with the name-ordered FASTA written below.
all.ordered <- all[order(as.character(all[, "GenBank.accession.number"])), ]
dim(all.ordered)
head(all.ordered)

# Build the two-column table: accession | semicolon-delimited lineage.
# The virtual-taxon id (VTX) is appended only when present, matching the two
# branches of the original if/else (now de-duplicated).
all.ordered_taxo <- data.frame()
for (i in seq_len(nrow(all.ordered))) {
  lineage <- paste0("Fungi;Glomeromycota;",
                    all.ordered[i, "Fungal.class"], ";",
                    all.ordered[i, "Fungal.order"], ";",
                    all.ordered[i, "Fungal.family"], ";",
                    all.ordered[i, "Fungal.genus"], "_",
                    all.ordered[i, "Fungal.species"])
  if (all.ordered$VTX[i] != "") {
    lineage <- paste0(lineage, "_", all.ordered[i, "VTX"])
  }
  all.ordered_taxo[i, 1] <- all.ordered[i, "GenBank.accession.number"]
  all.ordered_taxo[i, 2] <- lineage
}
dim(all.ordered_taxo)

write.table(all.ordered_taxo, "/media/FULL_PATH_HERE/maarjAM_id_to_taxonomy.txt", sep = "\t",
            row.names = FALSE, col.names = FALSE, quote = FALSE)

# Read FASTA files downloaded from maarjAM and strip the "gb|" prefix so the
# sequence names are bare accession numbers, matching the table above.
paraglom.seq <- readBStringSet("/media/FULL_PATH_HERE/paraglomeromycetes_seq.txt", "fasta")
names(paraglom.seq) <- gsub("gb\\|", "", names(paraglom.seq))
archaeo.seq <- readBStringSet("/media/FULL_PATH_HERE/archaeosporomycetes_seq.txt", "fasta")
names(archaeo.seq) <- gsub("gb\\|", "", names(archaeo.seq))
glomerom.seq <- readBStringSet("/media/FULL_PATH_HERE/glomeromycetes_seq.txt", "fasta")
names(glomerom.seq) <- gsub("gb\\|", "", names(glomerom.seq))

# Join sequences from all files into one set, drop the placeholder record,
# and order by name.
all.seq <- append(paraglom.seq, c(archaeo.seq, glomerom.seq), after = length(paraglom.seq))
all.seq <- all.seq[names(all.seq) != "YYY00000"]
all.ordered.seq <- all.seq[order(as.character(names(all.seq)))]
writeXStringSet(all.ordered.seq, "/FULL_PATH_HERE/maarjAM.fasta", format = "fasta")

# BUG FIX: this shell command was pasted into the script as bare R code,
# which made the whole file fail to parse. Invoke it through system()
# instead (GNU sed \U: uppercase every sequence character in the FASTA).
system("sed -i 's/[a-z]/\\U&/g' maarjAM.fasta") # fix lowercase in seqs
| /maarjam_updater.r | no_license | FilipeMatteoli/microbiome_codes | R | false | false | 2,677 | r | library("gdata")
library("Biostrings")
#read taxonomy files downloaded from maarjaam
paraglom <- read.xls("/media/FULL_PATH_HERE/export_biogeo_Paraglomeromycetes.xls", sheet = 1, fileEncoding="latin1", stringsAsFactors=FALSE)
View(paraglom)
archaeo <- read.xls("/media/FULL_PATH_HERE/export_biogeo_Archaeosporomycetes.xls", sheet = 1, fileEncoding="latin1", stringsAsFactors=FALSE)
glomero <- read.xls("/media/FULL_PATH_HERE/export_biogeo_glomeromycetes.xls", sheet = 1, fileEncoding="latin1", stringsAsFactors=FALSE)
all <- rbind(paraglom,archaeo,glomero)
dim(all)
all[duplicated(all$GenBank.accession.number), ][,2]
all <- all[!duplicated(all$GenBank.accession.number), ]
dim(all)
all <- all[all$GenBank.accession.number != "YYY00000", ]
dim(all)
all.ordered <- all[order(as.character(all[,"GenBank.accession.number"])),]
dim(all.ordered)
head(all.ordered)
all.ordered_taxo <- data.frame()
for (i in 1:nrow(all.ordered)){
if (all.ordered$VTX[i] != ""){
all.ordered_taxo[i, 1] <- all.ordered[i, "GenBank.accession.number"]
all.ordered_taxo[i, 2] <- paste0("Fungi;Glomeromycota;",
all.ordered[i, "Fungal.class"],
";",
all.ordered[i, "Fungal.order"],
";",
all.ordered[i, "Fungal.family"],
";",
all.ordered[i, "Fungal.genus"],
"_",
all.ordered[i, "Fungal.species"],
"_",
all.ordered[i, "VTX"]
)
} else {
all.ordered_taxo[i, 1] <- all.ordered[i, "GenBank.accession.number"]
all.ordered_taxo[i, 2] <- paste0("Fungi;Glomeromycota;",
all.ordered[i, "Fungal.class"],
";",
all.ordered[i, "Fungal.order"],
";",
all.ordered[i, "Fungal.family"],
";",
all.ordered[i, "Fungal.genus"],
"_",
all.ordered[i, "Fungal.species"]
)
}
}
dim(all.ordered_taxo)
write.table(all.ordered_taxo, "/media/FULL_PATH_HERE/maarjAM_id_to_taxonomy.txt", sep = "\t",
row.names = FALSE, col.names = FALSE, quote = FALSE)
#read fasta files from downloaded from maarjam
paraglom.seq <- readBStringSet("/media/FULL_PATH_HERE/paraglomeromycetes_seq.txt","fasta")
names(paraglom.seq) <- gsub("gb\\|", "", names(paraglom.seq))
archaeo.seq <- readBStringSet("/media/FULL_PATH_HERE/archaeosporomycetes_seq.txt", "fasta")
names(archaeo.seq) <- gsub("gb\\|", "", names(archaeo.seq))
glomerom.seq <- readBStringSet("/media/FULL_PATH_HERE/glomeromycetes_seq.txt", "fasta")
names(glomerom.seq) <- gsub("gb\\|", "", names(glomerom.seq))
#join sequences from all files into one
all.seq <- append(paraglom.seq, c(archaeo.seq,glomerom.seq), after=length(paraglom.seq))
all.seq <- all.seq[names(all.seq) != "YYY00000"]
all.ordered.seq <- all.seq[order(as.character((names(all.seq))))]
writeXStringSet(all.ordered.seq, "/FULL_PATH_HERE/maarjAM.fasta", format="fasta")
sed -i 's/[a-z]/\U&/g' maarjAM.fasta #fix lowercase in seqs
|
## predict()
model.all = lm(Sepal.Width ~ . , data=iris)
new_data = data.frame(Sepal.Length=6.22, Petal.Length = 3.77, Petal.Width = 1.99, Species="virginica")
predict(model.all,newdata = new_data)
# Confidence interval 95%
predict(model.all,newdata=new_data,interval="confidence") | /05Probability distribution - Statistical model/07predict.R | no_license | MomusChao/R | R | false | false | 292 | r | ## predict()
# predict() demo: fit a linear model for Sepal.Width from all remaining
# iris columns, then predict the response for one new observation.
# (Idiom fix: use `<-` for top-level assignment instead of `=`.)
model.all <- lm(Sepal.Width ~ ., data = iris)
# The new observation must supply every predictor used by the model,
# including a valid Species level.
new_data <- data.frame(Sepal.Length = 6.22, Petal.Length = 3.77, Petal.Width = 1.99, Species = "virginica")
# Point prediction for the new observation.
predict(model.all, newdata = new_data)
# 95% confidence interval for the mean response (columns fit / lwr / upr).
predict(model.all, newdata = new_data, interval = "confidence")
# Exercise 3 (EPage 54): screen the BloodBrain predictors for degenerate
# (near-zero-variance) columns and for highly correlated predictor pairs.
# (Idiom fixes: `<-` for assignment; spell out FALSE instead of the
# reassignable shorthand `F`.)
save_plots <- FALSE
library(caret)
data(BloodBrain)

# Look for degenerate columns:
zero_cols <- nearZeroVar(bbbDescr)
colnames(bbbDescr)[zero_cols]

# Look for strong correlations among the predictors:
library(corrplot)
corrplot(cor(bbbDescr), order = "hclust")

# Find which predictors we can eliminate since they have correlations that
# are "too large":
highCorr <- findCorrelation(cor(bbbDescr), cutoff = 0.75)
bbbDescr_independent <- bbbDescr[, -highCorr]
corrplot(cor(bbbDescr_independent)) # notice that this matrix has no values > cutoff=0.75 above
| /Predictive_Modeling/3_DataPreprocessing/3_Answers3_Scanning_Predictors.R | no_license | LataniaReece/R_Scripts | R | false | false | 596 | r | save_plots = F
# Exercise 3 (EPage 54):
#
library(caret)
data(BloodBrain)
# Look for degenerate columns:
zero_cols = nearZeroVar( bbbDescr )
colnames( bbbDescr )[ zero_cols ]
# Look for strong correlations among the predictors:
library(corrplot)
corrplot( cor( bbbDescr ), order="hclust" )
# Find which predictors we can elliminate since they have correlations that are "too large":
#
highCorr = findCorrelation( cor( bbbDescr ), cutoff=0.75 )
bbbDescr_independent = bbbDescr[,-highCorr]
corrplot( cor(bbbDescr_independent) ) # notice that this matrix has no values > cutoff=0.75 above
|
# Turnstiles project
# Tatiana Velasco R
# December 13th, 2019
# -------------------------------------------------------------- #
# CODING INTERACTIONS BETWEEN ALL INDIVIDUALS AT UNIANDES #
# -------------------------------------------------------------- #
## Genera las listas de pares para cada base de mes a mes. No en loop sino separado
## La ventaja de hacerlo separado es que si el codigo se encuentra con un error en un mes dado, puede seguir al siguiente.
# ------------------------------------------------------------------ #
# Setup: workspace, libraries, directory locations, helper functions.
# NOTE(review): the workspace clear and hard-coded absolute paths are
# kept from the original script; update the paths for your machine.
# ------------------------------------------------------------------ #
rm(list = ls())

# install functions (run once if needed)
# install.packages("foreign")
# install.packages("dplyr")
# install.packages("doBy")
# install.packages("plyr")
# install.packages("lubridate")

# Import Libraries
library(foreign)
library(dplyr)
library(doBy)
library(plyr)
library(lubridate)

# Define directory locations ... UPDATE WITH PROPER LOCATIONS ...
# BUG FIX: the original strings contained "TC\ Columbia"; "\ " is an
# unrecognized escape sequence in an R string and stops the file from
# parsing. A plain space is correct inside a quoted R string.
# NOTE(review): `source` below shadows base::source(); the source() call
# further down still resolves to the function, but consider renaming.
functions <- "/Users/tatianavelasco.ro/Dropbox/TC Columbia/Research/Turnstiles/Torniquetes_TRT/Data/GitHub/Turnstile_networks/Code"
source <- "/Users/tatianavelasco.ro/Dropbox/TC Columbia/Research/Turnstiles/Torniquetes_TRT/Data/P2000"
store <- "/Users/tatianavelasco.ro/Dropbox/TC Columbia/Research/Turnstiles/Torniquetes_TRT/Data/data_processing"

# Import functions
# set working directory
setwd(functions)
source("all.full.edgelist.revisit.R")

# Set time clock
start_time <- Sys.time() # Start counting time
#### ENERO
# --------
# setwd(source)
# mydata <- read.csv("enero.csv")
#
# # Format data
# mydata <- mydata[mydata$modo == "Peatonal",]
# mydata <- mydata[!is.na(mydata$carnet), ]
# mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
# mydata <- mydata[!duplicated(mydata),] # Drop duplicates
#
# # Format variables variables
# mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
# mydata$torniquete <- as.character(mydata$torniquete)
# mydata$year <- year(mydata$date_time)
# mydata$porteria <- trimws(as.character(mydata$porteria))
# mydata$accion <- as.character(mydata$accion)
# mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
#
#
# # gen auxiliary tools
# building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# # building <- c("SD") # For testing
#
#
# # Genera edge list
# # ----------------
# # Create auxiliary tools
# edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
# colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
#
# for (y in 2016:2019) { # try w/one year
# for (b in building) {
# for (a in 0:1) { # try outs only
# for (w in 1:5) { # try mondays only
#
# my.data.filtered <- mydata[mydata$year == y, ]
# my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
# if(nrow(my.data.day) == 0) {next}
# edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
# edge.list.full.all <- rbind(edge.list.full.all, edge.list)
# }
# }
# }
#
# }
#
# setwd(store)
# edge.list.full.enero.csv <- write.csv2(edge.list.full.all, "edge.list.full.enero.csv")
#### FEBRERO
# --------
# Build the full pairwise edge list for February: for every year, gate
# (porteria), direction (in/out) and weekday, pair swipes that fall within
# the time window passed to all.full.edgelist().
setwd(source)
mydata <- read.csv("febrero.csv")
# Format data
# Keep pedestrian turnstile records with a valid card id, drop unused
# columns and exact duplicate rows.
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
# Encode direction: 1 = entry (note the trailing space in "IN " as stored
# in the raw data), 0 = exit, NA otherwise.
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Generate edge list
# ----------------
# Create auxiliary tools
# NOTE(review): the accumulator starts as a single all-NA row; that row is
# still present in the CSV written below, so filter it downstream.
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2) # tell the function the allowed time distance
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.febrero.csv <- write.csv2(edge.list.full.all, "edge.list.full.febrero.csv")
end_time <- Sys.time() # stop counting time
time <- end_time - start_time
time
#### MARZO
# --------
# Repeats the February pipeline for marzo.csv: clean the pedestrian swipe
# records, then build the pairwise edge list per year/gate/direction/weekday
# and write it to the data_processing folder.
setwd(source)
mydata <- read.csv("marzo.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Generate edge list
# ----------------
# Create auxiliary tools (accumulator keeps an initial all-NA row).
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.marzo.csv <- write.csv2(edge.list.full.all, "edge.list.full.marzo.csv")
#### ABRIL
# --------
# Repeats the February pipeline for abril.csv: clean the pedestrian swipe
# records, then build the pairwise edge list per year/gate/direction/weekday
# and write it to the data_processing folder.
setwd(source)
mydata <- read.csv("abril.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Generate edge list
# ----------------
# Create auxiliary tools (accumulator keeps an initial all-NA row).
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.abril.csv <- write.csv2(edge.list.full.all, "edge.list.full.abril.csv")
#### MAYO
# --------
# Repeats the February pipeline for mayo.csv: clean the pedestrian swipe
# records, then build the pairwise edge list per year/gate/direction/weekday
# and write it to the data_processing folder.
setwd(source)
mydata <- read.csv("mayo.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Generate edge list
# ----------------
# Create auxiliary tools (accumulator keeps an initial all-NA row).
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.mayo.csv <- write.csv2(edge.list.full.all, "edge.list.full.mayo.csv")
#### AGOSTO
# ---------
# Repeats the February pipeline for agosto.csv: clean the pedestrian swipe
# records, then build the pairwise edge list per year/gate/direction/weekday
# and write it to the data_processing folder.
setwd(source)
mydata <- read.csv("agosto.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Generate edge list
# ----------------
# Create auxiliary tools (accumulator keeps an initial all-NA row).
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.agosto.csv <- write.csv2(edge.list.full.all, "edge.list.full.agosto.csv")
#### SEPTIEMBRE
# ---------
setwd(source)
mydata <- read.csv("septiembre.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Genera edge list
# ----------------
# Create auxiliary tools
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.septiembre.csv <- write.csv2(edge.list.full.all, "edge.list.full.septiembre.csv")
#### OCTUBRE
# -----------
setwd(source)
mydata <- read.csv("octubre.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Genera edge list
# ----------------
# Create auxiliary tools
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.octubre.csv <- write.csv2(edge.list.full.all, "edge.list.full.octubre.csv")
#### NOVIEMBRE
# -----------
setwd(source)
mydata <- read.csv("noviembre.csv")
# Format data
mydata <- mydata[mydata$modo == "Peatonal",]
mydata <- mydata[!is.na(mydata$carnet), ]
mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
mydata <- mydata[!duplicated(mydata),] # Drop duplicates
# Format variables variables
mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
mydata$torniquete <- as.character(mydata$torniquete)
mydata$year <- year(mydata$date_time)
mydata$porteria <- trimws(as.character(mydata$porteria))
mydata$accion <- as.character(mydata$accion)
mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
# gen auxiliary tools
building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# building <- c("SD") # For testing
# Genera edge list
# ----------------
# Create auxiliary tools
edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
for (y in 2016:2019) { # try w/one year
for (b in building) {
for (a in 0:1) { # try outs only
for (w in 1:5) { # try mondays only
my.data.filtered <- mydata[mydata$year == y, ]
my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
if(nrow(my.data.day) == 0) {next}
edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
edge.list.full.all <- rbind(edge.list.full.all, edge.list)
}
}
}
}
setwd(store)
edge.list.full.noviembre.csv <- write.csv2(edge.list.full.all, "edge.list.full.noviembre.csv")
# Stop the clock and report the total elapsed processing time
end_time <- Sys.time() # stop counting time
time <- end_time - start_time
# difftime object auto-prints when the script is run at top level (Rscript);
# note it will NOT print if this file is source()'d
time
| /Code/interactions_all_uniandes.R | no_license | JuanMoreno11/Turnstile_networks | R | false | false | 19,179 | r | # Turnstiles project
# Tatiana Velasco R
# December 13th, 2019
# -------------------------------------------------------------- #
# CODING INTERACTIONS BETWEEN ALL INDIVIDUALS AT UNIANDES #
# -------------------------------------------------------------- #
## Genera las listas de pares para cada base de mes a mes. No en loop sino separado
## La ventaja de hacerlo separado es que si el codigo se encuentra con un error en un mes dado, puede seguir al siguiente.
# Start from a clean workspace. NOTE: rm(list = ls()) only clears the global
# environment; loaded packages and options persist.
rm(list = ls())
# install functions
# install.packages("foreign")
# install.packages("dplyr")
# install.packages("doBy")
# install.packages("plyr")
# install.packages("lubridate")
# Import Libraries
library(foreign)
library(dplyr)
library(doBy)
# NOTE(review): plyr is attached AFTER dplyr, so plyr masks several dplyr
# verbs (summarise, mutate, ...) -- confirm this ordering is intentional
library(plyr)
library(lubridate)  # provides year(), used on the parsed timestamps below
# Define directory locations ... UPDATE WITH PROPER LOCATIONS ...
functions <- "/Users/tatianavelasco.ro/Dropbox/TC\ Columbia/Research/Turnstiles/Torniquetes_TRT/Data/GitHub/Turnstile_networks/Code"
# NOTE: `source` shadows base::source() as a variable name; the source(...)
# call below still works because R skips non-function bindings when
# resolving a name used in a function call
source <- "/Users/tatianavelasco.ro/Dropbox/TC\ Columbia/Research/Turnstiles/Torniquetes_TRT/Data/P2000"
store <- "/Users/tatianavelasco.ro/Dropbox/TC\ Columbia/Research/Turnstiles/Torniquetes_TRT/Data/data_processing"
# Import functions
# set working directory
setwd(functions)
# Defines all.full.edgelist(), used by every monthly block below
source("all.full.edgelist.revisit.R")
# Set time clock
start_time <- Sys.time() # Start counting time
#### ENERO
# --------
# setwd(source)
# mydata <- read.csv("enero.csv")
#
# # Format data
# mydata <- mydata[mydata$modo == "Peatonal",]
# mydata <- mydata[!is.na(mydata$carnet), ]
# mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))] # Remove NA row
# mydata <- mydata[!duplicated(mydata),] # Drop duplicates
#
# # Format variables variables
# mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
# mydata$torniquete <- as.character(mydata$torniquete)
# mydata$year <- year(mydata$date_time)
# mydata$porteria <- trimws(as.character(mydata$porteria))
# mydata$accion <- as.character(mydata$accion)
# mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
#
#
# # gen auxiliary tools
# building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
# # building <- c("SD") # For testing
#
#
# # Genera edge list
# # ----------------
# # Create auxiliary tools
# edge.list.full.all <- matrix(nrow = 1, ncol = 9, rep(NA))
# colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
#
# for (y in 2016:2019) { # try w/one year
# for (b in building) {
# for (a in 0:1) { # try outs only
# for (w in 1:5) { # try mondays only
#
# my.data.filtered <- mydata[mydata$year == y, ]
# my.data.day <- my.data.filtered[ which(my.data.filtered$porteria==b & my.data.filtered$action_in == a & my.data.filtered$dia_semana == w), ]
# if(nrow(my.data.day) == 0) {next}
# edge.list <- all.full.edgelist(my.data.day, building = b, action=a, day=w, year=y, time = 2)
# edge.list.full.all <- rbind(edge.list.full.all, edge.list)
# }
# }
# }
#
# }
#
# setwd(store)
# edge.list.full.enero.csv <- write.csv2(edge.list.full.all, "edge.list.full.enero.csv")
#### FEBRERO .. NOVIEMBRE
# -------------------------
# Every month below followed the exact same recipe (read CSV, clean, build
# the edge list per year/building/action/weekday, write CSV), so it is
# factored into a single helper called once per input file.
#
# Reads `csv.name` from the `source` directory and writes `out.name` into
# the `store` directory. Relies on all.full.edgelist() (sourced above) and
# on lubridate::year().
process.month <- function(csv.name, out.name) {
  setwd(source)
  mydata <- read.csv(csv.name)
  # Format data: keep pedestrian swipes that have a card id, drop helper
  # columns and exact duplicates
  mydata <- mydata[mydata$modo == "Peatonal", ]
  mydata <- mydata[!is.na(mydata$carnet), ]
  mydata <- mydata[, -which(names(mydata) %in% c("X", "programa", "edificio", "modo", "dia", "NA.", "jornada", "fecha", "mesn"))]
  mydata <- mydata[!duplicated(mydata), ]
  # Format variables
  mydata$date_time <- strptime(mydata$fecha_completa, format = '%Y.%m.%d %H:%M:%S')
  mydata$torniquete <- as.character(mydata$torniquete)
  mydata$year <- year(mydata$date_time)
  mydata$porteria <- trimws(as.character(mydata$porteria))
  mydata$accion <- as.character(mydata$accion)
  # NB: "IN " carries a trailing space in the raw data; anything other than
  # IN/OUT becomes NA
  mydata$action_in <- ifelse(mydata$accion == "IN ", 1, ifelse(mydata$accion == "OUT", 0, NA))
  # Porterias (campus gates) to process
  building <- c("W", "SD", "S", "RGB", "PB", "ÑF", "ÑE", "NAVAS", "ML", "MJ", "LL", "GA", "FRANCO", "CPM", "CORCAS", "CAI", "AU")
  # Accumulate per-subset edge lists in a list and bind once at the end:
  # avoids the O(n^2) cost of rbind() growth inside the loop and drops the
  # spurious all-NA seed row that the original matrix(nrow = 1, ...)
  # initialization wrote into every output CSV.
  parts <- list()
  for (y in 2016:2019) {
    my.data.year <- mydata[mydata$year == y, ]  # invariant in b/a/w: hoisted
    for (b in building) {
      for (a in 0:1) {    # 0 = OUT, 1 = IN
        for (w in 1:5) {  # weekdays Monday..Friday
          my.data.day <- my.data.year[which(my.data.year$porteria == b & my.data.year$action_in == a & my.data.year$dia_semana == w), ]
          if (nrow(my.data.day) == 0) next
          parts[[length(parts) + 1]] <-
            all.full.edgelist(my.data.day, building = b, action = a, day = w, year = y, time = 2)
        }
      }
    }
  }
  edge.list.full.all <- do.call(rbind, parts)
  setwd(store)
  if (is.null(edge.list.full.all)) {
    warning("No edge lists produced for ", csv.name, "; skipping ", out.name, call. = FALSE)
  } else {
    colnames(edge.list.full.all) <- c("carnet1", "carnet2", "date_time_carnet1", "torniquete_carnet1", "torniquete_carnet2", "porteria", "action", "day", "year")
    # write.csv2() returns NULL invisibly, so its result is not assigned
    write.csv2(edge.list.full.all, out.name)
  }
  invisible(edge.list.full.all)
}

#### FEBRERO
process.month("febrero.csv", "edge.list.full.febrero.csv")

# Elapsed time after the first month (kept from the original script)
end_time <- Sys.time() # stop counting time
time <- end_time - start_time
time

#### MARZO
process.month("marzo.csv", "edge.list.full.marzo.csv")
#### ABRIL
process.month("abril.csv", "edge.list.full.abril.csv")
#### MAYO
process.month("mayo.csv", "edge.list.full.mayo.csv")
#### AGOSTO
process.month("agosto.csv", "edge.list.full.agosto.csv")
#### SEPTIEMBRE
process.month("septiembre.csv", "edge.list.full.septiembre.csv")
#### OCTUBRE
process.month("octubre.csv", "edge.list.full.octubre.csv")
#### NOVIEMBRE
process.month("noviembre.csv", "edge.list.full.noviembre.csv")

# Total elapsed time
end_time <- Sys.time() # stop counting time
time <- end_time - start_time
time
|
# Read the iris data and report the mean petal length for each species
library(readr)
iris <- read_csv("iris.csv")
with(iris, tapply(Petal.Length, Species, mean))
| /iris.R | no_license | jinmeiz/R_projects | R | false | false | 88 | r | library(readr)
# Load the iris data and compute the per-species mean petal length
iris <- read_csv("iris.csv")
with(iris, tapply(Petal.Length, Species, mean))
|
## Process data for station 115 - Wattle
## this script is called both for hydrograph
## and for discharge calculations
## stn.names(115,105) ## name files done in hydgrph or discharge script
##-----IMPORTANT NOTE-----###
## input data is AVERAGED BASED ON THE 5/10 OR 15 MINUTE LOG
## POSTERIOR VALUES ARE USED TO POPULATE PRIOR NA VALUES
## observed height of the water above the notch on:
## 28 June 2015 at 11:20:00 am was 0.157m
## stage at that time was 0.31610m
## HEIGHT OF V ABOVE STILLING WELL IS 0.1661m
## forumula is Q (m3/hr) = 4969 * (H^2.5)
##-----END OF NOTE-----##
##--- define constants
## Announce which station is being processed; `wlr.no` is expected to be set
## by the calling hydrograph/discharge script
cat(paste("Processing data for station: ", wlr.no, sep=""),"\n")
ar.cat <- 293562.5 ## TO BE FIXED -- catchment area (presumably m^2; verify before trusting DepthDischarge units)
catch.type <- "Wattle Catchment"
hgt.diff <- 0.1661 # height difference b/w weir and wlr (m), see note above
wlr.path <- "~/OngoingProjects/CWC/Data/Nilgiris/wlr/csv/"
wlr.flnm <- "wlr_115_1 min.csv"
wlr.flnm.full <- paste(wlr.path, wlr.flnm, sep="")
##--- call function to get rating curve and calculate discharge
## calc.disch.weir() is defined by the caller; the code below relies on the
## result having (at least) Timestamp, Stage and Discharge columns
wlr.dat.all <- calc.disch.weir(wlr.flnm, wlr.flnm.full )
wlr.dat.all$Timestamp <- as.POSIXct(wlr.dat.all$Timestamp, tz="Asia/Kolkata")
##--- calculate depth of discharge ----##
## Discharge divided by catchment area, scaled by 1e+9 -- resulting units
## depend on the units of Discharge and ar.cat (TODO confirm)
wlr.dat.all$DepthDischarge <- (wlr.dat.all$Discharge/ar.cat)*1e+9
## NOTE(review): the sorted copy below is never used afterwards -- the
## subsequent NA filter and rounding operate on the unsorted wlr.dat.all.
## Confirm whether the intent was to continue from wlr.dat.all.sorted.
wlr.dat.all.sorted <- wlr.dat.all[order(wlr.dat.all$Timestamp, na.last=FALSE),]
## Drop rows with no stage reading, then round discharge for output
wlr.dat.all <- wlr.dat.all[!is.na(wlr.dat.all$Stage),]
wlr.dat.all$Discharge <- round(wlr.dat.all$Discharge, digits=5)
| /Discharge/stn_115.R | no_license | feralindia/CWC | R | false | false | 1,446 | r | ## Process data for station 115 - Wattle
## this script is called both for hydrograph
## and for discharge calculations
## stn.names(115,105) ## name files done in hydgrph or discharge script
##-----IMPORTANT NOTE-----###
## input data is AVERAGED BASED ON THE 5/10 OR 15 MINUTE LOG
## POSTERIOR VALUES ARE USED TO POPULATE PRIOR NA VALUES
## observed height of the water above the notch on:
## 28 June 2015 at 11:20:00 am was 0.157m
## stage at that time was 0.31610m
## HEIGHT OF V ABOVE STILLING WELL IS 0.1661m
## forumula is Q (m3/hr) = 4969 * (H^2.5)
##-----END OF NOTE-----##
##--- define constants
## Announce which station is being processed; `wlr.no` is expected to be set
## by the calling hydrograph/discharge script
cat(paste("Processing data for station: ", wlr.no, sep=""),"\n")
ar.cat <- 293562.5 ## TO BE FIXED -- catchment area (presumably m^2; verify before trusting DepthDischarge units)
catch.type <- "Wattle Catchment"
hgt.diff <- 0.1661 # height difference b/w weir and wlr (m), see note above
wlr.path <- "~/OngoingProjects/CWC/Data/Nilgiris/wlr/csv/"
wlr.flnm <- "wlr_115_1 min.csv"
wlr.flnm.full <- paste(wlr.path, wlr.flnm, sep="")
##--- call function to get rating curve and calculate discharge
## calc.disch.weir() is defined by the caller; the code below relies on the
## result having (at least) Timestamp, Stage and Discharge columns
wlr.dat.all <- calc.disch.weir(wlr.flnm, wlr.flnm.full )
wlr.dat.all$Timestamp <- as.POSIXct(wlr.dat.all$Timestamp, tz="Asia/Kolkata")
##--- calculate depth of discharge ----##
## Discharge divided by catchment area, scaled by 1e+9 -- resulting units
## depend on the units of Discharge and ar.cat (TODO confirm)
wlr.dat.all$DepthDischarge <- (wlr.dat.all$Discharge/ar.cat)*1e+9
## NOTE(review): the sorted copy below is never used afterwards -- the
## subsequent NA filter and rounding operate on the unsorted wlr.dat.all.
## Confirm whether the intent was to continue from wlr.dat.all.sorted.
wlr.dat.all.sorted <- wlr.dat.all[order(wlr.dat.all$Timestamp, na.last=FALSE),]
## Drop rows with no stage reading, then round discharge for output
wlr.dat.all <- wlr.dat.all[!is.na(wlr.dat.all$Stage),]
wlr.dat.all$Discharge <- round(wlr.dat.all$Discharge, digits=5)
|
## modified from https://github.com/rosca002/FAO_Bfast_workshop/tree/master/tutorial
# #################################################################################################################################
############### # load packages
# Load the BFAST-related packages; echo = TRUE prints the sourced commands
source("www/scripts/load_BFAST_packages.R",echo = TRUE)
options(echo=TRUE)
# First (and only used) command-line argument: the data directory that
# contains my_work_space.RData
args <- commandArgs(TRUE)
print(args[1])
data_dir <- args[1]
# Restore the saved workspace -- presumably this defines tiles,
# progress_file, title, mask, mask_file_path and chunk_size, all referenced
# later in this script (TODO confirm against the RData producer)
load(paste0(data_dir,"/my_work_space.RData"))
overall_start_time <- Sys.time()
# Warp `infile` into lon/lat WGS84, cropped to the bounding box
# (xmin, ymin, xmax, ymax), and write the result to `outfile` as a UInt16
# raster (overwriting any existing file). Returns the output as a Raster
# object (output_Raster = TRUE).
chunkerize <- function(infile, outfile, xmin, ymin, xmax, ymax) {
  bbox <- c(xmin, ymin, xmax, ymax)
  gdalwarp(
    srcfile = infile,
    dstfile = outfile,
    t_srs = '+proj=longlat +datum=WGS84 +no_defs',
    te = bbox,
    multi = TRUE,
    output_Raster = TRUE,
    overwrite = TRUE,
    ot = "UInt16"
  )
}
############### LOOP THROUGH EACH TILE
for(the_dir in tiles){
print(paste0('BFAST running for ',the_dir))
############### # check if the processing text exists, create a new blank processing text file
the_path_dir <- paste0(data_dir, the_dir, '/')
the_path_dir
############### Write the console outputs
sink(progress_file)
print("Preparing data...")
print(paste0('Running time series analysis for: ',basename(the_path_dir)))
############### Get the list of stacks inside the tile
main_stack_name <- paste0(the_path_dir,'/','stack.vrt')
sub_stacks <- list.files(the_path_dir,pattern="_stack.vrt")
list_stack <- list()
if(length(sub_stacks) > 1){
list_stack <- paste0(the_path_dir,'/',sub_stacks)
}else{
if(file.exists(main_stack_name)){
list_stack <- main_stack_name}}
list_stack <- paste0(the_path_dir,'/',sub_stacks)
################# CREATE THE MAIN OUTPUT DIRECTORY
output_directory <- paste0(the_path_dir,"results/")
dir.create(output_directory, recursive = T,showWarnings = F)
############### Write the console outputs
print(paste0('The results will be found in the folder: ' ,paste0(output_directory)))
print(paste0('Number of GEE blocks: ',length(sub_stacks)))
print(paste0('Number of cores: ',detectCores()))
############### LOOP THROUGH THE DIFFERENT STACKS
for(stack_name in list_stack){
stack_basename <- substr(basename(stack_name),1,nchar(basename(stack_name))-4)
############### Write the console output
print(paste0(' Processing block: ',stack_basename))
################# READ THE DATES FROM THE CSV FILE
dates <- unlist(read.csv(paste0(the_path_dir,'/','dates.csv'),header = FALSE))
################# CREATE LOCAL STACK RESULTS DIRECTORY
results_directory <- file.path(output_directory,paste0("bfast_",
stack_basename,"_",title,'/'))
dir.create(results_directory,recursive = T,showWarnings = F)
chunks_directory <- file.path(results_directory,paste0("chunks",'/'))
dir.create(chunks_directory,recursive = T,showWarnings = F)
log_filename <- file.path(results_directory,paste0(format(Sys.time(), "%Y-%m-%d-%H-%M-%S"),"_bfast_", title, ".log"))
start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
nf_start_time <- Sys.time()
################# MULTIPLY THE INPUT BY THE FNF MASK IF NEEDED
tryCatch({
if(mask == "FNF Mask" ){
print(' Using the Forest/Nonforest mask')
mask_file_path <- mask_file_path
data_input_msk <- paste0(the_path_dir,'/','mask_FNF.tif')
data_input_vrt_nd <- paste0(the_path_dir,'/','stack_ND.tif')
data_input_tif_msk <- paste0(the_path_dir,'/','stack_FNF.tif')
#################### ALIGN
input <- mask_file_path
ouput <- data_input_msk
mask <- stack_name
system(sprintf("gdalwarp -ot UInt16 -co COMPRESS=LZW -t_srs \"%s\" -te %s %s %s %s -tr %s %s %s %s -overwrite",
proj4string(raster(mask)),
extent(raster(mask))@xmin,
extent(raster(mask))@ymin,
extent(raster(mask))@xmax,
extent(raster(mask))@ymax,
res(raster(mask))[1],
res(raster(mask))[2],
input,
ouput
))
#################### SET NODATA TO NONE IN THE TIME SERIES STACK
system(sprintf("gdal_translate -a_nodata none -co COMPRESS=LZW %s %s",
mask,
data_input_vrt_nd
))
#################### MULTIPLY THE TIME SERIES STACK BY MASK
system(sprintf("gdal_calc.py -A %s -B %s --allBands=A --overwrite --co COMPRESS=LZW --outfile=%s --calc=\"%s\"",
data_input_vrt_nd,
data_input_msk,
data_input_tif_msk,
paste0("A*B")
))
stack_name <- data_input_tif_msk
}
}, error=function(e){})
# ############# READ THE STACK METADATA WITHOUT WARNINGS
# info <- GDALinfo(stack_name,silent = TRUE)
#
# ############# GET STACK SIZE
# stack_x <- as.numeric(info[2])
# stack_y <- as.numeric(info[1])
#
# nx <- floor(stack_x / chunk_size)
# ny <- floor(stack_y / chunk_size)
#
# sizes_x <- c(rep(chunk_size,nx),stack_x - nx*chunk_size)
# sizes_y <- c(rep(chunk_size,ny),stack_y - ny*chunk_size)
#
# start_x <- cumsum(c(0,rep(chunk_size,nx)))
# start_y <- cumsum(c(0,rep(chunk_size,ny)))
#
# ############# CALCULATE CHUNKS SIZES
# sizes <- cbind(expand.grid(sizes_x,sizes_y),
# expand.grid(start_x,start_y))
#
# names(sizes) <- c("size_x","size_y","start_x","start_y")
############# READ THE STACK METADATA WITHOUT WARNINGS
info <- GDALinfo(stack_name,silent = TRUE)
############# GET STACK SIZE
stack_x <- as.numeric(info[2])
stack_y <- as.numeric(info[1])
orig_x <- as.numeric(info[4])
orig_y <- as.numeric(info[5])
res_x <- as.numeric(info[6])
res_y <- as.numeric(info[7])
nx <- floor(stack_x / chunk_size)
ny <- floor(stack_y / chunk_size)
xmin <- orig_x + cumsum(c(0,rep(chunk_size,nx)*res_x))
ymin <- orig_y + cumsum(c(0,rep(chunk_size,ny)*res_y))
xmax <- orig_x + res_x * stack_x
ymax <- orig_y + res_y * stack_y
rest_x <- nx - stack_x / chunk_size
rest_y <- ny - stack_y / chunk_size
if(rest_x == 0){
xmin <- orig_x + cumsum(c(0,rep(chunk_size,nx-1)*res_x))
xmax <- orig_x + c(cumsum(rep(chunk_size,nx-1)*res_x),res_x * stack_x)
}
if(rest_y == 0){
ymin <- orig_y + cumsum(c(0,rep(chunk_size,ny-1)*res_y))
ymax <- orig_y + c(cumsum(rep(chunk_size,ny-1)*res_y),res_y * stack_y)
}
if (nx >= 1 & rest_x != 0) {
xmax <- orig_x + c(cumsum(rep(chunk_size,nx)*res_x),res_x * stack_x)
}
if (ny >= 1 & rest_y != 0 ) {
ymax <- orig_y + c(cumsum(rep(chunk_size,ny)*res_y),res_y * stack_y)
}
stack_proj <- info[12]
############# CALCULATE CHUNKS SIZES
sizes <- data.frame(xmin=numeric(),
ymin=numeric(),
xmax=numeric(),
ymax=numeric()
)
for (k in 1:length(xmin)){
for (i in 1:length(ymin)){
cc <- c(xmin[k],ymin[i],xmax[k],ymax[i])
sizes <- rbind(sizes, cc)
}
}
sizes <- na.omit(sizes)
names(sizes) <- c("xmin","ymin","xmax","ymax")
print(paste0(' Number of chunks to process: ',nrow(sizes)))
print(sizes)
############# NAME OF RESULT FOR THE TILE
result <- file.path(results_directory, paste0("bfast_",title, ".tif"))
############# IF RESULT EXISTS, SKIP
if(!file.exists(result)){
############# PROCESS IF OVERALL APPROACH CHOSEN
if(mode == "Overall"){
############# LOOP THROUGH CHUNKS
for(chunk in 1:nrow(sizes)){
chunk_stack_name <- paste0(chunks_directory,"tmp_chunk_",chunk,"_stack.tif")
chunk_bfast_name <- paste0(chunks_directory,"chunk_",chunk,"_bfast_",title, ".tif")
if(!file.exists(chunk_bfast_name)){
chunk_start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
print(paste0(" Processed : ",ceiling((chunk-1)/nrow(sizes)*100),"%"))
# ############# CREATE THE CHUNK
# system(sprintf("gdal_translate -srcwin %s %s %s %s -co COMPRESS=LZW %s %s",
# sizes[chunk,"start_x"],
# sizes[chunk,"start_y"],
# sizes[chunk,"size_x"],
# sizes[chunk,"size_y"],
# stack_name,
# chunk_stack_name))
chunkerize(stack_name, chunk_stack_name,
sizes$xmin[chunk],
sizes$ymin[chunk],
sizes$xmax[chunk],
sizes$ymax[chunk])
chunk_stack <- brick(chunk_stack_name)
############# DELETE THE RESULT IF IT EXISTS
system(sprintf("rm -f %s",chunk_bfast_name))
############# GENERATE A LOG FILENAME
chunk_log_filename <- paste0(chunks_directory,"log_chunk_",chunk,"_params_",title, ".log")
############# CREATE A FUNCTION TO IMPLEMENT BFAST
# Run BFAST-monitor (bfmSpatial) over the current chunk and time the run.
# Defined inside the chunk loop: it captures chunk_stack, chunk,
# chunk_start_time, chunk_bfast_name, chunk_log_filename and the analysis
# parameters (monitoring_year_beg/end, dates, formula, order, history,
# type, returnLayers) from the enclosing environment.
# Returns the system.time() result so the caller could inspect elapsed time.
loop_process <- function(){
# Use every available core for the pixel-wise model fitting.
cores <- detectCores()
chunktime <- system.time(bfmSpatial(chunk_stack,
start = c(monitoring_year_beg[1], 1),
monend = c(monitoring_year_end[1], 1),
dates = dates,
formula = as.Formula(formula),
order = order,
history = history,
filename = chunk_bfast_name,
type = type,
returnLayers = returnLayers,
mc.cores = cores))
############# WRITE THE TIME TO A LOG
# Append start/end timestamps and elapsed minutes for this chunk to its log.
write(paste0("Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" for a total time of ", chunktime[[3]]/60," minutes"),
chunk_log_filename,
append=TRUE)
# Return the timing object (last expression).
chunktime
}
tryCatch({
print(paste0(" Processing chunk ",chunk," of ",nrow(sizes)))
loop_process()
system(sprintf(paste0("rm -f ", chunks_directory,"tmp_chunk*.tif")))
},error=function(e){
print(paste0(" Failed chunk ",chunk))
fail_log_filename <- paste0(chunks_directory,"fail_chunk_",chunk,"_params_",title, ".log")
write(paste0("Failed Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" Reason for failure ",
e),
fail_log_filename,
append=TRUE)
})
} ### END OF TEST EXISTS CHUNK
print(paste0(" Finished chunk ",chunk))
} ### END OF THE CHUNK LOOP
############# COMBINE ALL THE CHUNKS
system(sprintf("gdal_merge.py -co COMPRESS=LZW -o %s %s",
result,
paste0(chunks_directory, paste0("chunk_*","_bfast_",title, ".tif"))
))
total_time <- Sys.time()-nf_start_time
print(total_time)
############# WRITE TIMING INFO TO A LOG
write(paste0("This process started on ", start_time,
" and ended on ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" Total time for the tile: ",total_time ,
" Number of CPUs: ",detectCores(),
" Number of chunks: ",nrow(sizes)),
log_filename,
append=TRUE)
############# NAME OF THE THRESHOLDED OUTPUT
outputfile <- paste0(results_directory,"bfast_",title,'_threshold.tif')
## Post-processing ####
# calculate the mean, standard deviation, minimum and maximum of the magnitude band
# reclass the image into 10 classes
# 0 = no data
# 1 = no change (mean +/- 1 standard deviation)
# 2 = negative small magnitude change (mean - 2 standard deviations)
# 3 = negative medium magnitude change (mean - 3 standard deviations)
# 4 = negative large magnitude change (mean - 4 standard deviations)
# 5 = negative very large magnitude change (mean - 4+ standard deviations)
# 6 = postive small magnitude change (mean + 2 standard deviations)
# 7 = postive medium magnitude change (mean + 3 standard deviations)
# 8 = postive large magnitude change (mean + 4 standard deviations)
# 9 = postive very large magnitude change (mean + 4+ standard deviations)
tryCatch({
means_b2 <- cellStats( raster(result,band=2) , "mean")
mins_b2 <- cellStats( raster(result,band=2) , "min")
maxs_b2 <- cellStats( raster(result,band=2) , "max")
stdevs_b2 <- cellStats( raster(result,band=2) , "sd")
system(sprintf("gdal_calc.py -A %s --A_band=2 --co=COMPRESS=LZW --type=Byte --overwrite --outfile=%s --calc=\"%s\"",
result,
paste0(results_directory,"tmp_bfast_",title,'_threshold.tif'),
paste0('(A<=',(maxs_b2),")*",
'(A>' ,(means_b2+(stdevs_b2*4)),")*9+",
'(A<=',(means_b2+(stdevs_b2*4)),")*",
'(A>' ,(means_b2+(stdevs_b2*3)),")*8+",
'(A<=',(means_b2+(stdevs_b2*3)),")*",
'(A>' ,(means_b2+(stdevs_b2*2)),")*7+",
'(A<=',(means_b2+(stdevs_b2*2)),")*",
'(A>' ,(means_b2+(stdevs_b2)),")*6+",
'(A<=',(means_b2+(stdevs_b2)),")*",
'(A>' ,(means_b2-(stdevs_b2)),")*1+",
'(A>=',(mins_b2),")*",
'(A<' ,(means_b2-(stdevs_b2*4)),")*5+",
'(A>=',(means_b2-(stdevs_b2*4)),")*",
'(A<' ,(means_b2-(stdevs_b2*3)),")*4+",
'(A>=',(means_b2-(stdevs_b2*3)),")*",
'(A<' ,(means_b2-(stdevs_b2*2)),")*3+",
'(A>=',(means_b2-(stdevs_b2*2)),")*",
'(A<' ,(means_b2-(stdevs_b2)),")*2")
))
}, error=function(e){})
#################### CREATE A PSEUDO COLOR TABLE
cols <- col2rgb(c("white","beige","yellow","orange","red","darkred","palegreen","green2","forestgreen",'darkgreen'))
pct <- data.frame(cbind(c(0:9),
cols[1,],
cols[2,],
cols[3,]
))
write.table(pct,paste0(results_directory,"color_table.txt"),row.names = F,col.names = F,quote = F)
################################################################################
## Add pseudo color table to result
system(sprintf("(echo %s) | oft-addpct.py %s %s",
paste0(results_directory,"color_table.txt"),
paste0(results_directory,"tmp_bfast_",title,'_threshold.tif'),
paste0(results_directory,"tmp_colortable.tif")
))
################################################################################
## Compress final result
system(sprintf("gdal_translate -ot byte -co COMPRESS=LZW %s %s",
paste0(results_directory,"tmp_colortable.tif"),
outputfile
))
#################### CREATE A VRT OUTPUT
system(sprintf("gdalbuildvrt %s %s",
paste0(data_dir,"/bfast_",title,"_threshold.vrt"),
paste0(data_dir,
"/*/results/",
"bfast_","*",title,"/",
"bfast_","*",title,"_threshold.tif")
))
system(sprintf(paste0("rm -f ",results_directory,"tmp*.tif")))
system(sprintf(paste0("rm -f ", chunks_directory,"tmp*.tif")))
}else{ #################### End of OVERALL loop and Beginning of SEQUENTIAL loop
cores <- detectCores()
bfmSpatialSq <- function(start, end, timeStack, ...){
lapply(start:end,
function(year){
############# LOOP THROUGH CHUNKS
outfl <- paste0(results_directory,"bfast_",title,"_year",year,'.tif')
############# LOOP THROUGH CHUNKS
for(chunk in 1:nrow(sizes)){
chunk_stack_year_name <- paste0(chunks_directory,"tmp_chunk_",chunk,"_year",year,"_stack.tif")
chunk_bfast_year_name <- paste0(chunks_directory,"chunk_",chunk,"_year",year,"_bfast_",title, ".tif")
if(!file.exists(chunk_bfast_year_name)){
chunk_start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
# ############# CLIP THE STACK TO THE CHUNK EXTENT
# system(sprintf("gdal_translate -srcwin %s %s %s %s %s %s",
# sizes[chunk,"start_x"],
# sizes[chunk,"start_y"],
# sizes[chunk,"size_x"],
# sizes[chunk,"size_y"],
# timeStack,
# chunk_stack_year_name))
chunkerize(timeStack, chunk_stack_year_name,
sizes$xmin[chunk],
sizes$ymin[chunk],
sizes$xmax[chunk],
sizes$ymax[chunk])
chunk_stack_year <- brick(chunk_stack_year_name)
print(paste0(" Processing year: ",year))
system(sprintf("rm -f %s",chunk_bfast_year_name))
chunk_log_year_filename <- paste0(chunks_directory,"log_chunk_",chunk,"_year_",year,"_params_",title, ".log")
loop_process <- function(){bfm_year <- bfmSpatial(chunk_stack_year,
start = c(year, 1),
monend = c(year + 1, 1),
dates = dates,
formula = as.Formula(formula),
order = order,
history = history,
filename = chunk_bfast_year_name,
type = type,
mc.cores = cores)
############# WRITE THE TIME TO A LOG
write(paste0("Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S")
),
chunk_log_year_filename,
append=TRUE)
bfm_year
}
tryCatch({
print(paste0(" Processing chunk ",chunk," of ",nrow(sizes)))
loop_process()
system(sprintf(paste0("rm -f ", chunks_directory,"tmp_chunk*.tif")))
},error=function(e){
print(paste0(" Failed process on chunk ",chunk))
fail_log_year_filename <- paste0(chunks_directory,"fail_chunk_",chunk,"_year_",year,"_params_",title, ".log")
write(paste0("Failed Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S")),
fail_log_year_filename,
append=TRUE)
})
} ### END OF CHUNK EXISTS
} ### END OF THE CHUNK LOOP
############# COMBINE ALL THE CHUNKS
system(sprintf("gdal_merge.py -co COMPRESS=LZW -o %s %s",
outfl,
paste0(chunks_directory,"chunk_*","_year",year,"_bfast_",title, ".tif")
))
outfl
} ### END OF THE YEAR FUNCTION
) ### END OF THE YEAR LAPPLY
} ### END OF THE BFASTSQ FUNCTION
############# RUN BFAST IN SEQUENTIAL
time <- system.time(
bfmSpatialSq(
monitoring_year_beg,
monitoring_year_end,
stack_name
))
## Post-processing ####
# output the maximum of the breakpoint dates for all sequential outputs
## Post-processing: merge the per-year sequential outputs into a single
## raster holding, per pixel, the maximum (most recent) breakpoint date.
# BUG FIX: the original tested LETTERS[numfiles] > 3 / == 2 / == 1, i.e. it
# compared a character such as "D" against a number. The two-file and
# one-file branches could therefore never match, and the main branch relied
# on locale-dependent string ordering. The branches below compare the file
# count itself (>= 3 keeps the nested-maximum path for exactly 3 files, as
# the original arithmetic nummax = numfiles - 2 intended).
tif_files <- list.files(results_directory, pattern = '\\.tif$', full.names = TRUE)
numfiles <- length(tif_files)
outputfile <- paste0(results_directory, "bfast_", title, '_breakpoints.tif')
if (numfiles == 0) {
  warning("No .tif outputs found in ", results_directory, call. = FALSE)
} else {
  # gdal_calc.py input flags: "-A f1 --A_band=1 -B f2 --B_band=1 ..."
  # NOTE(review): LETTERS provides only 26 labels, so at most 26 yearly
  # outputs are supported (same limit as the original code).
  band_flags <- paste(
    paste0('-', LETTERS[1:numfiles], ' ', tif_files,
           ' --', LETTERS[1:numfiles], '_band=1'),
    collapse = " ")
  # Build a nested maximum(...) expression over all inputs, e.g. for three
  # files: "maximum ( maximum( A , B ) , C )".
  calc_expr <- if (numfiles >= 3) {
    paste(
      paste(replicate(numfiles - 2, "maximum"), '(', collapse = ""),
      paste('maximum(', LETTERS[1], ',', LETTERS[2], ')'),
      paste(',', LETTERS[3:numfiles], ')', collapse = ""),
      collapse = "")
  } else if (numfiles == 2) {
    paste('maximum(', LETTERS[1], ',', LETTERS[2], ')')
  } else {
    paste('maximum(', LETTERS[1], ')')
  }
  system(sprintf("gdal_calc.py %s --co=COMPRESS=LZW --type=Float32 --overwrite --outfile=%s --calc=\"%s\"",
                 band_flags,
                 outputfile,
                 calc_expr
  ))
}
write(paste0("This process started on ",
start_time," and ended on ",
format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" for a total time of ",
time[[3]]/60," minutes"),
log_filename, append=TRUE)
} ## End of SEQUENTIAL loop
} ### End of STACKNAME loop
} ### End of DATA AVAILABLE loop
print(paste0('The result is ',basename(result)))
overall_time <- Sys.time() - overall_start_time
print(overall_time)
print('Done with processing')
sink()
} ### END OF TILE LOOP
| /www/scripts/bfast_run_chunks_warp.R | no_license | DrRoad/bfastspatial | R | false | false | 25,422 | r | ## modified from https://github.com/rosca002/FAO_Bfast_workshop/tree/master/tutorial
# #################################################################################################################################
############### # load packages
source("www/scripts/load_BFAST_packages.R",echo = TRUE)
options(echo=TRUE)
args <- commandArgs(TRUE)
print(args[1])
data_dir <- args[1]
load(paste0(data_dir,"/my_work_space.RData"))
overall_start_time <- Sys.time()
# Clip one rectangular chunk out of a raster stack with gdalwarp.
# The output is reprojected to geographic WGS84, clamped to the given
# bounding box, and written as unsigned 16-bit; an existing file at
# `outfile` is overwritten. Returns the warped result as a Raster object.
chunkerize <- function(infile, outfile, xmin, ymin, xmax, ymax) {
  target_extent <- c(xmin, ymin, xmax, ymax)
  gdalwarp(
    srcfile       = infile,
    dstfile       = outfile,
    t_srs         = '+proj=longlat +datum=WGS84 +no_defs',
    te            = target_extent,
    ot            = "UInt16",
    multi         = TRUE,
    overwrite     = TRUE,
    output_Raster = TRUE
  )
}
############### LOOP THROUGH EACH TILE
for(the_dir in tiles){
print(paste0('BFAST running for ',the_dir))
############### # check if the processing text exists, create a new blank processing text file
the_path_dir <- paste0(data_dir, the_dir, '/')
the_path_dir
############### Write the console outputs
sink(progress_file)
print("Preparing data...")
print(paste0('Running time series analysis for: ',basename(the_path_dir)))
############### Get the list of stacks inside the tile
main_stack_name <- paste0(the_path_dir,'/','stack.vrt')
sub_stacks <- list.files(the_path_dir,pattern="_stack.vrt")
list_stack <- list()
if(length(sub_stacks) > 1){
list_stack <- paste0(the_path_dir,'/',sub_stacks)
}else{
if(file.exists(main_stack_name)){
list_stack <- main_stack_name}}
list_stack <- paste0(the_path_dir,'/',sub_stacks)
################# CREATE THE MAIN OUTPUT DIRECTORY
output_directory <- paste0(the_path_dir,"results/")
dir.create(output_directory, recursive = T,showWarnings = F)
############### Write the console outputs
print(paste0('The results will be found in the folder: ' ,paste0(output_directory)))
print(paste0('Number of GEE blocks: ',length(sub_stacks)))
print(paste0('Number of cores: ',detectCores()))
############### LOOP THROUGH THE DIFFERENT STACKS
for(stack_name in list_stack){
stack_basename <- substr(basename(stack_name),1,nchar(basename(stack_name))-4)
############### Write the console output
print(paste0(' Processing block: ',stack_basename))
################# READ THE DATES FROM THE CSV FILE
dates <- unlist(read.csv(paste0(the_path_dir,'/','dates.csv'),header = FALSE))
################# CREATE LOCAL STACK RESULTS DIRECTORY
results_directory <- file.path(output_directory,paste0("bfast_",
stack_basename,"_",title,'/'))
dir.create(results_directory,recursive = T,showWarnings = F)
chunks_directory <- file.path(results_directory,paste0("chunks",'/'))
dir.create(chunks_directory,recursive = T,showWarnings = F)
log_filename <- file.path(results_directory,paste0(format(Sys.time(), "%Y-%m-%d-%H-%M-%S"),"_bfast_", title, ".log"))
start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
nf_start_time <- Sys.time()
################# MULTIPLY THE INPUT BY THE FNF MASK IF NEEDED
tryCatch({
if(mask == "FNF Mask" ){
print(' Using the Forest/Nonforest mask')
mask_file_path <- mask_file_path
data_input_msk <- paste0(the_path_dir,'/','mask_FNF.tif')
data_input_vrt_nd <- paste0(the_path_dir,'/','stack_ND.tif')
data_input_tif_msk <- paste0(the_path_dir,'/','stack_FNF.tif')
#################### ALIGN
input <- mask_file_path
ouput <- data_input_msk
mask <- stack_name
system(sprintf("gdalwarp -ot UInt16 -co COMPRESS=LZW -t_srs \"%s\" -te %s %s %s %s -tr %s %s %s %s -overwrite",
proj4string(raster(mask)),
extent(raster(mask))@xmin,
extent(raster(mask))@ymin,
extent(raster(mask))@xmax,
extent(raster(mask))@ymax,
res(raster(mask))[1],
res(raster(mask))[2],
input,
ouput
))
#################### SET NODATA TO NONE IN THE TIME SERIES STACK
system(sprintf("gdal_translate -a_nodata none -co COMPRESS=LZW %s %s",
mask,
data_input_vrt_nd
))
#################### MULTIPLY THE TIME SERIES STACK BY MASK
system(sprintf("gdal_calc.py -A %s -B %s --allBands=A --overwrite --co COMPRESS=LZW --outfile=%s --calc=\"%s\"",
data_input_vrt_nd,
data_input_msk,
data_input_tif_msk,
paste0("A*B")
))
stack_name <- data_input_tif_msk
}
}, error=function(e){})
# ############# READ THE STACK METADATA WITHOUT WARNINGS
# info <- GDALinfo(stack_name,silent = TRUE)
#
# ############# GET STACK SIZE
# stack_x <- as.numeric(info[2])
# stack_y <- as.numeric(info[1])
#
# nx <- floor(stack_x / chunk_size)
# ny <- floor(stack_y / chunk_size)
#
# sizes_x <- c(rep(chunk_size,nx),stack_x - nx*chunk_size)
# sizes_y <- c(rep(chunk_size,ny),stack_y - ny*chunk_size)
#
# start_x <- cumsum(c(0,rep(chunk_size,nx)))
# start_y <- cumsum(c(0,rep(chunk_size,ny)))
#
# ############# CALCULATE CHUNKS SIZES
# sizes <- cbind(expand.grid(sizes_x,sizes_y),
# expand.grid(start_x,start_y))
#
# names(sizes) <- c("size_x","size_y","start_x","start_y")
############# READ THE STACK METADATA WITHOUT WARNINGS
info <- GDALinfo(stack_name,silent = TRUE)
############# GET STACK SIZE
stack_x <- as.numeric(info[2])
stack_y <- as.numeric(info[1])
orig_x <- as.numeric(info[4])
orig_y <- as.numeric(info[5])
res_x <- as.numeric(info[6])
res_y <- as.numeric(info[7])
nx <- floor(stack_x / chunk_size)
ny <- floor(stack_y / chunk_size)
xmin <- orig_x + cumsum(c(0,rep(chunk_size,nx)*res_x))
ymin <- orig_y + cumsum(c(0,rep(chunk_size,ny)*res_y))
xmax <- orig_x + res_x * stack_x
ymax <- orig_y + res_y * stack_y
rest_x <- nx - stack_x / chunk_size
rest_y <- ny - stack_y / chunk_size
if(rest_x == 0){
xmin <- orig_x + cumsum(c(0,rep(chunk_size,nx-1)*res_x))
xmax <- orig_x + c(cumsum(rep(chunk_size,nx-1)*res_x),res_x * stack_x)
}
if(rest_y == 0){
ymin <- orig_y + cumsum(c(0,rep(chunk_size,ny-1)*res_y))
ymax <- orig_y + c(cumsum(rep(chunk_size,ny-1)*res_y),res_y * stack_y)
}
if (nx >= 1 & rest_x != 0) {
xmax <- orig_x + c(cumsum(rep(chunk_size,nx)*res_x),res_x * stack_x)
}
if (ny >= 1 & rest_y != 0 ) {
ymax <- orig_y + c(cumsum(rep(chunk_size,ny)*res_y),res_y * stack_y)
}
stack_proj <- info[12]
############# CALCULATE CHUNKS SIZES
sizes <- data.frame(xmin=numeric(),
ymin=numeric(),
xmax=numeric(),
ymax=numeric()
)
for (k in 1:length(xmin)){
for (i in 1:length(ymin)){
cc <- c(xmin[k],ymin[i],xmax[k],ymax[i])
sizes <- rbind(sizes, cc)
}
}
sizes <- na.omit(sizes)
names(sizes) <- c("xmin","ymin","xmax","ymax")
print(paste0(' Number of chunks to process: ',nrow(sizes)))
print(sizes)
############# NAME OF RESULT FOR THE TILE
result <- file.path(results_directory, paste0("bfast_",title, ".tif"))
############# IF RESULT EXISTS, SKIP
if(!file.exists(result)){
############# PROCESS IF OVERALL APPROACH CHOSEN
if(mode == "Overall"){
############# LOOP THROUGH CHUNKS
for(chunk in 1:nrow(sizes)){
chunk_stack_name <- paste0(chunks_directory,"tmp_chunk_",chunk,"_stack.tif")
chunk_bfast_name <- paste0(chunks_directory,"chunk_",chunk,"_bfast_",title, ".tif")
if(!file.exists(chunk_bfast_name)){
chunk_start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
print(paste0(" Processed : ",ceiling((chunk-1)/nrow(sizes)*100),"%"))
# ############# CREATE THE CHUNK
# system(sprintf("gdal_translate -srcwin %s %s %s %s -co COMPRESS=LZW %s %s",
# sizes[chunk,"start_x"],
# sizes[chunk,"start_y"],
# sizes[chunk,"size_x"],
# sizes[chunk,"size_y"],
# stack_name,
# chunk_stack_name))
chunkerize(stack_name, chunk_stack_name,
sizes$xmin[chunk],
sizes$ymin[chunk],
sizes$xmax[chunk],
sizes$ymax[chunk])
chunk_stack <- brick(chunk_stack_name)
############# DELETE THE RESULT IF IT EXISTS
system(sprintf("rm -f %s",chunk_bfast_name))
############# GENERATE A LOG FILENAME
chunk_log_filename <- paste0(chunks_directory,"log_chunk_",chunk,"_params_",title, ".log")
############# CREATE A FUNCTION TO IMPLEMENT BFAST
# Run BFAST-monitor (bfmSpatial) over the current chunk and time the run.
# Defined inside the chunk loop: it captures chunk_stack, chunk,
# chunk_start_time, chunk_bfast_name, chunk_log_filename and the analysis
# parameters (monitoring_year_beg/end, dates, formula, order, history,
# type, returnLayers) from the enclosing environment.
# Returns the system.time() result so the caller could inspect elapsed time.
loop_process <- function(){
# Use every available core for the pixel-wise model fitting.
cores <- detectCores()
chunktime <- system.time(bfmSpatial(chunk_stack,
start = c(monitoring_year_beg[1], 1),
monend = c(monitoring_year_end[1], 1),
dates = dates,
formula = as.Formula(formula),
order = order,
history = history,
filename = chunk_bfast_name,
type = type,
returnLayers = returnLayers,
mc.cores = cores))
############# WRITE THE TIME TO A LOG
# Append start/end timestamps and elapsed minutes for this chunk to its log.
write(paste0("Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" for a total time of ", chunktime[[3]]/60," minutes"),
chunk_log_filename,
append=TRUE)
# Return the timing object (last expression).
chunktime
}
tryCatch({
print(paste0(" Processing chunk ",chunk," of ",nrow(sizes)))
loop_process()
system(sprintf(paste0("rm -f ", chunks_directory,"tmp_chunk*.tif")))
},error=function(e){
print(paste0(" Failed chunk ",chunk))
fail_log_filename <- paste0(chunks_directory,"fail_chunk_",chunk,"_params_",title, ".log")
write(paste0("Failed Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" Reason for failure ",
e),
fail_log_filename,
append=TRUE)
})
} ### END OF TEST EXISTS CHUNK
print(paste0(" Finished chunk ",chunk))
} ### END OF THE CHUNK LOOP
############# COMBINE ALL THE CHUNKS
system(sprintf("gdal_merge.py -co COMPRESS=LZW -o %s %s",
result,
paste0(chunks_directory, paste0("chunk_*","_bfast_",title, ".tif"))
))
total_time <- Sys.time()-nf_start_time
print(total_time)
############# WRITE TIMING INFO TO A LOG
write(paste0("This process started on ", start_time,
" and ended on ",format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" Total time for the tile: ",total_time ,
" Number of CPUs: ",detectCores(),
" Number of chunks: ",nrow(sizes)),
log_filename,
append=TRUE)
############# NAME OF THE THRESHOLDED OUTPUT
outputfile <- paste0(results_directory,"bfast_",title,'_threshold.tif')
## Post-processing ####
# calculate the mean, standard deviation, minimum and maximum of the magnitude band
# reclass the image into 10 classes
# 0 = no data
# 1 = no change (mean +/- 1 standard deviation)
# 2 = negative small magnitude change (mean - 2 standard deviations)
# 3 = negative medium magnitude change (mean - 3 standard deviations)
# 4 = negative large magnitude change (mean - 4 standard deviations)
# 5 = negative very large magnitude change (mean - 4+ standard deviations)
# 6 = postive small magnitude change (mean + 2 standard deviations)
# 7 = postive medium magnitude change (mean + 3 standard deviations)
# 8 = postive large magnitude change (mean + 4 standard deviations)
# 9 = postive very large magnitude change (mean + 4+ standard deviations)
tryCatch({
means_b2 <- cellStats( raster(result,band=2) , "mean")
mins_b2 <- cellStats( raster(result,band=2) , "min")
maxs_b2 <- cellStats( raster(result,band=2) , "max")
stdevs_b2 <- cellStats( raster(result,band=2) , "sd")
system(sprintf("gdal_calc.py -A %s --A_band=2 --co=COMPRESS=LZW --type=Byte --overwrite --outfile=%s --calc=\"%s\"",
result,
paste0(results_directory,"tmp_bfast_",title,'_threshold.tif'),
paste0('(A<=',(maxs_b2),")*",
'(A>' ,(means_b2+(stdevs_b2*4)),")*9+",
'(A<=',(means_b2+(stdevs_b2*4)),")*",
'(A>' ,(means_b2+(stdevs_b2*3)),")*8+",
'(A<=',(means_b2+(stdevs_b2*3)),")*",
'(A>' ,(means_b2+(stdevs_b2*2)),")*7+",
'(A<=',(means_b2+(stdevs_b2*2)),")*",
'(A>' ,(means_b2+(stdevs_b2)),")*6+",
'(A<=',(means_b2+(stdevs_b2)),")*",
'(A>' ,(means_b2-(stdevs_b2)),")*1+",
'(A>=',(mins_b2),")*",
'(A<' ,(means_b2-(stdevs_b2*4)),")*5+",
'(A>=',(means_b2-(stdevs_b2*4)),")*",
'(A<' ,(means_b2-(stdevs_b2*3)),")*4+",
'(A>=',(means_b2-(stdevs_b2*3)),")*",
'(A<' ,(means_b2-(stdevs_b2*2)),")*3+",
'(A>=',(means_b2-(stdevs_b2*2)),")*",
'(A<' ,(means_b2-(stdevs_b2)),")*2")
))
}, error=function(e){})
#################### CREATE A PSEUDO COLOR TABLE
cols <- col2rgb(c("white","beige","yellow","orange","red","darkred","palegreen","green2","forestgreen",'darkgreen'))
pct <- data.frame(cbind(c(0:9),
cols[1,],
cols[2,],
cols[3,]
))
write.table(pct,paste0(results_directory,"color_table.txt"),row.names = F,col.names = F,quote = F)
################################################################################
## Add pseudo color table to result
system(sprintf("(echo %s) | oft-addpct.py %s %s",
paste0(results_directory,"color_table.txt"),
paste0(results_directory,"tmp_bfast_",title,'_threshold.tif'),
paste0(results_directory,"tmp_colortable.tif")
))
################################################################################
## Compress final result
system(sprintf("gdal_translate -ot byte -co COMPRESS=LZW %s %s",
paste0(results_directory,"tmp_colortable.tif"),
outputfile
))
#################### CREATE A VRT OUTPUT
system(sprintf("gdalbuildvrt %s %s",
paste0(data_dir,"/bfast_",title,"_threshold.vrt"),
paste0(data_dir,
"/*/results/",
"bfast_","*",title,"/",
"bfast_","*",title,"_threshold.tif")
))
system(sprintf(paste0("rm -f ",results_directory,"tmp*.tif")))
system(sprintf(paste0("rm -f ", chunks_directory,"tmp*.tif")))
}else{ #################### End of OVERALL loop and Beginning of SEQUENTIAL loop
cores <- detectCores()
bfmSpatialSq <- function(start, end, timeStack, ...){
lapply(start:end,
function(year){
############# LOOP THROUGH CHUNKS
outfl <- paste0(results_directory,"bfast_",title,"_year",year,'.tif')
############# LOOP THROUGH CHUNKS
for(chunk in 1:nrow(sizes)){
chunk_stack_year_name <- paste0(chunks_directory,"tmp_chunk_",chunk,"_year",year,"_stack.tif")
chunk_bfast_year_name <- paste0(chunks_directory,"chunk_",chunk,"_year",year,"_bfast_",title, ".tif")
if(!file.exists(chunk_bfast_year_name)){
chunk_start_time <- format(Sys.time(), "%Y/%m/%d %H:%M:%S")
# ############# CLIP THE STACK TO THE CHUNK EXTENT
# system(sprintf("gdal_translate -srcwin %s %s %s %s %s %s",
# sizes[chunk,"start_x"],
# sizes[chunk,"start_y"],
# sizes[chunk,"size_x"],
# sizes[chunk,"size_y"],
# timeStack,
# chunk_stack_year_name))
chunkerize(timeStack, chunk_stack_year_name,
sizes$xmin[chunk],
sizes$ymin[chunk],
sizes$xmax[chunk],
sizes$ymax[chunk])
chunk_stack_year <- brick(chunk_stack_year_name)
print(paste0(" Processing year: ",year))
system(sprintf("rm -f %s",chunk_bfast_year_name))
chunk_log_year_filename <- paste0(chunks_directory,"log_chunk_",chunk,"_year_",year,"_params_",title, ".log")
loop_process <- function(){bfm_year <- bfmSpatial(chunk_stack_year,
start = c(year, 1),
monend = c(year + 1, 1),
dates = dates,
formula = as.Formula(formula),
order = order,
history = history,
filename = chunk_bfast_year_name,
type = type,
mc.cores = cores)
############# WRITE THE TIME TO A LOG
write(paste0("Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S")
),
chunk_log_year_filename,
append=TRUE)
bfm_year
}
tryCatch({
print(paste0(" Processing chunk ",chunk," of ",nrow(sizes)))
loop_process()
system(sprintf(paste0("rm -f ", chunks_directory,"tmp_chunk*.tif")))
},error=function(e){
print(paste0(" Failed process on chunk ",chunk))
fail_log_year_filename <- paste0(chunks_directory,"fail_chunk_",chunk,"_year_",year,"_params_",title, ".log")
write(paste0("Failed Chunk: ",
chunk,
" Start time: ",chunk_start_time,
" End time: ",format(Sys.time(),"%Y/%m/%d %H:%M:%S")),
fail_log_year_filename,
append=TRUE)
})
} ### END OF CHUNK EXISTS
} ### END OF THE CHUNK LOOP
############# COMBINE ALL THE CHUNKS
system(sprintf("gdal_merge.py -co COMPRESS=LZW -o %s %s",
outfl,
paste0(chunks_directory,"chunk_*","_year",year,"_bfast_",title, ".tif")
))
outfl
} ### END OF THE YEAR FUNCTION
) ### END OF THE YEAR LAPPLY
} ### END OF THE BFASTSQ FUNCTION
############# RUN BFAST IN SEQUENTIAL
time <- system.time(
bfmSpatialSq(
monitoring_year_beg,
monitoring_year_end,
stack_name
))
## Post-processing ####
# output the maximum of the breakpoint dates for all sequential outputs
## Post-processing: merge the per-year sequential outputs into a single
## raster holding, per pixel, the maximum (most recent) breakpoint date.
# BUG FIX: the original tested LETTERS[numfiles] > 3 / == 2 / == 1, i.e. it
# compared a character such as "D" against a number. The two-file and
# one-file branches could therefore never match, and the main branch relied
# on locale-dependent string ordering. The branches below compare the file
# count itself (>= 3 keeps the nested-maximum path for exactly 3 files, as
# the original arithmetic nummax = numfiles - 2 intended).
tif_files <- list.files(results_directory, pattern = '\\.tif$', full.names = TRUE)
numfiles <- length(tif_files)
outputfile <- paste0(results_directory, "bfast_", title, '_breakpoints.tif')
if (numfiles == 0) {
  warning("No .tif outputs found in ", results_directory, call. = FALSE)
} else {
  # gdal_calc.py input flags: "-A f1 --A_band=1 -B f2 --B_band=1 ..."
  # NOTE(review): LETTERS provides only 26 labels, so at most 26 yearly
  # outputs are supported (same limit as the original code).
  band_flags <- paste(
    paste0('-', LETTERS[1:numfiles], ' ', tif_files,
           ' --', LETTERS[1:numfiles], '_band=1'),
    collapse = " ")
  # Build a nested maximum(...) expression over all inputs, e.g. for three
  # files: "maximum ( maximum( A , B ) , C )".
  calc_expr <- if (numfiles >= 3) {
    paste(
      paste(replicate(numfiles - 2, "maximum"), '(', collapse = ""),
      paste('maximum(', LETTERS[1], ',', LETTERS[2], ')'),
      paste(',', LETTERS[3:numfiles], ')', collapse = ""),
      collapse = "")
  } else if (numfiles == 2) {
    paste('maximum(', LETTERS[1], ',', LETTERS[2], ')')
  } else {
    paste('maximum(', LETTERS[1], ')')
  }
  system(sprintf("gdal_calc.py %s --co=COMPRESS=LZW --type=Float32 --overwrite --outfile=%s --calc=\"%s\"",
                 band_flags,
                 outputfile,
                 calc_expr
  ))
}
write(paste0("This process started on ",
start_time," and ended on ",
format(Sys.time(),"%Y/%m/%d %H:%M:%S"),
" for a total time of ",
time[[3]]/60," minutes"),
log_filename, append=TRUE)
} ## End of SEQUENTIAL loop
} ### End of STACKNAME loop
} ### End of DATA AVAILABLE loop
print(paste0('The result is ',basename(result)))
overall_time <- Sys.time() - overall_start_time
print(overall_time)
print('Done with processing')
sink()
} ### END OF TILE LOOP
|
# Attach the packages the analysis needs.
library(ggplot2)
library(readr)
# FIX: the original unconditionally ran install.packages("animation") on
# every execution and then attached it with require(), which only returns
# FALSE on failure. Install only when the package is missing, and attach
# with library() so a missing package fails loudly.
if (!requireNamespace("animation", quietly = TRUE)) {
  install.packages("animation")
}
library(animation)
# Read the full crash history, then carve out one data frame per carrier.
air <- read.csv("Airplane_Crashes_and_Fatalities_Since_1908.csv", header = TRUE)
# Row-filter on the Operator column. `==` indexing keeps the original
# behavior (rows with a missing Operator propagate as all-NA rows).
by_operator <- function(name) air[air$Operator == name, ]
airfrance <- by_operator('Air France')
american  <- by_operator('American Airlines')
aeroflot  <- by_operator('Aeroflot')
usmil     <- by_operator('Military - U.S. Air Force')
indian    <- by_operator('Indian Airlines')
# Clean each carrier's records: drop rows with a missing Fatalities or
# Aboard count, parse the Date column, and sort chronologically.
#
# BUG FIXES relative to the original copy-pasted blocks:
#  * The Air France filter read
#      is.na((airfrance$Fatalities) | is.na(airfrance$Aboard))
#    — a misplaced parenthesis that OR-ed the numeric Fatalities column
#    inside is.na() instead of combining two is.na() masks (compare the
#    four sibling blocks, which all used
#    is.na(x$Fatalities) | is.na(x$Aboard)).
#  * The Air France date format was "%m%d%Y" (missing slashes), so every
#    Air France date parsed to NA; all other carriers used "%m/%d/%Y".
clean_crashes <- function(df) {
  # Keep only rows where both passenger counts are present.
  missing_counts <- is.na(df$Fatalities) | is.na(df$Aboard)
  df <- df[!missing_counts, ]
  # Date arrives as factor/character such as "09/17/1908".
  df$Date <- as.Date(as.character(df$Date), "%m/%d/%Y")
  # Reorder the records by date.
  df[order(df$Date), ]
}
airfrance <- clean_crashes(airfrance)
american  <- clean_crashes(american)
usmil     <- clean_crashes(usmil)
aeroflot  <- clean_crashes(aeroflot)
indian    <- clean_crashes(indian)
# Running (cumulative) totals per airline ----------------------------------
# For one airline's chronologically ordered crashes, build a frame holding
# the cumulative fatalities and cumulative people aboard at each crash date,
# labelled with the operator name used in the plot legend.  Column names and
# order match what the plotting code below expects.
make_runsum <- function(crashes, operator) {
  data.frame(Fatalities_runsum = cumsum(crashes$Fatalities),
             Date = crashes$Date,
             Aboard_runsum = cumsum(crashes$Aboard),
             Operator = operator,
             stringsAsFactors = FALSE)
}

runsum_airfrance <- make_runsum(airfrance, "Air France")
runsum_american  <- make_runsum(american,  "American Airlines")
runsum_aeroflot  <- make_runsum(aeroflot,  "Aeroflot")
runsum_usmil     <- make_runsum(usmil,     "US Military")
runsum_indian    <- make_runsum(indian,    "Indian Airlines")
# Combine the per-airline running totals into one long frame for plotting.
x <- rbind(runsum_airfrance, runsum_american, runsum_indian, runsum_usmil, runsum_aeroflot)

# Seed the four late-starting operators with a dummy (1, 1) point on
# 1933-10-31 (presumably the earliest crash date in the combined data --
# confirm) so every series is present from the first animation frame.
# FIX: Date is built as a real Date (the original rbound a character/factor
# column into a Date column) and Operator is kept character to match x.
y <- data.frame(Fatalities_runsum = rep(1, 4),
                Date = as.Date(rep("1933-10-31", 4)),
                Aboard_runsum = rep(1, 4),
                Operator = c("Indian Airlines", "US Military",
                             "Aeroflot", "American Airlines"),
                stringsAsFactors = FALSE)
x <- rbind(x, y)
x <- x[order(x$Date), ]

# Animation settings: 0.1 s between frames, at most 100 frames cached.
ani.options(interval = 0.1, nmax = 100)
i <- 1
#Loop through the rows and save the gif...
# Frame i re-plots the first i rows of x, so points accumulate as the
# animation advances; saveGIF() renders one frame per loop pass and stitches
# them into a GIF with ImageMagick's `convert`.
# NOTE(review): the condition i < dim(x)[1] stops one row short of the last
# crash; i <= nrow(x) would include it -- confirm intent.
saveGIF(while (i<dim(x)[1]) {
# Cumulative people aboard vs. date; point size encodes cumulative
# fatalities, colour encodes the airline, alpha fades low-aboard points.
# NOTE(review): "Aboad" in the y-label is a typo for "Aboard" (runtime
# string, left as-is here).
print(m <- qplot(x$Date[1:i],
                 x$Aboard_runsum[1:i],
                 size = (x$Fatalities_runsum[1:i]),
                 alpha=x$Aboard_runsum[1:i],
                 main = "Crash Data for 5 Airlines",
                 col = x$Operator[1:i],
                 xlab = "Date",
                 ylab = "No. of People Aboad (Cumulative)", na.rm=TRUE) + theme_bw() + ylim(0,10000) +
        theme(panel.background = element_rect(fill = "black"),
              panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = c(0.25, 0.65)) +
        scale_size_continuous(limits = c(0,8000), name = 'No. of Fatalities (Cumulative)') +
        scale_color_discrete(name = "Airline Operator") +
        scale_alpha(guide = 'none'))
i=i+1
}, movie.name = "crash_ani.gif", convert = "convert", ani.width = 500,
ani.height = 500) | /crash_animation.R | no_license | chauhanprateek89/Airplane-Crashes | R | false | false | 5,209 | r | library(ggplot2)
# NOTE(review): readr is attached but the script uses base read.csv below;
# kept to preserve the original environment.
library(readr)
# Install the animation package only when it is missing: unconditionally
# calling install.packages() on every run is slow and needs network access.
if (!requireNamespace("animation", quietly = TRUE)) {
  install.packages("animation")
}
# library() (not require()) so a missing package fails loudly here.
library(animation)
# Load the crash history.  header = TRUE: the first CSV row holds column names.
air <- read.csv("Airplane_Crashes_and_Fatalities_Since_1908.csv", header = TRUE)

# Subset the crashes of one operator, drop rows where either Fatalities or
# Aboard is missing, parse the Date column (stored as "%m/%d/%Y") and return
# the rows in chronological order.
#
# BUG FIXES vs. the original copy-pasted stanzas:
#  * the Air France branch computed is.na(Fatalities | is.na(Aboard)) --
#    misplaced parentheses -- instead of is.na(Fatalities) | is.na(Aboard);
#  * the Air France dates were parsed with "%m%d%Y" (missing slashes), which
#    turned every date into NA; all other airlines used "%m/%d/%Y".
clean_operator <- function(crashes, operator) {
  sub <- crashes[crashes$Operator == operator, ]
  missing <- is.na(sub$Fatalities) | is.na(sub$Aboard)
  sub <- sub[!missing, ]
  sub$Date <- as.Date(as.character(sub$Date), "%m/%d/%Y")
  sub[order(sub$Date), ]
}

airfrance <- clean_operator(air, "Air France")
american  <- clean_operator(air, "American Airlines")
aeroflot  <- clean_operator(air, "Aeroflot")
usmil     <- clean_operator(air, "Military - U.S. Air Force")
indian    <- clean_operator(air, "Indian Airlines")
# Running (cumulative) totals per airline ----------------------------------
# For one airline's chronologically ordered crashes, build a frame holding
# the cumulative fatalities and cumulative people aboard at each crash date,
# labelled with the operator name used in the plot legend.  Column names and
# order match what the plotting code below expects.
make_runsum <- function(crashes, operator) {
  data.frame(Fatalities_runsum = cumsum(crashes$Fatalities),
             Date = crashes$Date,
             Aboard_runsum = cumsum(crashes$Aboard),
             Operator = operator,
             stringsAsFactors = FALSE)
}

runsum_airfrance <- make_runsum(airfrance, "Air France")
runsum_american  <- make_runsum(american,  "American Airlines")
runsum_aeroflot  <- make_runsum(aeroflot,  "Aeroflot")
runsum_usmil     <- make_runsum(usmil,     "US Military")
runsum_indian    <- make_runsum(indian,    "Indian Airlines")
# Combine the per-airline running totals into one long frame for plotting.
x <- rbind(runsum_airfrance, runsum_american, runsum_indian, runsum_usmil, runsum_aeroflot)

# Seed the four late-starting operators with a dummy (1, 1) point on
# 1933-10-31 (presumably the earliest crash date in the combined data --
# confirm) so every series is present from the first animation frame.
# FIX: Date is built as a real Date (the original rbound a character/factor
# column into a Date column) and Operator is kept character to match x.
y <- data.frame(Fatalities_runsum = rep(1, 4),
                Date = as.Date(rep("1933-10-31", 4)),
                Aboard_runsum = rep(1, 4),
                Operator = c("Indian Airlines", "US Military",
                             "Aeroflot", "American Airlines"),
                stringsAsFactors = FALSE)
x <- rbind(x, y)
x <- x[order(x$Date), ]

# Animation settings: 0.1 s between frames, at most 100 frames cached.
ani.options(interval = 0.1, nmax = 100)
i <- 1
#Loop through the rows and save the gif...
# Frame i re-plots the first i rows of x, so points accumulate as the
# animation advances; saveGIF() renders one frame per loop pass and stitches
# them into a GIF with ImageMagick's `convert`.
# NOTE(review): the condition i < dim(x)[1] stops one row short of the last
# crash; i <= nrow(x) would include it -- confirm intent.
saveGIF(while (i<dim(x)[1]) {
# Cumulative people aboard vs. date; point size encodes cumulative
# fatalities, colour encodes the airline, alpha fades low-aboard points.
# NOTE(review): "Aboad" in the y-label is a typo for "Aboard" (runtime
# string, left as-is here).
print(m <- qplot(x$Date[1:i],
                 x$Aboard_runsum[1:i],
                 size = (x$Fatalities_runsum[1:i]),
                 alpha=x$Aboard_runsum[1:i],
                 main = "Crash Data for 5 Airlines",
                 col = x$Operator[1:i],
                 xlab = "Date",
                 ylab = "No. of People Aboad (Cumulative)", na.rm=TRUE) + theme_bw() + ylim(0,10000) +
        theme(panel.background = element_rect(fill = "black"),
              panel.grid.major = element_blank(), panel.grid.minor = element_blank(), legend.position = c(0.25, 0.65)) +
        scale_size_continuous(limits = c(0,8000), name = 'No. of Fatalities (Cumulative)') +
        scale_color_discrete(name = "Airline Operator") +
        scale_alpha(guide = 'none'))
i=i+1
}, movie.name = "crash_ani.gif", convert = "convert", ani.width = 500,
ani.height = 500) |
\alias{pangoFontGetGlyphExtents}
\name{pangoFontGetGlyphExtents}
\title{pangoFontGetGlyphExtents}
\description{Gets the logical and ink extents of a glyph within a font. The
coordinate system for each rectangle has its origin at the
base line and horizontal origin of the character with increasing
coordinates extending to the right and down. The functions \code{pangoAscent()},
\code{pangoDescent()}, \code{pangoLbearing()}, and \code{pangoRbearing()} can be used to convert
from the extents rectangle to more traditional font metrics. The units
of the rectangles are in 1/PANGO\_SCALE of a device unit.}
\usage{pangoFontGetGlyphExtents(object, glyph)}
\arguments{
\item{\code{object}}{[\code{\link{PangoFont}}] a \code{\link{PangoFont}}}
\item{\code{glyph}}{[numeric] the glyph index}
}
\value{
A list containing the following elements:
\item{\code{ink.rect}}{[\code{\link{PangoRectangle}}] rectangle used to store the extents of the glyph as drawn
or \code{NULL} to indicate that the result is not needed.}
\item{\code{logical.rect}}{[\code{\link{PangoRectangle}}] rectangle used to store the logical extents of the glyph
or \code{NULL} to indicate that the result is not needed.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/pangoFontGetGlyphExtents.Rd | no_license | cran/RGtk2.10 | R | false | false | 1,283 | rd | \alias{pangoFontGetGlyphExtents}
\name{pangoFontGetGlyphExtents}
\title{pangoFontGetGlyphExtents}
\description{Gets the logical and ink extents of a glyph within a font. The
coordinate system for each rectangle has its origin at the
base line and horizontal origin of the character with increasing
coordinates extending to the right and down. The functions \code{pangoAscent()},
\code{pangoDescent()}, \code{pangoLbearing()}, and \code{pangoRbearing()} can be used to convert
from the extents rectangle to more traditional font metrics. The units
of the rectangles are in 1/PANGO\_SCALE of a device unit.}
\usage{pangoFontGetGlyphExtents(object, glyph)}
\arguments{
\item{\code{object}}{[\code{\link{PangoFont}}] a \code{\link{PangoFont}}}
\item{\code{glyph}}{[numeric] the glyph index}
}
\value{
A list containing the following elements:
\item{\code{ink.rect}}{[\code{\link{PangoRectangle}}] rectangle used to store the extents of the glyph as drawn
or \code{NULL} to indicate that the result is not needed.}
\item{\code{logical.rect}}{[\code{\link{PangoRectangle}}] rectangle used to store the logical extents of the glyph
or \code{NULL} to indicate that the result is not needed.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
########################################################################
# solve_lambda_gamma.R
# Function to solve for unobserved \Lambda and \Gamma, given observed A and G.
# License: MIT
# ""
# Jesse Tweedle
# , 2016
########################################################################
# Fixed-point solver for the unobserved demand-share matrices.
# Stage 1 iterates plant prices p_i (and the plant-plant share matrix GAM)
# to convergence; stage 2 iterates region prices p_r (and the region-plant
# share matrix LAM) taking the stage-1 plant prices as given.
# NOTE(review): depends on Matrix-package sparse objects (.sparseDiagonal
# and the @x slot of sparse matrices), magrittr's %>%, and a to_sdiag()
# helper defined elsewhere in this package.
#
# Args:
#   R    number of regions (size of the region-price diagonal below).
#   N    number of plants (size of the plant-price diagonal below).
#   args list carrying the model objects: beta, C, A, G, ir, eta, epsilon,
#        Ti, Tr, s, z.  NOTE(review): ir and s are unpacked but never used.
# Returns: list(lambda, gamma, p_r, p_i).
solve_gamma <- function(R,N,args) {
  # Unpack the model parameters and data from the args list.
  beta <- args$beta
  C <- args$C
  A <- args$A
  G <- args$G
  ir <- args$ir
  eta <- args$eta
  epsilon <- args$epsilon
  Ti <- args$Ti
  Tr <- args$Tr
  s <- args$s
  z <- args$z
  # Convergence tolerance on the L2 distance between successive price vectors.
  tol <- 1e-5
  # plant prices
  # Both iterates start at unit prices; shares start at the observed G.
  p_i0 <- p_i1 <- .sparseDiagonal(n=N,x=1)
  GAM0 <- GAM1 <- G
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    # Stall/divergence guard: if the objective rose, or improved by less
    # than 0.005 in logs, more than 3 times in a row, give up and keep the
    # current iterate.  NOTE(review): scalar condition written with
    # elementwise `|`; works for length-1 operands, but `||` is idiomatic.
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_i0 <- p_i1
    GAM0 <- GAM1
    # calculate new p_mi ( = unit intermediate cost)
    # Right-multiplying Ti by the diagonal p_i0 scales each column by that
    # plant's price; the nonzero entries are then raised to (1-eta) and
    # share-weighted row sums are taken (a CES-style aggregate, presumably
    # -- confirm against the model write-up).
    m2 <- Ti %*% p_i0
    m2@x <- m2@x^(1-eta)
    mxx <- rowSums(GAM0 * m2)
    p_mi <- mxx^(1/(1-eta)) #^((1-beta)/(1-sigma))
    # calculate new p_i1
    p_i1 <- (C * p_mi^(1-beta) / z) %>% to_sdiag()
    temp.3 <- (mxx / (1-beta)) %>% to_sdiag()
    temp.4 <- Ti %*% p_i1
    temp.4@x <- temp.4@x^(eta-1)
    GAM1 <- temp.3 %*% (G * temp.4)
    # solve for w, normalize p and p?
    # Objective: L2 distance between successive plant-price vectors.
    obj <- (diag(p_i1) - diag(p_i0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # Stage 2: region prices / region-plant shares, given the plant prices.
  p_r0 <- p_r1 <- .sparseDiagonal(n=R,x=1)
  LAM0 <- LAM1 <- A
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    # Same stall/divergence guard as in stage 1.
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_r0 <- p_r1
    LAM0 <- LAM1
    # NOTE(review): uses p_i0, the last-but-one plant-price iterate from
    # stage 1, while LAM1 below uses p_i1; the two agree to within tol after
    # convergence, but confirm this asymmetry is intended.
    m1 <- Tr %*% p_i0
    m1@x <- m1@x^(1-epsilon)
    p_r1 <- rowSums(LAM0 * m1)^(1/(1-epsilon)) %>% to_sdiag()
    temp.1 <- p_r1
    temp.1@x <- temp.1@x^(1-epsilon)
    temp.2 <- Tr %*% p_i1
    temp.2@x <- temp.2@x^(epsilon-1)
    LAM1 <- temp.1 %*% (A * temp.2)
    # Objective: L2 distance between successive region-price vectors.
    obj <- (diag(p_r1) - diag(p_r0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # return the region-plant and plant-plant demand shares matrices
  return(list(lambda=LAM1,gamma=GAM1,p_r=p_r1,p_i=p_i1))
}
| /R/solve_gamma.R | permissive | vishalbelsare/networkasymmetry | R | false | false | 2,534 | r | ########################################################################
# solve_lambda_gamma.R
# Function to solve for unobserved \Lambda and \Gamma, given observed A and G.
# License: MIT
# ""
# Jesse Tweedle
# , 2016
########################################################################
# Fixed-point solver for the unobserved demand-share matrices.
# Stage 1 iterates plant prices p_i (and the plant-plant share matrix GAM)
# to convergence; stage 2 iterates region prices p_r (and the region-plant
# share matrix LAM) taking the stage-1 plant prices as given.
# NOTE(review): depends on Matrix-package sparse objects (.sparseDiagonal
# and the @x slot of sparse matrices), magrittr's %>%, and a to_sdiag()
# helper defined elsewhere in this package.
#
# Args:
#   R    number of regions (size of the region-price diagonal below).
#   N    number of plants (size of the plant-price diagonal below).
#   args list carrying the model objects: beta, C, A, G, ir, eta, epsilon,
#        Ti, Tr, s, z.  NOTE(review): ir and s are unpacked but never used.
# Returns: list(lambda, gamma, p_r, p_i).
solve_gamma <- function(R,N,args) {
  # Unpack the model parameters and data from the args list.
  beta <- args$beta
  C <- args$C
  A <- args$A
  G <- args$G
  ir <- args$ir
  eta <- args$eta
  epsilon <- args$epsilon
  Ti <- args$Ti
  Tr <- args$Tr
  s <- args$s
  z <- args$z
  # Convergence tolerance on the L2 distance between successive price vectors.
  tol <- 1e-5
  # plant prices
  # Both iterates start at unit prices; shares start at the observed G.
  p_i0 <- p_i1 <- .sparseDiagonal(n=N,x=1)
  GAM0 <- GAM1 <- G
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    # Stall/divergence guard: if the objective rose, or improved by less
    # than 0.005 in logs, more than 3 times in a row, give up and keep the
    # current iterate.  NOTE(review): scalar condition written with
    # elementwise `|`; works for length-1 operands, but `||` is idiomatic.
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_i0 <- p_i1
    GAM0 <- GAM1
    # calculate new p_mi ( = unit intermediate cost)
    # Right-multiplying Ti by the diagonal p_i0 scales each column by that
    # plant's price; the nonzero entries are then raised to (1-eta) and
    # share-weighted row sums are taken (a CES-style aggregate, presumably
    # -- confirm against the model write-up).
    m2 <- Ti %*% p_i0
    m2@x <- m2@x^(1-eta)
    mxx <- rowSums(GAM0 * m2)
    p_mi <- mxx^(1/(1-eta)) #^((1-beta)/(1-sigma))
    # calculate new p_i1
    p_i1 <- (C * p_mi^(1-beta) / z) %>% to_sdiag()
    temp.3 <- (mxx / (1-beta)) %>% to_sdiag()
    temp.4 <- Ti %*% p_i1
    temp.4@x <- temp.4@x^(eta-1)
    GAM1 <- temp.3 %*% (G * temp.4)
    # solve for w, normalize p and p?
    # Objective: L2 distance between successive plant-price vectors.
    obj <- (diag(p_i1) - diag(p_i0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # Stage 2: region prices / region-plant shares, given the plant prices.
  p_r0 <- p_r1 <- .sparseDiagonal(n=R,x=1)
  LAM0 <- LAM1 <- A
  obj = tol + 1
  obj_0 <- obj + 1
  counter <- 0
  # while the difference between iterations is greater than tolerance
  while (obj > tol) {
    # Same stall/divergence guard as in stage 1.
    if (obj > obj_0 | (log(obj_0) - log(obj)) < 0.005) {
      counter <- counter+1
      if (counter>3) {
        break
      }
    } else {
      counter <- 0
    }
    obj_0 <- obj
    # save last iteration of parameters
    p_r0 <- p_r1
    LAM0 <- LAM1
    # NOTE(review): uses p_i0, the last-but-one plant-price iterate from
    # stage 1, while LAM1 below uses p_i1; the two agree to within tol after
    # convergence, but confirm this asymmetry is intended.
    m1 <- Tr %*% p_i0
    m1@x <- m1@x^(1-epsilon)
    p_r1 <- rowSums(LAM0 * m1)^(1/(1-epsilon)) %>% to_sdiag()
    temp.1 <- p_r1
    temp.1@x <- temp.1@x^(1-epsilon)
    temp.2 <- Tr %*% p_i1
    temp.2@x <- temp.2@x^(epsilon-1)
    LAM1 <- temp.1 %*% (A * temp.2)
    # Objective: L2 distance between successive region-price vectors.
    obj <- (diag(p_r1) - diag(p_r0))^2 %>% sum() %>% sqrt()
    print(obj)
  }
  # return the region-plant and plant-plant demand shares matrices
  return(list(lambda=LAM1,gamma=GAM1,p_r=p_r1,p_i=p_i1))
}
# Draw additional bootstrap samples for a fitted salbm object.
#
# Always records this update's sampling parameters under Ret$Upd$Upd<k>.
# When NBootstraps > 0, draws bootstrap replicates bBS:(bBS+NBootstraps-1)
# for every treatment arm via oneSamp() and appends the row-bound results to
# the Samp<trt>R / Samp<trt>RL / Samp<trt>wts components (and stores the raw
# samples in Samp<trt> when returnSamples = TRUE).
#
# Arguments:
#   obj           salbm-style list: Narm, data (one data frame per arm), K,
#                 alphas, ntree, JP<trt>, and optionally earlier Samp<trt>*.
#   NBootstraps   number of new bootstrap samples to draw (0 = record
#                 metadata only).
#   bBS           index of the first new bootstrap sample.
#   nseeds        per-arm seeds for set.seed(); padded upward if too short.
#   nseeds2       per-arm seeds passed to oneSamp(); padded downward if short.
#   returnJP      keep the JP<trt> components on the returned object?
#   returnSamples also store the raw samples in Samp<trt>?
#   ...           unused; kept for S3-method signature compatibility.
# Returns: the updated object, classed "salbm".
addSamples.salbm <- function( obj, NBootstraps = 0, bBS = 1,
                              nseeds = c( 5,9), nseeds2 = c(-4,-5),
                              returnJP = TRUE, returnSamples = FALSE, ...)
{
  Narm <- obj$Narm
  # Pad the seed vectors so there is one seed of each kind per arm:
  # nseeds continues upward from its last value, nseeds2 downward.
  ns  <- length(nseeds)
  ns2 <- length(nseeds2)
  if ( ns  < Narm ) nseeds  <- c( nseeds,  nseeds [ns ] + 1:(Narm-ns ))
  if ( ns2 < Narm ) nseeds2 <- c( nseeds2, nseeds2[ns2] - 1:(Narm-ns2))
  eBS <- bBS + NBootstraps - 1
  Ret <- obj
  # Record the parameters of this update under the next free Upd<k> slot.
  upd <- list( nseeds = nseeds, nseeds2 = nseeds2, bBS = bBS, eBS = eBS,
               NBootstraps = NBootstraps )
  if ( is.null(Ret$Upd) ) {
    Ret$Nupd <- 1
    Ret$Upd  <- list( Upd1 = upd )
  } else {
    Ret$Nupd <- Ret$Nupd + 1
    Ret$Upd[[ paste0("Upd", Ret$Nupd) ]] <- upd
  }
  data <- obj$data
  for ( trt in seq_len(Narm) ) {   # seq_len(): no iterations if Narm is 0
    sd  <- nseeds2[trt]
    nr  <- nrow(data[[trt]])
    tjp <- obj[[ paste0("JP", trt) ]]
    # Drop the (potentially large) joint-probability component if unwanted;
    # assigning NULL removes the list element.
    if ( !returnJP ) Ret[[ paste0("JP", trt) ]] <- NULL
    if ( NBootstraps > 0 ) {
      set.seed( nseeds[trt] )
      llout <- lapply( bBS:eBS, oneSamp, jps = tjp, nsamp = nr, K = obj$K,
                       sd = sd, ntree = obj$ntree, alphas = obj$alphas,
                       trt = trt, returnSamples = returnSamples )
      # Stack each per-bootstrap piece row-wise.
      SampR   <- do.call(rbind, lapply(llout, function(x) x$SampR))
      SampRL  <- do.call(rbind, lapply(llout, function(x) x$SampRL))
      Sampwts <- do.call(rbind, lapply(llout, function(x) x$wtsSamp))
      nms <- c( paste0("Samp", trt, "R"), paste0("Samp", trt, "RL"),
                paste0("Samp", trt, "wts"), paste0("Samp", trt))
      # Append to any samples already stored on the incoming object.
      if (!is.null(obj[[nms[1]]])) SampR   <- rbind( obj[[nms[1]]], SampR )
      if (!is.null(obj[[nms[2]]])) SampRL  <- rbind( obj[[nms[2]]], SampRL )
      if (!is.null(obj[[nms[3]]])) Sampwts <- rbind( obj[[nms[3]]], Sampwts )
      Ret[[nms[1]]] <- SampR
      Ret[[nms[2]]] <- SampRL
      Ret[[nms[3]]] <- Sampwts
      if ( returnSamples ) {
        # NOTE(review): unlike the three components above, raw samples are
        # NOT appended to any existing obj[[nms[4]]] -- prior samples are
        # overwritten.  Behavior preserved from the original; confirm intent.
        Ret[[nms[4]]] <- do.call(rbind, lapply(llout, function(x) x$Samp))
      }
    }
  }
  class(Ret) <- "salbm"
  return(Ret)
}
| /R/addSamples.salbm.R | no_license | cran/salbm | R | false | false | 2,717 | r | addSamples.salbm <- function( obj, NBootstraps = 0, bBS = 1,
nseeds = c( 5,9), nseeds2 = c(-4,-5),
returnJP = TRUE, returnSamples = FALSE, ...)
{
# --------------------------------------------------------------------------------
Narm <- obj$Narm
ns <- length(nseeds)
ns2 <- length(nseeds2)
if ( ns < Narm ) nseeds <- c( nseeds, nseeds [ns ] + 1:(Narm-ns ))
if ( ns2 < Narm ) nseeds2 <- c( nseeds2, nseeds2[ns2] - 1:(Narm-ns2))
eBS <- bBS + NBootstraps - 1
Ret <- obj
if ( is.null(Ret$Upd) ) {
Ret$Nupd <- 1
Ret$Upd <- list( Upd1 = list( nseeds = nseeds, nseeds2 = nseeds2, bBS = bBS, eBS = eBS, NBootstraps = NBootstraps ))
} else {
Nupd <- Ret$Nupd + 1
nm <- paste0("Upd",Nupd)
Ret$Nupd <- Nupd
Ret$Upd[[nm]] <- list( nseeds = nseeds, nseeds2 = nseeds2, bBS = bBS, eBS = eBS, NBootstraps = NBootstraps )
}
data <- obj$data
mna <- obj$mna
mxa <- obj$mxa
for ( trt in 1:Narm ) {
sd <- nseeds2[trt]
tdat <- data[[trt]]
nr <- nrow(tdat)
tjp <- obj[[ paste0("JP",trt) ]]
K <- obj$K
alphas <- obj$alphas
ntree <- obj$ntree
if ( !returnJP ) Ret[[ paste0("JP",trt) ]] <- NULL
if ( NBootstraps > 0 ) {
set.seed( nseeds[trt] )
llout <- lapply( bBS:eBS, oneSamp, jps=tjp, nsamp = nr, K = K,
sd = sd, ntree = ntree, alphas = alphas, trt = trt, returnSamples = returnSamples )
SampR <- lapply(llout,function(x) { return(x$SampR ) } )
SampRL <- lapply(llout,function(x) { return(x$SampRL ) } )
Sampwts <- lapply(llout,function(x) { return(x$wtsSamp) } )
SampR <- do.call(rbind,SampR)
SampRL <- do.call(rbind,SampRL)
Sampwts <- do.call(rbind,Sampwts)
nms <- c( paste0("Samp",trt,"R"), paste0("Samp",trt,"RL"),
paste0("Samp",trt,"wts"), paste0("Samp",trt))
if (!is.null(obj[[nms[1]]])) SampR <- rbind( obj[[nms[1]]], SampR)
if (!is.null(obj[[nms[2]]])) SampRL <- rbind( obj[[nms[2]]], SampRL)
if (!is.null(obj[[nms[3]]])) Sampwts <- rbind( obj[[nms[3]]], Sampwts)
Ret[[nms[1]]] <- SampR
Ret[[nms[2]]] <- SampRL
Ret[[nms[3]]] <- Sampwts
if ( returnSamples ) {
Samp <- lapply(llout,function(x) { return(x$Samp ) } )
Samp <- do.call(rbind,Samp)
Ret[[nms[4]]] <- Samp
}
}
}
# --------------------------------------------------------------------------------
class(Ret) <- "salbm"
return(Ret)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.