| content (string) | path (string) | license_type (string) | repo_name (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | text (string) |
|---|---|---|---|---|---|---|---|---|---|
#' multiplot
#'
#' Combine multiple ggplot2 plots onto one page.
#' Code from http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/
#' @param ... ggplot2 plot objects.
#' @param plotlist An optional list of ggplot2 plot objects, combined with those passed via \code{...}.
#' @param file Currently unused.
#' @param cols Number of columns in the layout.
#' @param layout A matrix specifying the layout. If present, 'cols' is ignored. For example, with layout = matrix(c(1,2,3,3), nrow=2, byrow=TRUE), plot 1 goes in the upper left, plot 2 in the upper right, and plot 3 all the way across the bottom.
#' @export
#' @examples
#' multiplot(p1, p2, p3, p4, cols = 2)
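#' # A sketch of the layout-matrix form described above (assumes p1, p2, p3 exist):
#' # multiplot(p1, p2, p3, layout = matrix(c(1, 2, 3, 3), nrow = 2, byrow = TRUE))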
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
  library(grid)

  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)

  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots / cols)),
                     ncol = cols, nrow = ceiling(numPlots / cols))
  }

  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))

    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
| /R/multiplot.r | no_license | ianhussey/timesavers | R | false | false | 1,631 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cut_to_classes.R
\name{cut_to_classes}
\alias{cut_to_classes}
\title{Cuts the Values Column into Classes and Polishes the Labels}
\usage{
cut_to_classes(
x,
n = 5,
style = "equal",
manual = FALSE,
manual_breaks = NULL,
decimals = 0,
nodata_label = "No data"
)
}
\arguments{
\item{x}{A numeric vector, e.g. the \code{values} variable in data returned by
\code{\link[=get_eurostat]{get_eurostat()}}.}
\item{n}{A numeric. Number of classes/categories.}
\item{style}{chosen style: one of "fixed", "sd", "equal", "pretty", "quantile", "kmeans", "hclust", "bclust", "fisher", "jenks", "dpih", "headtails", or "maximum"}
\item{manual}{Logical. If manual breaks are being used}
\item{manual_breaks}{Numeric vector with manual threshold values}
\item{decimals}{Number of decimals to include with labels}
\item{nodata_label}{String. Text label for NA category.}
}
\value{
a factor.
}
\description{
Categorises a numeric vector into automatic or manually defined
categories and polishes the labels ready for use in mapping with \code{ggplot2}.
}
\examples{
\dontshow{if (check_access_to_data()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
\donttest{
# lp <- get_eurostat("nama_aux_lp")
lp <- get_eurostat("nama_10_lp_ulc")
lp$class <- cut_to_classes(lp$values, n = 5, style = "equal", decimals = 1)
}
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[classInt:classIntervals]{classInt::classIntervals()}}
Other helpers:
\code{\link{dic_order}()},
\code{\link{eurotime2date2}()},
\code{\link{eurotime2date}()},
\code{\link{eurotime2num2}()},
\code{\link{eurotime2num}()},
\code{\link{harmonize_country_code}()},
\code{\link{label_eurostat2}()},
\code{\link{label_eurostat}()}
}
\author{
Markus Kainu \href{mailto:markuskainu@gmail.com}{markuskainu@gmail.com}
}
\concept{helpers}
| /man/cut_to_classes.Rd | permissive | rOpenGov/eurostat | R | false | true | 1,892 | rd |
################################################################
# Ramesh Subedi
# Implement the following clustering algorithms:
# 1. K-means
# 2. Expectation Maximization
#
# In addition, implement the following feature dimensionality reduction algorithms
# 1. Any one feature selection algorithm (decision tree, forward selection, backward elimination, etc.)
# 2. PCA
# 3. ICA
# 4. Randomized Projections
#
#
# Tasks:
# 1. Run the clustering algorithms on your datasets and describe your observations (with plots).
# 2. Apply the dimensionality reduction algorithms on your datasets and describe your observations
# (with plots).
# 3. Run the clustering algorithms again, this time after applying dimensionality reduction. Describe
# the difference compared to previous experimentation (with plots).
# 4. Run your neural network learner from assignment 3 on the data after dimensionality reduction
# (from task 2). Explain and plot your observations (error rates, etc.)
# 5. Use the clustering results from task 1 as the new features and apply neural network learner on
# this new data consisting of only clustering results as features and class label as the output.
# Again, plot and explain your results.
###############################################################
######################################
library(data.table)
library(magrittr)
library(plyr)
library(dtplyr)
library(sandwich) # for White correction
library(lmtest) # for more advanced hypothesis testing tools
#library(tseries) # time series package
#library(DBI)
#library(RSQLite)
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
library(broom) # for tidy() function
#library(TSA)
#library(forecast)
library(vars)
#library(fpp) # for VAR forecast
library(UsingR)
#library(margins)
#library(plm) # for pooled OLS (ordinary least squares)
library(car) # for scatterplot()
#library(aod) # for probit link
library(gradDescent) # for Gradient Descent calculation
library(glmnet)
library(e1071) # for Support Vector Machine, Titanic data, etc.
library(tree) # for tree to work on Decision Trees
library(gbm) # for gbm (gradient boosting model)
library(adabag) # for bagging
library(rpart) #
library(party) # Recursive partitioning
library(partykit) # Pruning tree from party
library(neuralnet) # for neural net
library(caret) # for KNN
library(ROCR) # for KNN as well
library(pROC) # for KNN as well
library(boot) # for cross-validation
library(ggplot2)
library(class)
library(scales)
library(factoextra)
library(FactoInvestigate)
library(FactoMineR)
library(flexclust) # to quantify performance of kmeans
library(NbClust)
library(mclust) # for Expectation Maximization Clustering
library(ica) # for ICA (independent component analysis)
library(fastICA) # for fast ICA (independent component analysis)
library(vegan) # for hclust (hierarchical clustering) of some type
library(ggbiplot)
library(fpc) # for plotcluster
library(RandPro) # for randomized projection
# rm(list=ls()) #drop all variables
# start with a clean slate
rm(list=ls(all=TRUE))
data <- read.table("~/mlData/student-mat.csv",sep=";",header=TRUE)
data <- data %>% dplyr::select(-G1,-G2) # Drop G1 and G2 variables from data
names(data)
data$school <- as.numeric(data$school)
data$school
str(data$school)
data$sex <- as.numeric(data$sex)
data$address<-as.numeric(data$address)
data$famsize <- as.numeric(data$famsize)
data$Pstatus<- as.numeric(data$Pstatus)
data$Fjob<-as.numeric(data$Fjob)
data$Mjob<- as.numeric(data$Mjob)
data$reason<- as.numeric(data$reason)
data$guardian<- as.numeric(data$guardian)
data$schoolsup<- as.numeric(data$schoolsup)
data$famsup<- as.numeric(data$famsup)
data$paid<- as.numeric(data$paid)
data$activities<- as.numeric(data$activities)
data$nursery<- as.numeric(data$nursery)
data$higher<- as.numeric(data$higher)
data$internet<- as.numeric(data$internet)
data$romantic<- as.numeric(data$romantic)
Data <-data # Data is unscaled
names(Data)
Data$G3
# Normalize data (scaling)
normalize <- function(x){
  return ((x-min(x))/(max(x)-min(x)))
}
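# Quick sanity check of the scaling above (illustrative only; not part of the original analysis):
normalize(c(2, 4, 6)) # 0.0 0.5 1.0 -- the minimum maps to 0 and the maximum maps to 1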
names(data)
data$school <- normalize(data$school)
data$sex <- normalize(data$sex)
data$age <- normalize(data$age)
data$address <- normalize(data$address)
data$famsize <- normalize(data$famsize)
data$Pstatus <- normalize(data$Pstatus)
data$Medu <- normalize(data$Medu)
data$Fedu <- normalize(data$Fedu)
data$Mjob <- normalize(data$Mjob)
data$Fjob <- normalize(data$Fjob)
data$reason <- normalize(data$reason)
data$guardian <- normalize(data$guardian)
data$traveltime <- normalize(data$traveltime)
data$studytime <- normalize(data$studytime)
data$failures <- normalize(data$failures)
data$schoolsup <- normalize(data$schoolsup)
data$famsup <- normalize(data$famsup)
data$paid <- normalize(data$paid)
data$activities <- normalize(data$activities)
data$nursery <- normalize(data$nursery)
data$higher <- normalize(data$higher)
data$internet <- normalize(data$internet)
data$romantic <- normalize(data$romantic)
data$famrel <- normalize(data$famrel)
data$freetime <- normalize(data$freetime)
data$goout <- normalize(data$goout)
data$Dalc <- normalize(data$Dalc)
data$Walc <- normalize(data$Walc)
data$health <- normalize(data$health)
data$absences <- normalize(data$absences)
data$G3 <- normalize(data$G3)
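# A more concise equivalent of the column-by-column normalization above (a sketch;
# assumes every column is numeric at this point, which the as.numeric conversions ensure).
# Left commented out to avoid redoing the work:
# data[] <- lapply(data, normalize)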
# make G3 in data a binary variable (0 or 1) since GLM needs this.
Grade <- data$G3
Grade
MeanVal<- mean(data$G3)
MeanVal
MyGrade <- ifelse(Grade>=MeanVal,1,0)
MyGrade
# Add variable MyGrade to existing data
newData <- cbind(data,MyGrade)
# remove (drop) G3 from newData:
newData <- newData %>% dplyr::select(-G3)
data <- newData
############# TASK1 ########################################
############ K-Means from here #############################
set.seed(20)
Clusters <- kmeans(data, centers=10, nstart=50)
Clusters
# So how well did the K-means clustering uncover the actual structure of the data contained in the MyGrade variable? A cross-tabulation of MyGrade and cluster membership is given by
confMat <- table(data$MyGrade, Clusters$cluster)
confMat
# We can quantify the agreement between MyGrade and cluster, using the adjusted Rand index provided by the flexclust package.
randIndex(confMat) # 0.1038752 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
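# Tiny illustration of that range (a sketch, not part of the original analysis):
# identical partitions give an adjusted Rand index of 1.
perfectAgreement <- table(rep(1:2, each = 5), rep(1:2, each = 5))
randIndex(perfectAgreement) # 1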
set.seed(20)
ClustersFinal <- kmeans(data, 8, nstart=25)
confMatFinal <- table(data$MyGrade, ClustersFinal$cluster)
randIndex(confMatFinal) # 0.1685834 for 8 clusters (optimal cluster # from scree plot)
dataKmeans <- as.data.frame(ClustersFinal$centers) # This is the output data from kmeans with 8 clusters
#### standalone code to plot scree plot for kmeans ###############
wssplot <- function(data, nc=20, seed=1234){ # nc for number of centers
  wss <- (nrow(data)-1)*sum(apply(data,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers=i, nstart=1)$withinss)}
  # For nstart>4 we did not see elbow, hence accepted R's default nstart=1 to observe elbow.
  plot(1:nc, wss, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares")}
wssplot(data) # best elbow plot, cluster number=8
abline(v = 8, lty =2) # draw a vertical dashed line at the 8th cluster
############# kmeans scree plot done ###############################
#### Expectation Maximization ##################
dataDefault <- mclustBIC(data)
dataCustomize <- mclustBIC(data, G = 1:20, x = dataDefault)
plot(dataCustomize, G = 3:20, legendArgs = list(x = "topright"),xlab='Number of clusters') # best value at 7th cluster
abline(v = 7, lty =2)
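# A sketch of extracting hard cluster assignments from the chosen mixture model
# (assumes mclust; reuses the BIC values computed above rather than refitting them):
emFit <- Mclust(data, x = dataCustomize)
table(emFit$classification, data$MyGrade)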
############### TASK2 ##############################################
##### 1. For Decision Tree use hclust (hierarchical clustering) ###
hcluster <- hclust(dist(data), method='complete')
hclustTrim <- cutree(hcluster,8)
plot(hclustTrim)
Eucl_EX<-vegdist(data,"euclidean")
Eucl<-hclust(Eucl_EX,method="complete") # The method is 'complete' linkage
# plot for report
plot(Eucl,main='Complete Linkage Dendrogram',xlab='')
chord_EX<-vegdist(decostand(data,"norm"),"euclidean")
chord_EX
chord<-hclust(chord_EX,method="complete")
plot(chord)
# Cut tree so that it has 8 clusters
hclust.clusters <- cutree(chord, k = 8)
plot(hclust.clusters)
# Compare cluster membership to actual MyGrade
table(hclust.clusters, MyGrade)
sum(apply(table(hclust.clusters, MyGrade), 1, min)) # 146
# To compare with kmeans
abc<-kmeans(scale(data), centers = 8)
table(abc$cluster,MyGrade)
# Table for Report:
table(hclust.clusters, MyGrade) # from hclust
table(abc$cluster,MyGrade) # from kmeans
#################### 2. PCA using prcomp, and PCA for Investigate() utility ###########
pca <- prcomp(data,scale=T)
plot(pca)
plot(pca$sdev^2,xlab="Principal Components",ylab="Variance Explained")
plot(cumsum(pca$sdev^2),xlab="Principal Components",ylab="Cumulative Variance Explained")
# A few plots for report from this:
PCA1 <- PCA(data, ncp=10, graph=T, scale.unit=T)
Investigate(PCA1)
######## output data from PCA ##############
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
head(dataPCA[,1:2])
plot(PC1~PC2, data=dataPCA, cex = 0.5, lty = "solid")
text(PC1~PC2, data=dataPCA, labels=rownames(data),cex=.8)
##############################################
biplot(pca,scale=0) # stick with scale=0
pcavar<- pca$sdev^2
pve<-pcavar/sum(pcavar)
ggbiplot(pca,circle=T) # shows only dots
colMeans(data) # Mean of each variable
apply(data,2,sd) # Standard deviation of each variable
# Plot for report
par(mfrow=c(2,1))
plot(pve,xlab='Principal Components', ylab='Proportion of variance explained',ylim=c(0.01,0.12)) # this is like elbow plot (elbow at 7)
abline(v = 7, lty =2)
plot(cumsum(pve),xlab='Principal Components', ylab='Cumulative Proportion of variance explained',ylim=c(0.01,1)) # this is like elbow plot (elbow at 7)
abline(v = 7, lty =2)
abline(h = 0.43, lty =2)
par(mfrow=c(1,1))
# A total of 42.961% of the variance is explained by the first 7 principal components.
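# Sketch check of that figure: it is just the cumulative proportion of variance.
sum(pve[1:7]) # roughly 0.43, matching the horizontal reference line drawn above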
############### ICA ############
icaModel <- fastICA(data, 8, alg.typ = "deflation", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200,
tol = 0.0001, verbose = F)
# plot for report
par(mfrow = c(1, 3))
plot(icaModel$X, main = "Original Component",xlim=c(-2,3),ylim=c(-2.5,2.5))
plot(icaModel$X %*% icaModel$K, main = "PCA components",xlim=c(-2,3),ylim=c(-2.5,2.5))
plot(icaModel$S, main = "ICA components",xlim=c(-2,3),ylim=c(-2.5,2.5))
par(mfrow = c(1, 1))
names(icaModel)
summary(icaModel)
icaModel$X # pre-processed data matrix
icaModel$K # pre-whitening matrix that projects data onto the first n.comp principal components.
icaModel$W # estimated un-mixing matrix (no.factors by no.signals) - no. means number of
icaModel$A # estimated mixing matrix (no.signals by no.factors) - no. means number of
icaModel$S # estimated source matrix (The column vectors of estimated independent components (no.obs by no.factors)) - here no.factors means number of principal components
# check whitening:
# check correlations are zero
cor(icaModel$K) # correlations are not zero
# check diagonals are 1 in covariance
cov(icaModel$K) # diagonals are not 1
cor(icaModel$X)
cor(icaModel$W)
#cor(icaModel$A)
# table for report
cor(icaModel$S) # no correlations
cov(icaModel$S) # diagonal elements are 1
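# Compact numeric version of the two checks above (a sketch): the estimated sources
# should be close to white, i.e. their covariance should be near the identity matrix.
max(abs(cov(icaModel$S) - diag(ncol(icaModel$S)))) # close to 0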
############## Randomized Projections ##################
# https://cran.r-project.org/web/packages/RandPro/RandPro.pdf
set.seed(101)
index.data <- sample(1:nrow(data),round(0.70*nrow(data)))
Train <- data[index.data,] # 70%
Test <- data[-index.data,] # 30%
trainl<-as.factor(Train$MyGrade) # need to declare as factor.
testl<-as.factor(Test$MyGrade) # need to declare as factor.
train<-Train[,1:30]
test<-Test[,1:30]
randomProjection<-classify(train, test, trainl, testl)
# Confusion Matrix and Statistics
# Reference
# Prediction 0 1
# 0 26 12
# 1 28 53
#
# Accuracy : 0.6639
# 95% CI : (0.5715, 0.7478)
# No Information Rate : 0.5462
# P-Value [Acc > NIR] : 0.006031
#
# Kappa : 0.3045
# Mcnemar's Test P-Value : 0.017706
#
# Sensitivity : 0.4815
# Specificity : 0.8154
# Pos Pred Value : 0.6842
# Neg Pred Value : 0.6543
# Prevalence : 0.4538
# Detection Rate : 0.2185
# Detection Prevalence : 0.3193
# Balanced Accuracy : 0.6484
#
# 'Positive' Class : 0
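# For reference, a minimal hand-rolled Gaussian random projection (a sketch; the
# target dimension k = 10 is an arbitrary choice, not taken from RandPro):
set.seed(102)
k <- 10
R <- matrix(rnorm(ncol(train) * k), ncol = k) / sqrt(k)
train_rp <- as.matrix(train) %*% R # projected training features, nrow(train) x 10
dim(train_rp)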
############### TASK3 ##############################################
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
head(dataPCA)
set.seed(20)
Clusters <- kmeans(dataPCA, centers=10, nstart=50)
Clusters
# So how well did the K-means clustering uncover the actual structure of the data contained in the MyGrade variable? A cross-tabulation of MyGrade and cluster membership is given by
confMat <- table(dataPCA$PC31, Clusters$cluster)
confMat
# We can quantify the agreement between MyGrade and cluster, using the adjusted Rand index provided by the flexclust package.
randIndex(confMat) # 0.1038752 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(20)
ClustersFinal <- kmeans(dataPCA, 8, nstart=25)
confMatFinal <- table(dataPCA$PC31, ClustersFinal$cluster)
randIndex(confMatFinal) # 0.1685834 for 8 clusters (optimal cluster # from scree plot)
##### Before ####
#### standalone code to plot scree plot for kmeans ###############
wssplot <- function(data, nc=20, seed=1234){
  wss <- (nrow(data)-1)*sum(apply(data,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers=i, nstart=1)$withinss)}
  plot(1:nc, wss, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares", main="Before Dimension Reduction")}
wssplot(data)
abline(v = 8, lty =2)
############# kmeans scree plot done ###############################
### Now ############
#### standalone code to plot scree plot for kmeans ###############
wssplot1 <- function(dataPCA, nc=20, seed=1234){
  wss1 <- (nrow(dataPCA)-1)*sum(apply(dataPCA,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss1[i] <- sum(kmeans(dataPCA, centers=i, nstart=1)$withinss)}
  plot(1:nc, wss1, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares", main="After Dimension Reduction")}
wssplot1(dataPCA)
abline(v = 8, lty =2)
############# kmeans scree plot done ###############################
# plot for report
par(mfrow=c(2,1))
wssplot(data)
abline(v = 8, lty =2)
wssplot1(dataPCA)
abline(v = 8, lty =2)
par(mfrow=c(1,1))
#### Expectation Maximization ##################
dataDefault1 <- mclustBIC(dataPCA)
dataCustomize1 <- mclustBIC(dataPCA, G = 1:20, x = dataDefault1)
plot(dataCustomize1, G = 3:20, legendArgs = list(x = "topright"),xlab='Number of clusters') # best value at 7th cluster
abline(v = 7, lty =2)
############### TASK4 ##############################################
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(200)
index.data <- sample(1:nrow(dataPCA),round(0.45*nrow(dataPCA)))
trainScaled <- dataPCA[index.data,] # training share set by the split above (0.45 here)
testScaled <- dataPCA[-index.data,] # remaining share (0.55 here)
NN <- names(trainScaled)
ff <- as.formula(paste("PC31 ~", paste(NN[!NN %in% "PC31"], collapse = " + ")))
ff
# For learning curve, run the neural network for multiple times by changing the percentage of
# train/test split and find MSE for each step. Plot that MSE in a graph.
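# A sketch of automating those re-runs (commented out; the split fractions and the
# target column index follow the surrounding code, and neuralnet convergence is not
# guaranteed for every split):
# splits <- seq(0.85, 0.45, by = -0.05)
# trainMSE <- sapply(splits, function(p) {
#   idx <- sample(1:nrow(dataPCA), round(p * nrow(dataPCA)))
#   fit <- neuralnet(ff, data = dataPCA[idx, ], hidden = c(3, 2), threshold = 0.05)
#   sum((fit$net.result[[1]] - dataPCA[idx, 31])^2) / length(idx)
# })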
# Calculating MSE.
NeuralNet <- neuralnet(ff,data=trainScaled,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE <- sum((NeuralNet$net.result[[1]] - trainScaled[,31])^2)
MSE_train <- NN_Train_SSE/nrow(trainScaled)
MSE_train # Mean Squared Error 0.1350781249
nrow(trainScaled) # 276
listMSEtr=c(0.1482775817, 0.1171002659, 0.1394318619, 0.1350781249, 0.1082534565, 0.1131214381, 0.08753255714, 0.1022628185, 0.08153341087) # train data
listTr=c(336, 316, 296, 276, 257, 237, 217, 198, 178) # train data
NeuralNetTest <- neuralnet(ff,data=testScaled,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE <- sum((NeuralNetTest$net.result[[1]]-testScaled[,31])^2)
MSE_test<-NN_Test_SSE/nrow(testScaled)
MSE_test
nrow(testScaled)
listMSEte=c(0.04407435405, 0.04463376316, 0.02589934399, 0.05542529601, 0.1145059221, 0.0902390286, 0.09282047811, 0.1031201844, 0.09279801747) # test data
listTe = c(59, 79, 99, 119, 138, 158, 178,197, 217) # test data
# plot for report
par(mfrow=c(2,1))
plot (listMSEte~listTe, type = "b", xlab = "Test data size", ylab = "Test Data MSE", main = "Learning curve for test dataset (School Data)",ylim=c(0,0.15))
plot (listMSEtr~listTr, type = "b", xlab = "Training data size", ylab = "Training Data MSE", main = "Learning curve for training dataset (School Data)",ylim=c(0,0.15))
par(mfrow=c(1,1))
############### TASK5 ##############################################
dataKmeans <- as.data.frame(ClustersFinal$centers) # This is the output data from kmeans with 8 clusters
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data1 <- sample(1:nrow(dataKmeans),round(0.85*nrow(dataKmeans)))
trainScaled1 <- dataKmeans[index.data1,] # training share set by the split above (0.85 here)
testScaled1 <- dataKmeans[-index.data1,] # remaining share (0.15 here)
# names(trainScaled1)
# dim(trainScaled1) # 7 31
NN1 <- names(trainScaled1)
ff1 <- as.formula(paste("MyGrade ~", paste(NN1[!NN1 %in% "MyGrade"], collapse = " + ")))
ff1
NeuralNet1 <- neuralnet(ff1,data=trainScaled1,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE1 <- sum((NeuralNet1$net.result[[1]] - trainScaled1[,31])^2)
MSE_train1 <- NN_Train_SSE1/nrow(trainScaled1)
MSE_train1 # Mean Squared Error 0.1828376401
nrow(trainScaled1) # 7
listMSEtr1=c(0.1828376401,0.006122064112,0.006122064112,0.006122064112,0.1344578679,0.1344578679,0.1266985938,0.1266985938,0.1266985938) # train data
listTr1=c(7,6,6,6,5,5,4,4,4) # train data
NeuralNetTest1 <- neuralnet(ff1,data=testScaled1,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE1 <- sum((NeuralNetTest1$net.result[[1]]-testScaled1[,31])^2)
MSE_test1<-NN_Test_SSE1/nrow(testScaled1)
MSE_test1
nrow(testScaled1)
listMSEte1=c(0.002035566436, 0.09332453108,0.09332453108,0.09332453108, 0.09794928906,0.09794928906,0.1716883991,0.1716883991, 0.1716883991) # test data
listTe1 = c(1,2,2,2,3,3,4,4,4)
# plot for report
par(mfrow=c(2,1))
plot (listMSEte1~listTe1, type = "b", xlab = "Test data cluster number", ylab = "Test Data MSE", main = "Learning curve for test dataset (School Data)",ylim=c(0,0.18))
plot (listMSEtr1~listTr1, type = "b", xlab = "Training data cluster number", ylab = "Training Data MSE", main = "Learning curve for training dataset (School Data)",ylim=c(0,0.18))
par(mfrow=c(1,1))
######################################################################
################### End of School Data ###############################
#################### Start of Bank Data ##############################
######################################################################
# Dataset from here:
# https://github.com/gchoi/Dataset/blob/master/UniversalBank.csv
Bank.df <- read.csv("~/mlData/bank.csv", header = TRUE)
names(Bank.df)
str(Bank.df)
Bank.df$PersonalLoan # already a binary variable
Bank.df$CDAccount
dim(Bank.df) # 5000 14
bank.df <- Bank.df[ , -c(1, 5)] # Drop ID and zip code columns.
names(bank.df)
dim(bank.df) # 5000 12
bank.df <- bank.df %>% dplyr::select(Age,Experience,Income,Family,CCAvg,Education,Mortgage,SecuritiesAccount,CDAccount,Online,CreditCard,PersonalLoan) # Make sure PersonalLoan is at the last position.
names(bank.df)
summary(bank.df)
# Normalize bank.df
bank.df$Age <- normalize(bank.df$Age)
bank.df$Experience <- normalize(bank.df$Experience)
bank.df$Income <- normalize(bank.df$Income)
bank.df$Family <- normalize(bank.df$Family)
bank.df$CCAvg <- normalize(bank.df$CCAvg)
bank.df$Education <- normalize(bank.df$Education)
bank.df$Mortgage <- normalize(bank.df$Mortgage)
bank.df$SecuritiesAccount <- normalize(bank.df$SecuritiesAccount)
bank.df$CDAccount <- normalize(bank.df$CDAccount)
bank.df$Online <- normalize(bank.df$Online)
bank.df$CreditCard <- normalize(bank.df$CreditCard)
bank.df$PersonalLoan <- normalize(bank.df$PersonalLoan)
##################### TASK1 ##########################################
set.seed(21)
Clusters1 <- kmeans(bank.df, centers=10, nstart=50)
Clusters1
confMat1 <- table(bank.df$PersonalLoan, Clusters1$cluster)
confMat1
# We can quantify the agreement between PersonalLoan and cluster, using the adjusted Rand index provided by the flexclust package.
randIndex(confMat1) # 0.04010590441 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(22)
ClustersFinal1 <- kmeans(bank.df, 6, nstart=25)
confMatFinal1 <- table(bank.df$PersonalLoan, ClustersFinal1$cluster)
randIndex(confMatFinal1)
# 0.08745313475 for 6 clusters; this was the max value for clusters 1 to 20.
# To search for a better cluster count, vary the number of clusters and watch
# for a higher adjusted Rand index (a sketch of that loop follows below).
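# A sketch of that search loop (commented out; not part of the original run, and it
# refits kmeans 19 times so it can take a little while):
# ari <- sapply(2:20, function(k) {
#   km <- kmeans(bank.df, centers = k, nstart = 25)
#   randIndex(table(bank.df$PersonalLoan, km$cluster))
# })
# which.max(ari) + 1 # cluster count with the highest adjusted Rand index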
dataKmeansBank <- as.data.frame(ClustersFinal1$centers) # This is the output data from kmeans with 6 clusters
#### standalone code to plot scree plot for kmeans ###############
wssBank <- function(bank.df, nc=50, seed=123){ # nc for number of centers
  wss <- (nrow(bank.df)-1)*sum(apply(bank.df,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(bank.df, centers=i, nstart=25)$withinss)}
  # (nstart=25 is used here, unlike the school-data version above)
  plot(1:nc, wss, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares")}
wssBank(bank.df) # best elbow plot, cluster number=6
abline(v = 6, lty =2) # draw a vertical dashed line at the 6th cluster
############# kmeans scree plot done ###############################
#### Expectation Maximization ##################
dataDefault1a <- mclustBIC(bank.df)
dataCustomize1a <- mclustBIC(bank.df, G = 1:50, x = dataDefault1a)
plot(dataCustomize1a, G = 1:50, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters') # best value at 7th cluster
#abline(v = 7, lty =2)
##################### TASK2 ##########################################
##### 1. For Decision Tree use hclust (hierarchical clustering) ###
hcluster1 <- hclust(dist(bank.df), method='complete')
hcluster1 # prints a summary of the hclust object (the dendrogram is plotted on the next line)
plot(hcluster1)
abline(h = 2.6, lty =2)
hclustTrim1 <- cutree(hcluster1,7)
plot(hclustTrim1)
Eucl_EX1<-vegdist(bank.df,"euclidean")
Eucl1<-hclust(Eucl_EX1,method="complete") # The method is 'complete' linkage
# plot for report
plot(Eucl1,main='Complete Linkage Dendrogram',xlab='')
abline(h = 2.6, lty =2)
bank.df$PersonalLoan
chord_EX1<-vegdist(decostand(bank.df,"norm"),"euclidean")
chord_EX1
chord1<-hclust(chord_EX1,method="complete")
plot(chord1)
# Cut tree so that it has 7 clusters
hclust.clusters1 <- cutree(chord1, k = 7)
plot(hclust.clusters1)
# Compare cluster membership to actual MyGrade
table(hclust.clusters1, bank.df$PersonalLoan)
sum(apply(table(hclust.clusters1, bank.df$PersonalLoan), 1, min)) # 203
# To compare with kmeans
abcd<-kmeans(scale(bank.df), centers = 7)
table(abcd$cluster,bank.df$PersonalLoan)
# Table for Report:
table(hclust.clusters1, bank.df$PersonalLoan) # from hclust
table(abcd$cluster,bank.df$PersonalLoan) # from kmeans
#################### 2. PCA using prcomp, and PCA for Investigate() utility ###########
pca1 <- prcomp(bank.df,scale=T)
plot(pca1)
plot(pca1$sdev^2,xlab="Principal Components",ylab="Variance Explained")
plot(cumsum(pca1$sdev^2),xlab="Principal Components",ylab="Cumulative Variance Explained")
# A few plots for report from this:
PCA2 <- PCA(bank.df, ncp=10, graph=T, scale.unit=T)
Investigate(PCA2)
######## output data from PCA ##############
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
head(bank.dfPCA[,1:2])
plot(PC1~PC2, data=bank.dfPCA, cex = 0.5, lty = "solid")
#text(PC1~PC2, data=bank.dfPCA, labels=rownames(bank.df),cex=.8)
##############################################
biplot(pca1,scale=0) # stick with scale=0
pcavar1<- pca1$sdev^2
pve1<-pcavar1/sum(pcavar1)
ggbiplot(pca1,circle=T) # shows only dots
colMeans(bank.df) # Mean of each variable
apply(bank.df,2,sd) # Standard deviation of each variable
# Plot for report
par(mfrow=c(2,1))
plot(pve1,xlab='Principal Components', ylab='Proportion of variance explained',ylim=c(0.01,0.2)) # this is like elbow plot (elbow at 4)
abline(v = 4, lty =2)
plot(cumsum(pve1),xlab='Principal Components', ylab='Cumulative Proportion of variance explained',ylim=c(0.01,1))
abline(v = 4, lty =2)
abline(h = 0.56, lty =2)
par(mfrow=c(1,1))
# A total of about 56% of the variance is explained by the first 4 principal components.
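# Sketch check of that figure from the cumulative proportion:
sum(pve1[1:4]) # roughly 0.56, matching the horizontal reference line drawn above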
############## ICA ############
icaModel1 <- fastICA(bank.df, 4, alg.typ = "deflation", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200,
tol = 0.0001, verbose = F)
# plot for report
par(mfrow = c(1, 3))
plot(icaModel1$X, main = "Original Component",xlim=c(-2,2),ylim=c(-2.2,2))
plot(icaModel1$X %*% icaModel1$K, main = "PCA components",xlim=c(-2,2),ylim=c(-2.2,2))
plot(icaModel1$S, main = "ICA components",xlim=c(-2,2),ylim=c(-2.2,2))
par(mfrow = c(1, 1))
names(icaModel1)
summary(icaModel1)
icaModel1$X # pre-processed data matrix
icaModel1$K # pre-whitening matrix that projects data onto the first n.comp principal components.
icaModel1$W # estimated un-mixing matrix (no.factors by no.signals) - no. means number of
icaModel1$A # estimated mixing matrix (no.signals by no.factors) - no. means number of
icaModel1$S # estimated source matrix (The column vectors of estimated independent components (no.obs by no.factors)) - here no.factors means number of principal components
# check whitening:
# check correlations are zero
cor(icaModel1$K) # correlations are not zero
# check diagonals are 1 in covariance
cov(icaModel1$K) # diagonals are not 1
cor(icaModel1$X)
cor(icaModel1$W)
#cor(icaModel$A)
# table for report
# no correlations as off-diagonal elements are zero, good.
cor(icaModel1$S)
cov(icaModel1$S)
# diagonal elements are 1, good
######## Randomized Projections (needs more than 2 hours of runtime) #####
# https://cran.r-project.org/web/packages/RandPro/RandPro.pdf
set.seed(101)
index.data1a <- sample(1:nrow(bank.df),round(0.70*nrow(bank.df)))
Train1 <- bank.df[index.data1a,] # 70%
Test1 <- bank.df[-index.data1a,] # 30%
trainla<-as.factor(Train1$PersonalLoan) # need to declare as factor.
testla<-as.factor(Test1$PersonalLoan) # need to declare as factor.
train1<-Train1[,1:11]
test1<-Test1[,1:11]
randomProjection<-classify(train1, test1, trainla, testla)
#
# Confusion Matrix and Statistics
# Reference
# Prediction 0 1
# 0 1354 58
# 1 7 81
#
# Accuracy : 0.9567
# 95% CI : (0.9451, 0.9664)
# No Information Rate : 0.9073
# P-Value [Acc > NIR] : 2.595e-13
#
# Kappa : 0.6915
# Mcnemar's Test P-Value : 5.584e-10
#
# Sensitivity : 0.9949
# Specificity : 0.5827
# Pos Pred Value : 0.9589
# Neg Pred Value : 0.9205
# Prevalence : 0.9073
# Detection Rate : 0.9027
# Detection Prevalence : 0.9413
# Balanced Accuracy : 0.7888
#
# 'Positive' Class : 0
##################### TASK3 ##########################################
######## output data from PCA ##############
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
head(bank.dfPCA)
##############################################
set.seed(20)
Clusters1a <- kmeans(bank.dfPCA, centers=10, nstart=50)
Clusters1a
# So how well did the K-means clustering uncover the actual structure of the data contained in the PersonalLoan variable? A cross-tabulation with cluster membership is given by
confMat1a <- table(bank.dfPCA$PC12, Clusters1a$cluster)
confMat1a
# We can quantify the agreement between PersonalLoan and cluster, using the adjusted Rand index provided by the flexclust package.
randIndex(confMat1a) # 1.55073e-05 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(20)
ClustersFinal1a <- kmeans(bank.dfPCA, 6, nstart=25)
confMatFinal1a <- table(bank.dfPCA$PC12, ClustersFinal1a$cluster)
randIndex(confMatFinal1a) # 5.847072e-06 for 6 clusters (optimal cluster # from scree plot)
##### Before ####
#### standalone code to plot scree plot for kmeans ###############
wssplot1a <- function(bank.df, nc=20, seed=124){
  wss1a <- (nrow(bank.df)-1)*sum(apply(bank.df,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss1a[i] <- sum(kmeans(bank.df, centers=i, nstart=1)$withinss)}
  plot(1:nc, wss1a, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares", main="Before Dimension Reduction")}
wssplot1a(bank.df)
abline(v = 6, lty =2)
############# kmeans scree plot done ###############################
### Now ############
#### standalone code to plot scree plot for kmeans ###############
wssplot1b <- function(bank.dfPCA, nc=20, seed=134){
  wss1b <- (nrow(bank.dfPCA)-1)*sum(apply(bank.dfPCA,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss1b[i] <- sum(kmeans(bank.dfPCA, centers=i, nstart=1)$withinss)}
  plot(1:nc, wss1b, type="b", xlab="Number of Clusters", ylab="Within-cluster sum of squares", main="After Dimension Reduction")}
wssplot1b(bank.dfPCA)
abline(v = 6, lty =2)
############# kmeans scree plot done ###############################
# plot for report
par(mfrow=c(2,1))
wssplot1a(bank.df)
abline(v = 6, lty =2)
wssplot1b(bank.dfPCA)
abline(v = 6, lty =2)
par(mfrow=c(1,1))
#### Expectation Maximization ##################
# dataDefault1a <- mclustBIC(bank.df)
# dataCustomize1a <- mclustBIC(bank.df, G = 1:50, x = dataDefault1a)
# plot(dataCustomize1a, G = 1:50, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters') # best value at 7th cluster
Default1b <- mclustBIC(bank.dfPCA)
Customize1b <- mclustBIC(bank.dfPCA, G = 1:50, x = Default1b)
plot(Customize1b, G = 1:20, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters', main="After Dimension Reduction") # best value at 7th cluster
abline(v = 6, lty =2)
# plot for report
par(mfrow=c(1,2))
plot(dataCustomize1a, G = 1:20, legendArgs = list(x = "top",cex = .8),xlab='Number of clusters')
abline(v = 6, lty =2)
plot(Customize1b, G = 1:20,legendArgs = list(x = "top",cex = .8) ,xlab='Number of clusters')
abline(v = 6, lty =2)
par(mfrow=c(1,1))
##################### TASK4 ##########################################
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data2 <- sample(1:nrow(bank.dfPCA),round(0.45*nrow(bank.dfPCA)))
trainScaled2 <- bank.dfPCA[index.data2,] # training share set by the split above (0.45 here)
testScaled2 <- bank.dfPCA[-index.data2,] # remaining share (0.55 here)
NN2 <- names(trainScaled2)
ff2 <- as.formula(paste("PC12 ~", paste(NN2[!NN2 %in% "PC12"], collapse = " + ")))
ff2
NeuralNet2 <- neuralnet(ff2,data=trainScaled2,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE2 <- sum((NeuralNet2$net.result[[1]] - trainScaled2[,12])^2)
MSE_train2 <- NN_Train_SSE2/nrow(trainScaled2)
MSE_train2 # Mean Squared Error 0.005128815563
nrow(trainScaled2) # 4250
listMSEtr2=c(0.005128815563, 0.005080296713, 0.005095116637, 0.00529211685, 0.004964216109, 0.005154666754, 0.005104140276, 0.005215575954, 0.004915072731) # train data
listTr2=c(4250, 4000, 3750, 3500, 3250, 3000, 2750, 2500, 2250) # train data
NeuralNetTest2a <- neuralnet(ff2,data=testScaled2,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE2a <- sum((NeuralNetTest2a$net.result[[1]]-testScaled2[,12])^2)
MSE_test2a<-NN_Test_SSE2a/nrow(testScaled2)
MSE_test2a
nrow(testScaled2)
listMSEte2=c( 0.004891277009, 0.004930405796, 0.00510313953, 0.004967961858, 0.005117855377, 0.0051708747, 0.004934932701, 0.005154444834, 0.005108289679) # test data
listTe2 = c(750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750) # test data
# plot for report
par(mfrow=c(2,1))
plot (listMSEte2~listTe2, type = "b", xlab = "Test data size", ylab = "Test Data MSE", main = "Learning curve for test dataset (Bank Data)",ylim=c(0.0046,0.0053))
plot (listMSEtr2~listTr2, type = "b", xlab = "Training data size", ylab = "Training Data MSE", main = "Learning curve for training dataset (Bank Data)",ylim=c(0.0046,0.0053))
par(mfrow=c(1,1))
##################### TASK5 ##########################################
dataKmeansBank <- as.data.frame(ClustersFinal1$centers) # This is the output data from kmeans with 6 clusters
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data3 <- sample(1:nrow(dataKmeansBank),round(0.85*nrow(dataKmeansBank)))
trainScaled3 <- dataKmeansBank[index.data3,]
testScaled3 <- dataKmeansBank[-index.data3,]
NN3 <- names(trainScaled3)
ff3 <- as.formula(paste("PersonalLoan ~", paste(NN3[!NN3 %in% "PersonalLoan"], collapse = " + ")))
ff3
NeuralNet3 <- neuralnet(ff3,data=trainScaled3,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE3 <- sum((NeuralNet3$net.result[[1]] - trainScaled3[,12])^2)
MSE_train3 <- NN_Train_SSE3/nrow(trainScaled3)
MSE_train3 # Mean Squared Error 0.1423780342
nrow(trainScaled3) # 5
listMSEtr3=c(0.1423780342, 0.1423780342, 0.00651839908, 0.00651839908, 0.00651839908, 0.00651839908, 0.2052435674, 0.2052435674, 0.2052435674) # train data
listTr3=c(5, 5, 4, 4, 4, 4, 3, 3, 3) # train data
NeuralNetTest3 <- neuralnet(ff3,data=testScaled3,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE3 <- sum((NeuralNetTest3$net.result[[1]]-testScaled3[,12])^2)
MSE_test3<-NN_Test_SSE3/nrow(testScaled3)
MSE_test3
nrow(testScaled3)
listMSEte3=c(0.001485082381, 0.001485082381, 0.00002609464794, 0.00002609464794, 0.00002609464794, 0.00002609464794, 0.000008424406525, 0.000008424406525, 0.000008424406525) # test data
listTe3 = c(1, 1, 2, 2, 2, 2, 3, 3, 3)
# plot for report
par(mfrow=c(2,1))
plot (listMSEte3~listTe3, type = "b", xlab = "Test data cluster number", ylab = "Test Data MSE", main = "Learning curve for test dataset (Bank Data)",ylim=c(0,0.0018))
plot (listMSEtr3~listTr3, type = "b", xlab = "Training data cluster number", ylab = "Training Data MSE", main = "Learning curve for training dataset (Bank Data)",ylim=c(0,0.22))
par(mfrow=c(1,1))
################### End ##################
| /ML_Project_4.R | no_license | rsubedi1/R-Problems | R | false | false | 34,825 | r | ################################################################
# Ramesh Subedi
# Implement the following clustering algorithms:
# 1. K-means
# 2. Expectation Maximization
#
# In addition, implement the following feature dimensionality reduction algorithms
# 1. Any one feature selection algorithm (decision tree, forward selection, backward elimination,etc.)
# 2. PCA
# 3. ICA
# 4. Randomized Projections
#
#
# Tasks:
# 1. Run the clustering algorithms on your datasets and describe your observations (with plots).
# 2. Apply the dimensionality reduction algorithms on your datasets and describe your observations
# (with plots).
# 3. Run the clustering algorithms again, this time after applying dimensionality reduction. Describe
# the difference compared to previous experimentation (with plots).
# 4. Run your neural network learner from assignment 3 on the data after dimensionality reduction
# (from task 2). Explain and plot your observations (error rates, etc.)
# 5. Use the clustering results from task 1 as the new features and apply neural network learner on
# this new data consisting of only clustering results as features and class label as the output.
# Again, plot and explain your results.
###############################################################
######################################
library(data.table)
library(magrittr)
library(plyr)
library(dtplyr)
library(sandwich) # for White correction
library(lmtest) # for more advanced hypothesis testing tools
#library(tseries) # time series package
#library(DBI)
#library(RSQLite)
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
library(broom) # for tidy() function
#library(TSA)
#library(forecast)
library(vars)
#library(fpp) # for VAR forecast
library(UsingR)
#library(margins)
#library(plm) # for pooled OLS (ordinary least squares)
library(car) # for scatterplot()
#library(aod) # for probit link
library(gradDescent) # for Gradient Descent calculation
library(glmnet)
library(e1071) # for Support Vector Machine, Titanic data, etc.
library(tree) # for tree to work on Decisiion Trees
library(gbm) # for gbm (gradient boosting model)
library(adabag) # for bagging
library(rpart) #
library(party) # Recursive partitioning
library(partykit) # Pruning tree from party
library(neuralnet) # for neural net
library(caret) # for KNN
library(ROCR) # for KNN as well
library(pROC) # for KNN as well
library(boot) # for cross-validation
library(ggplot2)
library(class)
library(scales)
library(factoextra)
library(FactoInvestigate)
library(FactoMineR)
library(flexclust) # to quantify performance of kmeans
library(NbClust)
library(mclust) # for Expectation Maximization Clustering
library(ica) # for ICA (independent component analysis)
library(fastICA) # for fast ICA (independent component analysis)
library(vegan) # for hclust (hierarchical clustering) of some type
library(ggbiplot)
library(fpc) # for plotcluster
library(RandPro) # for randomized projection
# rm(list=ls()) #drop all variables
# start with a clean slate
rm(list=ls(all=TRUE))
data <- read.table("~/mlData/student-mat.csv",sep=";",header=TRUE)
data <- data %>% dplyr::select(-G1,-G2) # Drop G2 and G2 variables from data
names(data)
data$school <- as.numeric(data$school)
data$school
str(data$school)
data$sex <- as.numeric(data$sex)
data$address<-as.numeric(data$address)
data$famsize <- as.numeric(data$famsize)
data$Pstatus<- as.numeric(data$Pstatus)
data$Fjob<-as.numeric(data$Fjob)
data$Mjob<- as.numeric(data$Mjob)
data$reason<- as.numeric(data$reason)
data$guardian<- as.numeric(data$guardian)
data$schoolsup<- as.numeric(data$schoolsup)
data$famsup<- as.numeric(data$famsup)
data$paid<- as.numeric(data$paid)
data$activities<- as.numeric(data$activities)
data$nursery<- as.numeric(data$nursery)
data$higher<- as.numeric(data$higher)
data$internet<- as.numeric(data$internet)
data$romantic<- as.numeric(data$romantic)
Data <-data # Data is unscaled
names(Data)
Data$G3
# Normalize data (scaling)
normalize <- function(x){
return ((x-min(x))/(max(x)-min(x)))
}
names(data)
data$school <- normalize(data$school)
data$sex <- normalize(data$sex)
data$age <- normalize(data$age)
data$address <- normalize(data$address)
data$famsize <- normalize(data$famsize)
data$Pstatus <- normalize(data$Pstatus)
data$Medu <- normalize(data$Medu)
data$Fedu <- normalize(data$Fedu)
data$Mjob <- normalize(data$Mjob)
data$Fjob <- normalize(data$Fjob)
data$reason <- normalize(data$reason)
data$guardian <- normalize(data$guardian)
data$traveltime <- normalize(data$traveltime)
data$studytime <- normalize(data$studytime)
data$failures <- normalize(data$failures)
data$schoolsup <- normalize(data$schoolsup)
data$famsup <- normalize(data$famsup)
data$paid <- normalize(data$paid)
data$activities <- normalize(data$activities)
data$nursery <- normalize(data$nursery)
data$higher <- normalize(data$higher)
data$internet <- normalize(data$internet)
data$romantic <- normalize(data$romantic)
data$famrel <- normalize(data$famrel)
data$freetime <- normalize(data$freetime)
data$goout <- normalize(data$goout)
data$Dalc <- normalize(data$Dalc)
data$Walc <- normalize(data$Walc)
data$health <- normalize(data$health)
data$absences <- normalize(data$absences)
data$G3 <- normalize(data$G3)
# make G3 in data a binary varialbe (0 or 1) since GLM needs this.
Grade <- data$G3
Grade
MeanVal<- mean(data$G3)
MeanVal
MyGrade <- ifelse(Grade>=MeanVal,1,0)
MyGrade
# Add variable MyGrade to existing data
newData <- cbind(data,MyGrade)
# remove (drop) G3 from newData:
newData <- newData %>% dplyr::select(-G3)
data <- newData
############# TASK1 ########################################
############ K-Means from here #############################
set.seed(20)
Clusters <- kmeans(data, centers=10, nstart=50)
Clusters
# So how well did the K-means clustering uncover the actual structure of the data contained in the MyGrade variable? A cross-tabulation of MyGrade and cluster membership is given by
confMat <- table(data$MyGrade, Clusters$cluster)
confMat
# We can quantify the agreement between MyGrade and cluster, using an adjusted Rank index provided by the flexclust package.
randIndex(confMat) # 0.1038752 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(20)
ClustersFinal <- kmeans(data, 8, nstart=25)
confMatFinal <- table(data$MyGrade, ClustersFinal$cluster)
randIndex(confMatFinal) # 0.1685834 for 8 clusters (optimal cluster # from scree plot)
dataKmeans <- as.data.frame(ClustersFinal$centers) # This is the output data from kmeans with 8 clusters
#### standalone code to plot scree plot for kmeans ###############
wssplot <- function(data, nc=20, seed=1234){ # nc for number of centers
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i,nstart=1)$withinss)}
# For nstart>4 we did not see elbow, hence accepted R's default nstart=1 to observe elbow.
plot(1:nc, wss, type="b", xlab="Number of Clusters", ylab="Variance explained")}
wssplot(data) # best elbow plot, cluster number=8
abline(v = 8, lty =2) # draw a veritcal dashed line from 8th cluster
############# kmeans scree plot done ###############################
#### Expectation Maximization ##################
dataDefault <- mclustBIC(data)
dataCustomize <- mclustBIC(data, G = 1:20, x = dataDefault)
plot(dataCustomize, G = 3:20, legendArgs = list(x = "topright"),xlab='Number of clusters') # best value at 7th cluster
abline(v = 7, lty =2)
############### TASK2 ##############################################
##### 1. For Decision Tree use hclust (hierarchical clustering) ###
hcluster <- hclust(dist(data), method='complete')
hclustTrim <- cutree(hcluster,8)
plot(hclustTrim)
Eucl_EX<-vegdist(data,"euclidean")
Eucl<-hclust(Eucl_EX,method="complete") # The mothod is 'complete' linkage
# plot for report
plot(Eucl,main='Complete Linkage Dendrogram',xlab='')
chord_EX<-vegdist(decostand(data,"norm"),"euclidean")
chord_EX
chord<-hclust(chord_EX,method="complete")
plot(chord)
# Cut tree so that it has 8 clusters
hclust.clusters <- cutree(chord, k = 8)
plot(hclust.clusters)
# Compare cluster membership to actual MyGrade
table(hclust.clusters, MyGrade)
sum(apply(table(hclust.clusters, MyGrade), 1, min)) # 146
# To compare with kmeans
abc<-kmeans(scale(data), centers = 8)
table(abc$cluster,MyGrade)
# Table for Report:
table(hclust.clusters, MyGrade) # from hclust
table(abc$cluster,MyGrade) # from kmeans
#################### 2. PCA using prcomp, and PCA for Investigate() utility ###########
pca <- prcomp(data,scale=T)
plot(pca)
plot(pca$sdev^2,xlab="Principal Components",ylab="Variance Explained")
plot(cumsum(pca$sdev^2),xlab="Principal Components",ylab="Cumulative Variance Explained")
# A few plots for report from this:
PCA1 <- PCA(data,ncp=10,graph=T, scale.=T)
Investigate(PCA1)
######## output data from PCA ##############
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
head(dataPCA[,1:2])
plot(PC1~PC2, data=dataPCA, cex = 0.5, lty = "solid")
text(PC1~PC2, data=dataPCA, labels=rownames(data),cex=.8)
##############################################
biplot(pca,scale=0) # stick with scale=0
pcavar<- pca$sdev^2
pve<-pcavar/sum(pcavar)
ggbiplot(pca,circle=T) # shows only dots
colMeans(data) # Mean of each variable
apply(data,2,sd) # Standard deviation of each variable
# Plot for report
par(mfrow=c(2,1))
plot(pve,xlab='Principal Components', ylab='Proportion of variance explained',ylim=c(0.01,0.12)) # this is like elbow plot (elbow at 7)
abline(v = 7, lty =2)
plot(cumsum(pve),xlab='Principal Components', ylab='Cumulative Proportion of variance explained',ylim=c(0.01,1)) # this is like elbow plot (elbow at 7)
abline(v = 7, lty =2)
abline(h = 0.43, lty =2)
par(mfrow=c(1,1))
# The total of 42.961% variance explained by 7 principal components
############### ICA ############
icaModel <- fastICA(data, 8, alg.typ = "deflation", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200,
tol = 0.0001, verbose = F)
# plot for report
par(mfrow = c(1, 3))
plot(icaModel$X, main = "Original Component",xlim=c(-2,3),ylim=c(-2.5,2.5))
plot(icaModel$X %*% icaModel$K, main = "PCA components",xlim=c(-2,3),ylim=c(-2.5,2.5))
plot(icaModel$S, main = "ICA components",xlim=c(-2,3),ylim=c(-2.5,2.5))
par(mfrow = c(1, 1))
names(icaModel)
summary(icaModel)
icaModel$X # pre-processed data matrix
icaModel$K # pre-whitening matrix that projects data onto the first n.comp principal components.
icaModel$W # estimated un-mixing matrix (no.factors by no.signals) - no. means number of
icaModel$A # estimated mixing matrix (no.signals by no.factors) - no. means number of
icaModel$S # estimated source matrix (The column vectors of estimated independent components (no.obs by no.factors)) - here no.factors means number of principal components
# check whitening:
# check correlations are zero
cor(icaModel$K) # correlations are not zero
# check diagonals are 1 in covariance
cov(icaModel$K) # diagonals are not 1
cor(icaModel$X)
cor(icaModel$W)
#cor(icaModel$A)
# table for report
cor(icaModel$S) # no correlations
cov(icaModel$S) # diagonal elements are 1
############## Randomized Projections ##################
# https://cran.r-project.org/web/packages/RandPro/RandPro.pdf
set.seed(101)
index.data <- sample(1:nrow(data),round(0.70*nrow(data)))
Train <- data[index.data,] # 70%
Test <- data[-index.data,] # 30%
trainl<-as.factor(Train$MyGrade) # need to declare as factor.
testl<-as.factor(Test$MyGrade) # need to declare as factor.
train<-Train[,1:30]
test<-Test[,1:30]
randomProjection<-classify(train, test, trainl, testl)
# Confusion Matrix and Statistics
# Reference
# Prediction 0 1
# 0 26 12
# 1 28 53
#
# Accuracy : 0.6639
# 95% CI : (0.5715, 0.7478)
# No Information Rate : 0.5462
# P-Value [Acc > NIR] : 0.006031
#
# Kappa : 0.3045
# Mcnemar's Test P-Value : 0.017706
#
# Sensitivity : 0.4815
# Specificity : 0.8154
# Pos Pred Value : 0.6842
# Neg Pred Value : 0.6543
# Prevalence : 0.4538
# Detection Rate : 0.2185
# Detection Prevalence : 0.3193
# Balanced Accuracy : 0.6484
#
# 'Positive' Class : 0
############### TASK3 ##############################################
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
head(dataPCA)
set.seed(20)
Clusters <- kmeans(dataPCA, centers=10, nstart=50)
Clusters
# So how well did the K-means clustering uncover the actual structure of the data contained in the MyGrade variable? A cross-tabulation of MyGrade and cluster membership is given by
confMat <- table(dataPCA$PC31, Clusters$cluster)
confMat
# We can quantify the agreement between MyGrade and cluster, using an adjusted Rank index provided by the flexclust package.
randIndex(confMat) # 0.1038752 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(20)
ClustersFinal <- kmeans(dataPCA, 8, nstart=25)
confMatFinal <- table(dataPCA$PC31, ClustersFinal$cluster)
randIndex(confMatFinal) # 0.1685834 for 8 clusters (optimal cluster # from scree plot)
##### Before ####
#### standalone code to plot scree plot for kmeans ###############
wssplot <- function(data, nc=20, seed=1234){
wss <- (nrow(data)-1)*sum(apply(data,2,var))
for (i in 2:nc){
set.seed(seed)
wss[i] <- sum(kmeans(data, centers=i,nstart=1)$withinss)}
plot(1:nc, wss, type="b", xlab="Number of Clusters", ylab="Variance explained", main="Before Dimension Reduction")}
wssplot(data)
abline(v = 8, lty =2)
############# kmeans scree plot done ###############################
### Now ############
#### standalone code to plot scree plot for kmeans ###############
wssplot1 <- function(dataPCA, nc=20, seed=1234){
wss1 <- (nrow(dataPCA)-1)*sum(apply(dataPCA,2,var))
for (i in 2:nc){
set.seed(seed)
wss1[i] <- sum(kmeans(dataPCA, centers=i,nstart=1)$withinss)}
plot(1:nc, wss1, type="b", xlab="Number of Clusters", ylab="Variance explained", main="After Dimension Reduction")}
wssplot1(dataPCA)
abline(v = 8, lty =2)
############# kmeans scree plot done ###############################
# plot for report
par(mfrow=c(2,1))
wssplot(data)
abline(v = 8, lty =2)
wssplot1(dataPCA)
abline(v = 8, lty =2)
par(mfrow=c(1,1))
#### Expectation Maximization ##################
dataDefault1 <- mclustBIC(dataPCA)
dataCustomize1 <- mclustBIC(dataPCA, G = 1:20, x = dataDefault1)
plot(dataCustomize1, G = 3:20, legendArgs = list(x = "topright"),xlab='Number of clusters') # best value at 7th cluster
abline(v = 7, lty =2)
############### TASK4 ##############################################
dataPCA <- as.data.frame(pca$x) # This is the output data from prcomp analysis
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(200)
index.data <- sample(1:nrow(dataPCA),round(0.45*nrow(dataPCA)))
trainScaled <- dataPCA[index.data,] # 70%
testScaled <- dataPCA[-index.data,] # 30%
NN <- names(trainScaled)
ff <- as.formula(paste("PC31 ~", paste(NN[!NN %in% "PC31"], collapse = " + ")))
ff
# For learning curve, run the neural network for multiple times by changing the percentage of
# train/test split and find MSE for each step. Plot that MSE in a graph.
# Calculating MSE.
NeuralNet <- neuralnet(ff,data=trainScaled,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE <- sum((NeuralNet$net.result[[1]] - trainScaled[,31])^2)
MSE_train <- NN_Train_SSE/nrow(trainScaled)
MSE_train # Mean Squared Error 0.1350781249
nrow(trainScaled) # 276
listMSEtr=c(0.1482775817, 0.1171002659, 0.1394318619, 0.1350781249, 0.1082534565, 0.1131214381, 0.08753255714, 0.1022628185, 0.08153341087) # train data
listTr=c(336, 316, 296, 276, 257, 237, 217, 198, 178) # train data
NeuralNetTest <- neuralnet(ff,data=testScaled,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE <- sum((NeuralNetTest$net.result[[1]]-testScaled[,31])^2)
MSE_test<-NN_Test_SSE/nrow(testScaled)
MSE_test
nrow(testScaled)
listMSEte=c(0.04407435405, 0.04463376316, 0.02589934399, 0.05542529601, 0.1145059221, 0.0902390286, 0.09282047811, 0.1031201844, 0.09279801747) # test data
listTe = c(59, 79, 99, 119, 138, 158, 178,197, 217) # test data
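# The nine train/test splits above were collected by hand; the sketch below automates the same
# sweep. The split fractions are the ones listed in the comment above; each run re-randomises
# the split, so the numbers will not exactly reproduce the hand-collected lists.
splitFracs <- c(.85, .8, .75, .7, .65, .6, .55, .5, .45)
mseSweep <- sapply(splitFracs, function(p) {
  idx <- sample(1:nrow(dataPCA), round(p * nrow(dataPCA)))
  fit <- neuralnet(ff, data = dataPCA[idx, ], hidden = c(3, 2), threshold = 0.05, act.fct = 'logistic')
  sum((fit$net.result[[1]] - dataPCA[idx, 31])^2) / length(idx) # training MSE for this fraction
})
mseSweep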
# plot for report
par(mfrow=c(2,1))
plot (listMSEte~listTe, type = "b", xlab = "Test data size", ylab = "Test Data MSE", main = "Learning curve for test dataset (School Data)",ylim=c(0,0.15))
plot (listMSEtr~listTr, type = "b", xlab = "Training data size", ylab = "Training Data MSE", main = "Learning curve for training dataset (School Data)",ylim=c(0,0.15))
par(mfrow=c(1,1))
############### TASK5 ##############################################
dataKmeans <- as.data.frame(ClustersFinal$centers) # This is the output data from kmeans with 8 clusters
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data1 <- sample(1:nrow(dataKmeans),round(0.85*nrow(dataKmeans)))
trainScaled1 <- dataKmeans[index.data1,] # training split (fraction set in the sample() call above)
testScaled1 <- dataKmeans[-index.data1,] # remaining rows used for testing
# names(trainScaled1)
# dim(trainScaled1) # 7 31
NN1 <- names(trainScaled1)
ff1 <- as.formula(paste("MyGrade ~", paste(NN1[!NN1 %in% "MyGrade"], collapse = " + ")))
ff1
NeuralNet1 <- neuralnet(ff1,data=trainScaled1,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE1 <- sum((NeuralNet1$net.result[[1]] - trainScaled1[,31])^2)
MSE_train1 <- NN_Train_SSE1/nrow(trainScaled1)
MSE_train1 # Mean Squared Error 0.1828376401
nrow(trainScaled1) # 7
listMSEtr1=c(0.1828376401,0.006122064112,0.006122064112,0.006122064112,0.1344578679,0.1344578679,0.1266985938,0.1266985938,0.1266985938) # train data
listTr1=c(7,6,6,6,5,5,4,4,4) # train data
NeuralNetTest1 <- neuralnet(ff1,data=testScaled1,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE1 <- sum((NeuralNetTest1$net.result[[1]]-testScaled1[,31])^2)
MSE_test1<-NN_Test_SSE1/nrow(testScaled1)
MSE_test1
nrow(testScaled1)
listMSEte1=c(0.002035566436, 0.09332453108,0.09332453108,0.09332453108, 0.09794928906,0.09794928906,0.1716883991,0.1716883991, 0.1716883991) # test data
listTe1 = c(1,2,2,2,3,3,4,4,4)
# plot for report
par(mfrow=c(2,1))
plot (listMSEte1~listTe1, type = "b", xlab = "Test data cluster number", ylab = "Test Data MSE", main = "Learning curve for test dataset (School Data)",ylim=c(0,0.18))
plot (listMSEtr1~listTr1, type = "b", xlab = "Training data cluster number", ylab = "Training Data MSE", main = "Learning curve for training dataset (School Data)",ylim=c(0,0.18))
par(mfrow=c(1,1))
######################################################################
################### End of School Data ###############################
#################### Start of Bank Data ##############################
######################################################################
# Dataset from here:
# https://github.com/gchoi/Dataset/blob/master/UniversalBank.csv
Bank.df <- read.csv("~/mlData/bank.csv", header = TRUE)
names(Bank.df)
str(Bank.df)
Bank.df$PersonalLoan # already a binary variable
Bank.df$CDAccount
dim(Bank.df) # 5000 14
bank.df <- Bank.df[ , -c(1, 5)] # Drop ID and zip code columns.
names(bank.df)
dim(bank.df) # 5000 12
bank.df <- bank.df %>% dplyr::select(Age,Experience,Income,Family,CCAvg,Education,Mortgage,SecuritiesAccount,CDAccount,Online,CreditCard,PersonalLoan) # Make sure PersonalLoan is at the last position.
names(bank.df)
summary(bank.df)
# Normalize bank.df
bank.df$Age <- normalize(bank.df$Age)
bank.df$Experience <- normalize(bank.df$Experience)
bank.df$Income <- normalize(bank.df$Income)
bank.df$Family <- normalize(bank.df$Family)
bank.df$CCAvg <- normalize(bank.df$CCAvg)
bank.df$Education <- normalize(bank.df$Education)
bank.df$Mortgage <- normalize(bank.df$Mortgage)
bank.df$SecuritiesAccount <- normalize(bank.df$SecuritiesAccount)
bank.df$CDAccount <- normalize(bank.df$CDAccount)
bank.df$Online <- normalize(bank.df$Online)
bank.df$CreditCard <- normalize(bank.df$CreditCard)
bank.df$PersonalLoan <- normalize(bank.df$PersonalLoan)
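# The twelve column-by-column calls above can be written in one line (shown commented out so
# the columns are not rescaled twice); this assumes normalize() is the same column-wise scaling
# helper used throughout the script.
# bank.df[] <- lapply(bank.df, normalize)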
##################### TASK1 ##########################################
set.seed(21)
Clusters1 <- kmeans(bank.df, centers=10, nstart=50)
Clusters1
confMat1 <- table(bank.df$PersonalLoan, Clusters1$cluster)
confMat1
# We can quantify the agreement between PersonalLoan and the clustering using the adjusted Rand index provided by the flexclust package.
randIndex(confMat1) # 0.04010590441 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(22)
ClustersFinal1 <- kmeans(bank.df, 6, nstart=25)
confMatFinal1 <- table(bank.df$PersonalLoan, ClustersFinal1$cluster)
randIndex(confMatFinal1)
# 0.08745313475 for 6 clusters; this was the max value for clusters 1 to 20
# Try to find a higher Rand index by changing the number of clusters
# and watching for the maximum value (a sketch of that search follows below).
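# A sketch of that search (assumes flexclust is loaded for randIndex()): loop over candidate
# cluster counts and record the adjusted Rand index against PersonalLoan.
ariByK <- sapply(2:15, function(k) {
  set.seed(22)
  km <- kmeans(bank.df, centers = k, nstart = 25)
  randIndex(table(bank.df$PersonalLoan, km$cluster))
})
plot(2:15, ariByK, type = "b", xlab = "Number of clusters", ylab = "Adjusted Rand index")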
dataKmeansBank <- as.data.frame(ClustersFinal1$centers) # This is the output data from kmeans with 6 clusters
#### standalone code to plot scree plot for kmeans ###############
wssBank <- function(bank.df, nc=50, seed=123){ # nc for number of centers
wssBank <- (nrow(bank.df)-1)*sum(apply(bank.df,2,var))
for (i in 2:nc){
set.seed(seed)
wssBank[i] <- sum(kmeans(bank.df, centers=i,nstart=25)$withinss)}
# For nstart>4 we did not see elbow, hence accepted R's default nstart=1 to observe elbow.
plot(1:nc, wssBank, type="b", xlab="Number of Clusters", ylab="Variance explained")}
wssBank(bank.df) # best elbow plot, cluster number=8
abline(v = 6, lty =2) # draw a vertical dashed line at the chosen number of clusters
############# kmeans scree plot done ###############################
#### Expectation Maximization ##################
dataDefault1a <- mclustBIC(bank.df)
dataCustomize1a <- mclustBIC(bank.df, G = 1:50, x = dataDefault1a)
plot(dataCustomize1a, G = 1:50, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters') # best value at 7th cluster
#abline(v = 7, lty =2)
##################### TASK2 ##########################################
##### 1. For Decision Tree use hclust (hierarchical clustering) ###
hcluster1 <- hclust(dist(bank.df), method='complete')
hcluster1 # this plots the cluster
plot(hcluster1)
abline(h = 2.6, lty =2)
hclustTrim1 <- cutree(hcluster1,7)
plot(hclustTrim1)
Eucl_EX1<-vegdist(bank.df,"euclidean")
Eucl1<-hclust(Eucl_EX1,method="complete") # The method is 'complete' linkage
# plot for report
plot(Eucl1,main='Complete Linkage Dendrogram',xlab='')
abline(h = 2.6, lty =2)
bank.df$PersonalLoan
chord_EX1<-vegdist(decostand(bank.df,"norm"),"euclidean")
chord_EX1
chord1<-hclust(chord_EX1,method="complete")
plot(chord1)
# Cut tree so that it has 7 clusters
hclust.clusters1 <- cutree(chord1, k = 7)
plot(hclust.clusters1)
# Compare cluster membership to actual MyGrade
table(hclust.clusters1, bank.df$PersonalLoan)
sum(apply(table(hclust.clusters1, bank.df$PersonalLoan), 1, min)) # 203
# To compare with kmeans
abcd<-kmeans(scale(bank.df), centers = 7)
table(abcd$cluster,bank.df$PersonalLoan)
# Table for Report:
table(hclust.clusters1, bank.df$PersonalLoan) # from hclust
table(abcd$cluster,bank.df$PersonalLoan) # from kmeans
#################### 2. PCA using prcomp, and PCA for Investigate() utility ###########
pca1 <- prcomp(bank.df,scale=T)
plot(pca1)
plot(pca1$sdev^2,xlab="Principal Components",ylab="Variance Explained")
plot(cumsum(pca1$sdev^2),xlab="Principal Components",ylab="Cumulative Variance Explained")
# A few plots for report from this:
PCA2 <- PCA(bank.df,ncp=10,graph=T, scale.=T)
Investigate(PCA2)
######## output data from PCA ##############
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
head(bank.dfPCA[,1:2])
plot(PC1~PC2, data=bank.dfPCA, cex = 0.5, lty = "solid")
#text(PC1~PC2, data=bank.dfPCA, labels=rownames(bank.df),cex=.8)
##############################################
biplot(pca1,scale=0) # stick with scale=0
pcavar1<- pca1$sdev^2
pve1<-pcavar1/sum(pcavar1)
ggbiplot(pca1,circle=T) # shows only dots
colMeans(bank.df) # Mean of each variable
apply(bank.df,2,sd) # Standard deviation of each variable
# Plot for report
par(mfrow=c(2,1))
plot(pve1,xlab='Principal Components', ylab='Proportion of variance explained',ylim=c(0.01,0.2)) # elbow-style scree plot (dashed line marks 4 PCs)
abline(v = 4, lty =2)
plot(cumsum(pve1),xlab='Principal Components', ylab='Cumulative Proportion of variance explained',ylim=c(0.01,1)) # cumulative version (dashed lines mark 4 PCs and 0.56)
abline(v = 4, lty =2)
abline(h = 0.56, lty =2)
par(mfrow=c(1,1))
# In total, 42.961% of the variance is explained by the first 7 principal components
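# The figure quoted above can be checked directly from pve1 (first 7 components, as in the comment):
sum(pve1[1:7]) * 100 # cumulative percentage of variance for the first 7 PCs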
############## ICA ############
icaModel1 <- fastICA(bank.df, 4, alg.typ = "deflation", fun = "logcosh", alpha = 1,
method = "R", row.norm = FALSE, maxit = 200,
tol = 0.0001, verbose = F)
# plot for report
par(mfrow = c(1, 3))
plot(icaModel1$X, main = "Original Component",xlim=c(-2,2),ylim=c(-2.2,2))
plot(icaModel1$X %*% icaModel1$K, main = "PCA components",xlim=c(-2,2),ylim=c(-2.2,2))
plot(icaModel1$S, main = "ICA components",xlim=c(-2,2),ylim=c(-2.2,2))
par(mfrow = c(1, 1))
names(icaModel1)
summary(icaModel1)
icaModel1$X # pre-processed data matrix
icaModel1$K # pre-whitening matrix that projects data onto the first n.comp principal components.
icaModel1$W # estimated un-mixing matrix (no.factors by no.signals) - no. means number of
icaModel1$A # estimated mixing matrix (no.signals by no.factors) - no. means number of
icaModel1$S # estimated source matrix (The column vectors of estimated independent components (no.obs by no.factors)) - here no.factors means number of principal components
# check whitening:
# check correlations are zero
cor(icaModel1$K) # correlations are not zero
# check diagonals are 1 in covariance
cov(icaModel1$K) # diagonals are not 1
cor(icaModel1$X)
cor(icaModel1$W)
#cor(icaModel$A)
# table for report
# no correlations as off-diagonal elements are zero, good.
cor(icaModel1$S)
cov(icaModel1$S)
# diagonal elements are 1, good
######## Randomized Projections (needs more than 2 hours of runtime) #####
# https://cran.r-project.org/web/packages/RandPro/RandPro.pdf
set.seed(101)
index.data1a <- sample(1:nrow(bank.df),round(0.70*nrow(bank.df)))
Train1 <- bank.df[index.data1a,] # 70%
Test1 <- bank.df[-index.data1a,] # 30%
trainla<-as.factor(Train1$PersonalLoan) # need to declare as factor.
testla<-as.factor(Test1$PersonalLoan) # need to declare as factor.
train1<-Train1[,1:11]
test1<-Test1[,1:11]
randomProjection<-classify(train1, test1, trainla, testla)
#
# Confusion Matrix and Statistics
# Reference
# Prediction 0 1
# 0 1354 58
# 1 7 81
#
# Accuracy : 0.9567
# 95% CI : (0.9451, 0.9664)
# No Information Rate : 0.9073
# P-Value [Acc > NIR] : 2.595e-13
#
# Kappa : 0.6915
# Mcnemar's Test P-Value : 5.584e-10
#
# Sensitivity : 0.9949
# Specificity : 0.5827
# Pos Pred Value : 0.9589
# Neg Pred Value : 0.9205
# Prevalence : 0.9073
# Detection Rate : 0.9027
# Detection Prevalence : 0.9413
# Balanced Accuracy : 0.7888
#
# 'Positive' Class : 0
##################### TASK3 ##########################################
######## output data from PCA ##############
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
head(bank.dfPCA)
##############################################
set.seed(20)
Clusters1a <- kmeans(bank.dfPCA, centers=10, nstart=50)
Clusters1a
# So how well did the K-means clustering uncover the actual structure of the data? A cross-tabulation with cluster membership is given by
confMat1a <- table(bank.dfPCA$PC12, Clusters1a$cluster)
confMat1a
# We can quantify the agreement with the clustering using the adjusted Rand index provided by the flexclust package.
randIndex(confMat1a) # 1.55073e-05 for 10 clusters
# The adjusted Rand index provides a measure of the agreement between two partitions, adjusted for chance. It ranges from -1 (no agreement) to 1 (perfect agreement).
set.seed(20)
ClustersFinal1a <- kmeans(bank.dfPCA, 6, nstart=25)
confMatFinal1a <- table(bank.dfPCA$PC12, ClustersFinal1a$cluster)
randIndex(confMatFinal1a) # 5.847072e-06 for 6 clusters (optimal cluster # from scree plot)
##### Before ####
#### standalone code to plot scree plot for kmeans ###############
wssplot1a <- function(bank.df, nc=20, seed=124){
wss1a <- (nrow(bank.df)-1)*sum(apply(bank.df,2,var))
for (i in 2:nc){
set.seed(seed)
wss1a[i] <- sum(kmeans(bank.df, centers=i,nstart=1)$withinss)}
plot(1:nc, wss1a, type="b", xlab="Number of Clusters", ylab="Variance explained", main="Before Dimension Reduction")}
wssplot1a(bank.df)
abline(v = 6, lty =2)
############# kmeans scree plot done ###############################
### Now ############
#### standalone code to plot scree plot for kmeans ###############
wssplot1b <- function(bank.dfPCA, nc=20, seed=134){
wss1b <- (nrow(bank.dfPCA)-1)*sum(apply(bank.dfPCA,2,var))
for (i in 2:nc){
set.seed(seed)
wss1b[i] <- sum(kmeans(bank.dfPCA, centers=i,nstart=1)$withinss)}
plot(1:nc, wss1b, type="b", xlab="Number of Clusters", ylab="Variance explained", main="After Dimension Reduction")}
wssplot1b(bank.dfPCA)
abline(v = 6, lty =2)
############# kmeans scree plot done ###############################
# plot for report
par(mfrow=c(2,1))
wssplot1a(bank.df)
abline(v = 6, lty =2)
wssplot1b(bank.dfPCA)
abline(v = 6, lty =2)
par(mfrow=c(1,1))
#### Expectation Maximization ##################
# dataDefault1a <- mclustBIC(bank.df)
# dataCustomize1a <- mclustBIC(bank.df, G = 1:50, x = dataDefault1a)
# plot(dataCustomize1a, G = 1:50, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters') # best value at 7th cluster
Default1b <- mclustBIC(bank.dfPCA)
Customize1b <- mclustBIC(bank.dfPCA, G = 1:50, x = Default1b)
plot(Customize1b, G = 1:20, legendArgs = list(x = "top",cex=.8),xlab='Number of clusters', main="After Dimension Reduction") # best value at 7th cluster
abline(v = 6, lty =2)
# plot for report
par(mfrow=c(1,2))
plot(dataCustomize1a, G = 1:20, legendArgs = list(x = "top",cex = .8),xlab='Number of clusters')
abline(v = 6, lty =2)
plot(Customize1b, G = 1:20,legendArgs = list(x = "top",cex = .8) ,xlab='Number of clusters')
abline(v = 6, lty =2)
par(mfrow=c(1,1))
##################### TASK4 ##########################################
bank.dfPCA <- as.data.frame(pca1$x) # This is the output data from prcomp analysis
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data2 <- sample(1:nrow(bank.dfPCA),round(0.45*nrow(bank.dfPCA)))
trainScaled2 <- bank.dfPCA[index.data2,] # training split (fraction set in the sample() call above)
testScaled2 <- bank.dfPCA[-index.data2,] # remaining rows used for testing
NN2 <- names(trainScaled2)
ff2 <- as.formula(paste("PC12 ~", paste(NN2[!NN2 %in% "PC12"], collapse = " + ")))
ff2
NeuralNet2 <- neuralnet(ff2,data=trainScaled2,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE2 <- sum((NeuralNet2$net.result[[1]] - trainScaled2[,12])^2)
MSE_train2 <- NN_Train_SSE2/nrow(trainScaled2)
MSE_train2 # Mean Squared Error 0.005128815563
nrow(trainScaled2) # 4250
listMSEtr2=c(0.005128815563, 0.005080296713, 0.005095116637, 0.00529211685, 0.004964216109, 0.005154666754, 0.005104140276, 0.005215575954, 0.004915072731) # train data
listTr2=c(4250, 4000, 3750, 3500, 3250, 3000, 2750, 2500, 2250) # train data
NeuralNetTest2a <- neuralnet(ff2,data=testScaled2,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE2a <- sum((NeuralNetTest2a$net.result[[1]]-testScaled2[,12])^2)
MSE_test2a<-NN_Test_SSE2a/nrow(testScaled2)
MSE_test2a
nrow(testScaled2)
listMSEte2=c( 0.004891277009, 0.004930405796, 0.00510313953, 0.004967961858, 0.005117855377, 0.0051708747, 0.004934932701, 0.005154444834, 0.005108289679) # test data
listTe2 = c(750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750) # test data
# plot for report
par(mfrow=c(2,1))
plot (listMSEte2~listTe2, type = "b", xlab = "Test data size", ylab = "Test Data MSE", main = "Learning curve for test dataset (Bank Data)",ylim=c(0.0046,0.0053))
plot (listMSEtr2~listTr2, type = "b", xlab = "Training data size", ylab = "Training Data MSE", main = "Learning curve for training dataset (Bank Data)",ylim=c(0.0046,0.0053))
par(mfrow=c(1,1))
##################### TASK5 ##########################################
dataKmeansBank <- as.data.frame(ClustersFinal1$centers) # This is the output data from kmeans with 6 clusters
# split: .85, .8, .75, .7, .65, .6, .55, .5, .45
set.seed(201)
index.data3 <- sample(1:nrow(dataKmeansBank),round(0.85*nrow(dataKmeansBank)))
trainScaled3 <- dataKmeansBank[index.data3,]
testScaled3 <- dataKmeansBank[-index.data3,]
NN3 <- names(trainScaled3)
ff3 <- as.formula(paste("PersonalLoan ~", paste(NN3[!NN3 %in% "PersonalLoan"], collapse = " + ")))
ff3
NeuralNet3 <- neuralnet(ff3,data=trainScaled3,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Train_SSE3 <- sum((NeuralNet3$net.result[[1]] - trainScaled3[,12])^2)
MSE_train3 <- NN_Train_SSE3/nrow(trainScaled3)
MSE_train3 # Mean Squared Error 0.1423780342
nrow(trainScaled3) # 5
listMSEtr3=c(0.1423780342, 0.1423780342, 0.00651839908, 0.00651839908, 0.00651839908, 0.00651839908, 0.2052435674, 0.2052435674, 0.2052435674) # train data
listTr3=c(5, 5, 4, 4, 4, 4, 3, 3, 3) # train data
NeuralNetTest3 <- neuralnet(ff3,data=testScaled3,hidden=c(3,2), threshold = 0.05, act.fct='logistic')
NN_Test_SSE3 <- sum((NeuralNetTest3$net.result[[1]]-testScaled3[,12])^2)
MSE_test3<-NN_Test_SSE3/nrow(testScaled3)
MSE_test3
nrow(testScaled3)
listMSEte3=c(0.001485082381, 0.001485082381, 0.00002609464794, 0.00002609464794, 0.00002609464794, 0.00002609464794, 0.000008424406525, 0.000008424406525, 0.000008424406525) # test data
listTe3 = c(1, 1, 2, 2, 2, 2, 3, 3, 3)
# plot for report
par(mfrow=c(2,1))
plot (listMSEte3~listTe3, type = "b", xlab = "Test data cluster number", ylab = "Test Data MSE", main = "Learning curve for test dataset (Bank Data)",ylim=c(0,0.0018))
plot (listMSEtr3~listTr3, type = "b", xlab = "Training data cluster number", ylab = "Training Data MSE", main = "Learning curve for training dataset (Bank Data)",ylim=c(0,0.22))
par(mfrow=c(1,1))
################### End ##################
|
# @file Logging.R
#
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of OhdsiRTools
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
registerDefaultHandlers <- function() {
logBaseError <- function() {
logFatal(gsub("\n", " ", geterrmessage()))
}
options(error = logBaseError)
options(warning.expression = quote(for (i in 1:sys.nframe()) {
if (sys.call(-i)[[1]] == ".signalSimpleWarning" && length(sys.call(-i)) > 1) {
OhdsiRTools::logWarn(sys.call(-i)[[2]])
break
}
}))
}
getDefaultLoggerSettings <- function() {
return(list(loggers = list(createLogger())))
}
getLoggerSettings <- function() {
settings <- getOption("loggerSettings")
if (is.null(settings)) {
settings <- getDefaultLoggerSettings()
}
if (is.null(getOption("warning.expression"))) {
registerDefaultHandlers()
}
return(settings)
}
setLoggerSettings <- function(settings) {
options(loggerSettings = settings)
}
#' Create console appender
#'
#' @details
#' Creates an appender that will write to the console.
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param layout The layout to be used by the appender.
#'
#' @export
createConsoleAppender <- function(layout = layoutSimple) {
.Deprecated("ParallelLogger::createConsoleAppender")
appendFunction <- function(this, level, message) {
if (level == "WARN" || level == "ERROR") {
writeLines(message, con = stderr())
} else if (level != "FATAL") {
# Note: Fatal messages should originate from stop(), which will print its own message.
writeLines(message, con = stdout())
}
}
appender <- list(appendFunction = appendFunction, layout = layout)
class(appender) <- "Appender"
return(appender)
}
#' Create file appender
#'
#' @details
#' Creates an appender that will write to a file.
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param layout The layout to be used by the appender.
#' @param fileName The name of the file to write to.
#'
#' @export
createFileAppender <- function(layout = layoutParallel, fileName) {
.Deprecated("ParallelLogger::createFileAppender")
appendFunction <- function(this, level, message) {
con <- file(fileName, open = "at", blocking = FALSE)
writeLines(text = message, con = con)
flush(con)
close(con)
}
appender <- list(appendFunction = appendFunction, layout = layout, fileName = fileName)
class(appender) <- "Appender"
return(appender)
}
#' Create a logger
#'
#' @details
#' Creates a logger that will log messages to its appenders. The logger will only log messages at a
#' level equal to or higher than its threshold. For example, if the threshold is "INFO" then messages
#' marked "INFO" will be logged, but messages marked "TRACE" will not. The order of levels is "TRACE",
#' "DEBUG", "INFO", "WARN", "ERROR, "and FATAL".
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param name A name for the logger.
#' @param threshold The threshold to be used for reporting.
#' @param appenders A list of one or more appenders as created for example using the
#' \code{\link{createConsoleAppender}} or \code{\link{createFileAppender}} function.
#'
#' @return
#' An object of type \code{Logger}, to be used with the \code{\link{registerLogger}} function.
#'
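#' @examples
#' \dontrun{
#' # Minimal usage sketch (hypothetical; the function is deprecated in favour of ParallelLogger):
#' # only messages at or above the threshold reach the appenders.
#' logger <- createLogger(name = "CONSOLE", threshold = "WARN")
#' registerLogger(logger)
#' logWarn("at the threshold, so it is logged")
#' logInfo("below the threshold, so it is skipped")
#' unregisterLogger("CONSOLE")
#' }
#'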
#' @export
createLogger <- function(name = "SIMPLE",
threshold = "INFO",
appenders = list(createConsoleAppender())) {
.Deprecated("ParallelLogger::createLogger")
for (appender in appenders) if (!is(appender, "Appender"))
stop("All appenders must be of class 'Appender'")
logFunction <- function(this, level, message) {
for (appender in this$appenders) {
formatted <- appender$layout(level, message)
appender$appendFunction(appender, level, formatted)
}
}
logger <- list(name = name,
logFunction = logFunction,
threshold = threshold,
appenders = appenders)
class(logger) <- "Logger"
return(logger)
}
#' Register a logger
#'
#' @details
#' Registers a logger as created using the \code{\link{createLogger}} function to the logging system.
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param logger An object of type \code{Logger} as created using the \code{\link{createLogger}}
#' function.
#'
#' @export
registerLogger <- function(logger) {
.Deprecated("ParallelLogger::registerLogger")
if (!is(logger, "Logger"))
stop("Logger must be of class 'Logger'")
settings <- getLoggerSettings()
settings$loggers[[length(settings$loggers) + 1]] <- logger
setLoggerSettings(settings)
invisible(NULL)
}
#' Unregister a logger
#'
#' @details
#' Unregisters a logger from the logging system.
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param x Can either be an integer (e.g. 2 to remove the second logger), the name of the logger, or
#' the logger object itself.
#'
#' @return
#' Returns TRUE if the logger was removed.
#'
#' @export
unregisterLogger <- function(x) {
.Deprecated("ParallelLogger::unregisterLogger")
settings <- getLoggerSettings()
if (is.integer(x) || is.numeric(x)) {
if (x <= length(settings$loggers)) {
settings$loggers[[x]] <- NULL
setLoggerSettings(settings)
return(TRUE)
} else {
warning("Could not find logger ", x)
return(FALSE)
}
} else if (is.character(x)) {
for (i in 1:length(settings$loggers)) {
if (settings$loggers[[i]]$name == x) {
settings$loggers[[i]] <- NULL
setLoggerSettings(settings)
return(TRUE)
}
}
warning("Could not find logger ", x)
return(FALSE)
} else if (is(x, "Logger")) {
for (i in 1:length(settings$loggers)) {
if (identical(settings$loggers[[i]], x)) {
settings$loggers[[i]] <- NULL
setLoggerSettings(settings)
return(TRUE)
}
}
}
warning("Could not find logger ", x)
return(FALSE)
}
#' Get all registered loggers
#'
#' @return
#' Returns all registered loggers.
#'
#' @export
getLoggers <- function() {
.Deprecated("ParallelLogger::getLoggers")
settings <- getLoggerSettings()
return(settings$loggers)
}
#' Remove all registered loggers
#'
#' @export
clearLoggers <- function() {
settings <- getLoggerSettings()
settings$loggers <- list()
setLoggerSettings(settings)
}
#' Add the default console logger
#'
#' @details
#' Creates a logger that writes to the console using the "INFO" threshold and the
#' \code{\link{layoutSimple}} layout.
#'
#' @export
addDefaultConsoleLogger <- function() {
registerLogger(createLogger())
}
#' Add the default file logger
#'
#' @details
#' Creates a logger that writes to a file using the "TRACE" threshold and the
#' \code{\link{layoutParallel}} layout. The output can be viewed with the built-in log viewer that can
#' be started using \code{\link{launchLogViewer}}.
#'
#' @param fileName The name of the file to write to.
#'
#' @export
addDefaultFileLogger <- function(fileName) {
.Deprecated("ParallelLogger::addDefaultFileLogger")
registerLogger(createLogger(name = "DEFAULT",
threshold = "TRACE",
appenders = list(createFileAppender(layout = layoutParallel,
fileName = fileName))))
}
levelToInt <- function(level) {
if (level == "TRACE")
return(1)
if (level == "DEBUG")
return(2)
if (level == "INFO")
return(3)
if (level == "WARN")
return(4)
if (level == "ERROR")
return(5)
if (level == "FATAL")
return(6)
}
log <- function(level, ...) {
.Deprecated("ParallelLogger::log")
message <- .makeMessage(...)
settings <- getLoggerSettings()
for (logger in settings$loggers) {
if (levelToInt(level) >= levelToInt(logger$threshold)) {
logger$logFunction(this = logger, level = level, message = message)
}
}
}
#' Log a message at the TRACE level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logTrace <- function(...) {
log(level = "TRACE", ...)
}
#' Log a message at the DEBUG level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logDebug <- function(...) {
log(level = "DEBUG", ...)
}
#' Log a message at the INFO level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers.
#'
#' Deprecated. This function has moved to ParallelLogger.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logInfo <- function(...) {
log(level = "INFO", ...)
}
#' Log a message at the WARN level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers. This
#' function is automatically called when a warning is thrown, and should not be called directly. Use
#' \code{warning()} instead.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logWarn <- function(...) {
log(level = "WARN", ...)
}
#' Log a message at the ERROR level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logError <- function(...) {
log(level = "ERROR", ...)
}
#' Log a message at the FATAL level
#'
#' @details
#' Log a message at the specified level. The message will be sent to all the registered loggers. This
#' function is automatically called when an error occurs, and should not be called directly. Use
#' \code{stop()} instead.
#'
#' @param ... Zero or more objects which can be coerced to character (and which are pasted together
#' with no separator).
#'
#' @export
logFatal <- function(...) {
log(level = "FATAL", ...)
}
#' Simple logging layout
#'
#' @description
#' A layout function to be used with an appender. This layout simply includes the message itself.
#'
#' @param level The level of the message (e.g. "INFO")
#' @param message The message to layout.
#'
#' @export
layoutSimple <- function(level, message) {
# Avoid check notes about non-used parameters:
if (level == "WARN") {
message <- paste("Warning:", message)
}
return(message)
}
#' Logging layout with timestamp
#'
#' @description
#' A layout function to be used with an appender. This layout adds the time to the message.
#'
#' @param level The level of the message (e.g. "INFO")
#' @param message The message to layout.
#'
#' @export
layoutTimestamp <- function(level, message) {
# Avoid check notes about non-used parameters:
missing(level)
time <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
sprintf("%s\t%s", time, message)
}
#' Logging layout for parallel computing
#'
#' @description
#' A layout function to be used with an appender. This layout adds the time, thread, level, package
#' name, and function name to the message.
#'
#' @param level The level of the message (e.g. "INFO")
#' @param message The message to layout.
#'
#' @export
layoutParallel <- function(level, message) {
time <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
threadNumber <- getOption("threadNumber")
if (is.null(threadNumber)) {
threadLabel <- "Main thread"
} else {
threadLabel <- paste("Thread", threadNumber)
}
functionName <- ""
packageName <- ""
if (sys.nframe() > 4) {
for (i in 4:sys.nframe()) {
packageName <- utils::packageName(env = sys.frame(-i))
if (length(packageName) != 0 && packageName != "base" && packageName != "snow" && packageName !=
"OhdsiRTools") {
if (class(sys.call(-i)[[1]]) == "function") {
# Using do.call without quotes means the function name is lost
functionName <- ""
} else {
functionName <- as.character(sys.call(-i)[[1]])
}
break
}
}
}
if (length(functionName) == 0) {
functionName <- ""
} else {
functionName <- functionName[length(functionName)]
}
if (is.null(packageName)) {
packageName <- ""
}
message <- gsub("\n", " ", message)
sprintf("%s\t[%s]\t%s\t%s\t%s\t%s", time, threadLabel, level, packageName, functionName, message)
}
#' Logging layout with stacktrace
#'
#' @description
#' A layout function to be used with an appender. This layout adds the stack trace to the message.
#'
#' @param level The level of the message (e.g. "INFO")
#' @param message The message to layout.
#'
#' @export
layoutStackTrace <- function(level, message) {
# Avoid check notes about non-used parameters:
missing(level)
time <- format(Sys.time(), "%Y-%m-%d %H:%M:%S")
stackTrace <- c()
nFrame <- -4
fun <- sys.call(nFrame)
while (!is.null(fun) && class(fun[[1]]) != "function") {
stackTrace <- c(stackTrace, as.character(fun[[1]]))
nFrame <- nFrame - 1
fun <- sys.call(nFrame)
}
stackTrace <- paste(rev(stackTrace), collapse = " - ")
sprintf("%s\t%s\t%s", time, stackTrace, message)
}
| /R/Logging.R | permissive | parkdongsu/OhdsiRTools | R | false | false | 14,214 | r |
|
# Jake Yeung
# Date of Creation: 2018-09-06
# File: ~/projects/sleep_deprivation/scripts/functions/ChangeMdlNames.R
# Change model names as a function so we can change things during plotting in a global way
MdlKeys <- function(){
keys <- c("flat", "sleep", "circadian", "ampfree.step", "mix", "mixedaf")
return(keys)
}
MdlVals <- function(){
# vals <- c("F", "S", "C", "A", "S+C", "S+A")
vals <- c("F", "S", "C_A", "A", "S+C", "S+C_A")
return(vals)
}
MdlHash <- function(){
# mdl.hash <- hash(list("flat" = "F",
# "sleep" = "S",
# "circadian" = "C",
# "ampfree.step" = "A",
# "mix" = "S+C",
# "mixedaf" = "S+A"))
mdl.hash <- hash(MdlKeys(),
MdlVals())
return(mdl.hash)
}
ConvertMdlNames <- function(mdl.old, mdl.hash, jlevels){
if (missing(mdl.hash)){
mdl.hash <- MdlHash()
}
if (missing(jlevels)){
jlevels <- MdlVals()
}
mdl.old <- as.character(mdl.old)
mdl.new <- sapply(mdl.old, function(x){
xnew <- mdl.hash[[x]]
if (is.null(xnew)){
warning(paste("Cannot convert", x, "using hash", mdl.hash, "defaulting NA"))
xnew <- NA
}
return(xnew)
})
# set as factor with levels
mdl.new <- factor(mdl.new, levels = jlevels)
return(mdl.new)
}
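# Hedged usage sketch (assumes the 'hash' package is loaded, since MdlHash() builds a hash object):
# model codes from fits are mapped to the short labels used in plots, ordered as in MdlVals().
ConvertMdlNames(c("flat", "sleep", "mixedaf")) # expected labels: "F", "S", "S+C_A"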
| /R/MdlNames.R | no_license | jakeyeung/SleepDepAnalysis | R | false | false | 1,329 | r |
|
\name{summary.mlist}
\alias{summary.mlist}
\title{ mlist Summaries}
\description{ Generates a summary for the data.frame generated by as.data.frame.mlist }
\usage{\method{summary}{mlist}(object,...)}
\arguments{
\item{...}{ further arguments}
\item{object}{ mlist}
}
\author{Witold Wolski \email{wolski@molgen.mpg.de}}
\seealso{\code{\link{summary.massvector}},
\code{\link{as.data.frame.mlist}},
}
\examples{
data(mvl)
summary(mvl)
data(mvl)
mvl<-mvl[1:100]
data(cal)
test <- getintcalib(mvl,cal,error=500)
summary(test)
}
\keyword{misc}
| /man/summary.mlist.Rd | no_license | cran/mscalib | R | false | false | 569 | rd |
|
#####################
#PREPARATION
#####################
# Packages
setwd("~/Desktop")
if(!require("dplyr")){install.packages("dplyr")} #Verifies whether the package is installed, if not then it installs it
library("dplyr", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library") #Loads the package
library("e1071")
data_new<- read.csv("~/Desktop/New Example data file.csv")
#Data pre-processing
data_svm = data_new
data_svm = data.frame(scale(data_svm[,-c(1:2)],scale = T, center = T) )
data_svm[,ncol(data_svm)+1] = rep_len(1:12,nrow(data_svm))
colnames(data_svm) = c(colnames(data_svm[,-ncol(data_svm)]),"#month")
testing = data_svm[c(193:240),]
training = data_svm[-c(193:240),]
training = na.omit(training)
# Model Estimation - Radial Kernel
tune_svm = tune(svm, Dependent~., data = training, kernel = "radial",ranges = list(cost = 1:20, gamma = 2^c(-6:2),epsilon = 10^c(-3:-1))) #It fits several models and estimates by CV the MSE. It chooses the best model.
summary(tune_svm)
svm_fit = svm(Dependent~., data = training, kernel = "radial",gamma=tune_svm$best.parameters$gamma,cost = tune_svm$best.parameters$cost, epsilon= tune_svm$best.parameters$epsilon)
y_predict = predict(svm_fit, newdata = testing[,-1])
#####################
#GRADIENT ESTIMATION
#####################
#The gradient of the model trained above will be estimated separately for each point provided.
#The derivatives are estimated using numerical methods given that we do not have an analytical expression for them
#The method used hereby is the fourth-order method for the first derivative, namely
# df(x)/dx = 1/12/h*[-f(x+2h)+8f(x+h)-8f(x-h)+f(x-2h)]
# Where h is the size of the step to estimate the derivative
gradient = function(point){
y = point
x_aux = as.data.frame(matrix(data=0*1:(4*length(y)*length(y)),nrow = length(y)*4, ncol = length(y)))
colnames(x_aux) = colnames(y)
for (i in 1:nrow(x_aux)){
x_aux[i,1:ncol(x_aux)]= y
}
coefh = c(2,1,-1,-2)
for (i in 1:ncol(x_aux)){
h = abs(0.05*y[i])
for (j in 1:4){
x_aux[(i-1)*4+j,i] = x_aux[(i-1)*4+j,i]+coefh[j]*h
}
}
x_aux$t_id = point[,1]
y_predict = predict(svm_fit, newdata = x_aux)
f_aux = as.data.frame(matrix(data=y_predict,nrow = ncol(y), ncol = 4,byrow = T))
rownames(f_aux) = colnames(y)
colnames(f_aux) = c("2h","h","-h","-2h")
h = abs(0.05*y)
elasticity = 1/12/h*(-f_aux$`2h`+8*f_aux$h-8*f_aux$`-h`+f_aux$`-2h`)
return (elasticity)
}
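# Sanity check of the same fourth-order stencil on a function with a known derivative
# (illustration only, not part of the analysis): for f(x) = x^3 at x = 2 the exact derivative is 12.
fchk <- function(x) x^3
x0 <- 2
hchk <- abs(0.05 * x0)
(-fchk(x0 + 2 * hchk) + 8 * fchk(x0 + hchk) - 8 * fchk(x0 - hchk) + fchk(x0 - 2 * hchk)) / (12 * hchk) # ~12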
#####################
#SUMMARY TABLES
#####################
#Summary Table Version 1: Just for forecast
#Estimating elasticity for forecast
y = testing[,-1]
elasticity_matrix = as.data.frame(matrix(0,nrow = nrow(y),ncol = (ncol(y))))
colnames(elasticity_matrix) = colnames(y)
for (i in 1:nrow(y)){
elasticity_matrix[i,]=gradient(y[i,])
}
elasticity_matrix
#summary_testing Table
ny = nrow(testing)/12 #Number of years in the summary_testing, i.e. number of rows
summary_testing = matrix(0*1:(ny*(4*ncol(testing[,-1])+5)), nrow = ny, ncol = 4*ncol(testing[,-1])+5)
colnames(summary_testing) = c("Rows","Year","y","Annual change dependent","Percentual change dependent (%)",paste("Annual Change",names(testing[,-1])),paste("Elasticity",names(testing[,-1])),paste("Contribution",names(testing[,-1])),paste("Percentual Contribution",names(testing[,-1]),"%"))
summary_testing = as.data.frame(summary_testing)
for (i in 1:ny){
summary_testing[i,1] = paste("Row",1+12*(i-1),"to",12*i,sep = " ")
summary_testing[i,2] = paste("Year",i,sep = " ")
summary_testing[i,3] = y_predict[1+12*(i-1)] #Value of dependent at time t
summary_testing[i,4] = y_predict[12*i]-y_predict[1+12*(i-1)] #Difference of dependent at time t+12 and at time t
summary_testing[i,5] = summary_testing[i,4]/summary_testing[i,3]*100 #Percentual change of dependent
summary_testing[i,6:12] = as.numeric(testing[12*i,-1]-testing[1+12*(i-1),-1]) #Difference of independents at time t+12 and at time t
summary_testing[i,13:19] = as.numeric(elasticity_matrix[1+12*(i-1),]) #Elasticities of independents at time t
summary_testing[i,20:26] = summary_testing[i,6:12]*summary_testing[i,13:19] #Total differentials = difference * elasticities
summary_testing[i,27:33] = summary_testing[i,20:26]/summary_testing[i,3]*100 #Change as percent of dependent
}
write.csv2(x = summary_testing[,c(1:2,5,27:33)],file = "summary_testing.csv")
#summary Table Version 2: Just for training points
#Estimating elasticity for training points
y = data_svm[,-1]
elasticity_matrix = as.data.frame(matrix(0,nrow = nrow(y),ncol = (ncol(y))))
colnames(elasticity_matrix) = colnames(y)
for (i in 1:nrow(y)){
elasticity_matrix[i,]=gradient(y[i,])
}
elasticity_matrix
#summary_training Table Training
ny = nrow(training)/12 #Number of years in the summary_training, i.e. number of rows
summary_training = matrix(0*1:(ny*(4*ncol(testing[,-1])+5)), nrow = ny, ncol = 4*ncol(testing[,-1])+5)
colnames(summary_training) = c("Rows","Year","y","Annual change dependent","Percentual change dependent (%)",paste("Annual Change",names(testing[,-1])),paste("Elasticity",names(testing[,-1])),paste("Contribution",names(testing[,-1])),paste("Percentual Contribution",names(testing[,-1]),"%"))
summary_training = as.data.frame(summary_training)
for (i in 1:ny){
summary_training[i,1] = paste("Row",1+12*(i-1),"to",12*i,sep = " ")
summary_training[i,2] = paste("Year",i,sep = " ")
summary_training[i,3] = training[1+12*(i-1),1] #Value of dependent at time t
summary_training[i,4] = training[12*i,1]-training[1+12*(i-1),1] #Difference of dependent at time t+12 and at time t
summary_training[i,5] = summary_training[i,4]/summary_training[i,3]*100 #Percentual change of dependent
summary_training[i,6:12] = as.numeric(training[12*i,-1]-training[1+12*(i-1),-1]) #Difference of independents at time t+12 and at time t
summary_training[i,13:19] = as.numeric(elasticity_matrix[1+12*(i-1),]) #Elasticities of independents at time t
summary_training[i,20:26] = summary_training[i,6:12]*summary_training[i,13:19] #Total differentials = difference * elasticities
summary_training[i,27:33] = summary_training[i,20:26]/summary_training[i,3]*100 #Change as percent of dependent
}
write.csv2(x = summary_training[,c(1:2,5,27:33)],file = "summary_training.csv")
#summary Table Version 3: For all points
final_summary = rbind(summary_testing, summary_training)
write.csv2(x = final_summary[,c(1:2,5,27:33)],file = "final_summary.csv")
| /Restaurant data/Elasticity.R | no_license | alex-papaioannou/Sample-Code-DSSG | R | false | false | 6,508 | r | #####################
#PREPARATION
#####################
# Packages
setwd("~/Desktop")
if(!require("dplyr")){install.packages("dplyr")} #Verifies whether the package is installed, if not then it installs it
library("dplyr", lib.loc="/Library/Frameworks/R.framework/Versions/3.4/Resources/library") #Loads the package
library("e1071")
data_new<- read.csv("~/Desktop/New Example data file.csv")
#Data pre-processing
data_svm = data_new
data_svm = data.frame(scale(data_svm[,-c(1:2)],scale = T, center = T) )
data_svm[,ncol(data_svm)+1] = rep_len(1:12,nrow(data_svm))
colnames(data_svm) = c(colnames(data_svm[,-ncol(data_svm)]),"#month")
testing = data_svm[c(193:240),]
training = data_svm[-c(193:240),]
training = na.omit(training)
# Model Estimation - Radial Kernel
tune_svm = tune(svm, Dependent~., data = training, kernel = "radial",ranges = list(cost = 1:20, gamma = 2^c(-6:2),epsilon = 10^c(-3:-1))) #It fits several models and estimates by CV the MSE. It chooses the best model.
summary(tune_svm)
svm_fit = svm(Dependent~., data = training, kernel = "radial",gamma=tune_svm$best.parameters$gamma,cost = tune_svm$best.parameters$cost, epsilon= tune_svm$best.parameters$epsilon)
y_predict = predict(svm_fit, newdata = testing[,-1])
#####################
#GRADIENT ESTIMATION
#####################
#The gradient of the model trained above will be estimated separately for each point provided.
#The derivatives are estimated using numerical methods given that we do not have an analytical expression for them
#The method used hereby is the fourth-order method for the first derivative, namely
# df(x)/dx = 1/12/h*[-f(x+2h)+8f(x+h)-8f(x-h)+f(x-2h)]
# Where h is the size of the step to estimate the derivative
gradient = function(point){
y = point
x_aux = as.data.frame(matrix(data=0*1:(4*length(y)*length(y)),nrow = length(y)*4, ncol = length(y)))
colnames(x_aux) = colnames(y)
for (i in 1:nrow(x_aux)){
x_aux[i,1:ncol(x_aux)]= y
}
coefh = c(2,1,-1,-2)
for (i in 1:ncol(x_aux)){
h = abs(0.05*y[i])
for (j in 1:4){
x_aux[(i-1)*4+j,i] = x_aux[(i-1)*4+j,i]+coefh[j]*h
}
}
x_aux$t_id = point[,1]
y_predict = predict(svm_fit, newdata = x_aux)
f_aux = as.data.frame(matrix(data=y_predict,nrow = ncol(y), ncol = 4,byrow = T))
rownames(f_aux) = colnames(y)
colnames(f_aux) = c("2h","h","-h","-2h")
h = abs(0.05*y)
elasticity = 1/12/h*(-f_aux$`2h`+8*f_aux$h-8*f_aux$`-h`+f_aux$`-2h`)
return (elasticity)
}
#####################
#SUMMARY TABLES
#####################
#Summary Table Version 1: Just for forecast
#Estimating elasticity for forecast
y = testing[,-1]
elasticity_matrix = as.data.frame(matrix(0,nrow = nrow(y),ncol = (ncol(y))))
colnames(elasticity_matrix) = colnames(y)
for (i in 1:nrow(y)){
elasticity_matrix[i,]=gradient(y[i,])
}
elasticity_matrix
#summary_testing Table
ny = nrow(testing)/12 #Number of years in the summary_testing, i.e. number of rows
summary_testing = matrix(0*1:(ny*(4*ncol(testing[,-1])+5)), nrow = ny, ncol = 4*ncol(testing[,-1])+5)
colnames(summary_testing) = c("Rows","Year","y","Annual change dependent","Percentual change dependent (%)",paste("Annual Change",names(testing[,-1])),paste("Elasticity",names(testing[,-1])),paste("Contribution",names(testing[,-1])),paste("Percentual Contribution",names(testing[,-1]),"%"))
summary_testing = as.data.frame(summary_testing)
for (i in 1:ny){
summary_testing[i,1] = paste("Row",1+12*(i-1),"to",12*i,sep = " ")
summary_testing[i,2] = paste("Year",i,sep = " ")
summary_testing[i,3] = y_predict[1+12*(i-1)] #Value of dependent at time t
summary_testing[i,4] = y_predict[12*i]-y_predict[1+12*(i-1)] #Difference of dependent at time t+12 and at time t
summary_testing[i,5] = summary_testing[i,4]/summary_testing[i,3]*100 #Percentual change of dependent
summary_testing[i,6:12] = as.numeric(testing[12*i,-1]-testing[1+12*(i-1),-1]) #Difference of independents at time t+12 and at time t
summary_testing[i,13:19] = as.numeric(elasticity_matrix[1+12*(i-1),]) #Elasticities of independents at time t
summary_testing[i,20:26] = summary_testing[i,6:12]*summary_testing[i,13:19] #Total differentials = difference * elasticities
summary_testing[i,27:33] = summary_testing[i,20:26]/summary_testing[i,3]*100 #Change as percent of dependent
}
write.csv2(x = summary_testing[,c(1:2,5,27:33)],file = "summary_testing.csv")
#summary Table Version 2: Just for training points
#Estimating elasticity for trainig points
y = data_svm[,-1]
elasticity_matrix = as.data.frame(matrix(0,nrow = nrow(y),ncol = (ncol(y))))
colnames(elasticity_matrix) = colnames(y)
for (i in 1:nrow(y)){
elasticity_matrix[i,]=gradient(y[i,])
}
elasticity_matrix
#summary_training Table Training
ny = nrow(training)/12 #Number of years in the summary_training, i.e. number of rows
summary_training = matrix(0*1:(ny*(4*ncol(testing[,-1])+5)), nrow = ny, ncol = 4*ncol(testing[,-1])+5)
colnames(summary_training) = c("Rows","Year","y","Annual change dependent","Percentual change dependent (%)",paste("Annual Change",names(testing[,-1])),paste("Elasticity",names(testing[,-1])),paste("Contribution",names(testing[,-1])),paste("Percentual Contribution",names(testing[,-1]),"%"))
summary_training = as.data.frame(summary_training)
for (i in 1:ny){
summary_training[i,1] = paste("Row",1+12*(i-1),"to",12*i,sep = " ")
summary_training[i,2] = paste("Year",i,sep = " ")
summary_training[i,3] = training[1+12*(i-1),1] #Value of dependent at time t
summary_training[i,4] = training[12*i,1]-training[1+12*(i-1),1] #Difference of dependent at time t+12 and at time t
summary_training[i,5] = summary_training[i,4]/summary_training[i,3]*100 #Percentual change of dependent
summary_training[i,6:12] = as.numeric(training[12*i,-1]-training[1+12*(i-1),-1]) #Difference of independents at time t+12 and at time t
summary_training[i,13:19] = as.numeric(elasticity_matrix[1+12*(i-1),]) #Elasticities of independents at time t
summary_training[i,20:26] = summary_training[i,6:12]*summary_training[i,13:19] #Total differentials = difference * elasticities
summary_training[i,27:33] = summary_training[i,20:26]/summary_training[i,3]*100 #Change as percent of dependent
}
write.csv2(x = summary_training[,c(1:2,5,27:33)],file = "summary_training.csv")
#summary Table Version 3: For all points
final_summary = rbind(summary_testing, summary_training)
write.csv2(x = final_summary[,c(1:2,5,27:33)],file = "final_summary.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PIAssetDatabase.r
\name{PIAssetDatabase}
\alias{PIAssetDatabase}
\title{Generate an instance of the PIAssetDatabase PI Web API class}
\usage{
PIAssetDatabase(webId = NULL, id = NULL, name = NULL,
description = NULL, path = NULL, extendedProperties = NULL,
links = NULL, webException = NULL)
}
\arguments{
\item{webId}{(string)}
\item{id}{(string)}
\item{name}{(string)}
\item{description}{(string)}
\item{path}{(string)}
\item{extendedProperties}{(object)}
\item{links}{()}
\item{webException}{()}
}
\value{
PIAssetDatabase
}
\description{
Generate an instance of the PIAssetDatabase PI Web API class
}
\examples{
assetDatabase <- PIAssetDatabase(webId =
"I1RDDqD5loBNH0erqeqJodtALAquulo6433EKdHra7fsmL0g", id =
"a3a5ebaa-37ae-42dc-9d1e-b6bb7ec98bd2", name = "MyDatabase", description = "PI BI Project
Asset Model", path = "\\\\\\\\MyAssetServer\\\\MyDatabase")
}
| /man/PIAssetDatabase.Rd | permissive | eddyrene/PI-Web-API-Client-R | R | false | true | 1,031 | rd | % Generated by roxygen2: do not edit by hand
library(ggplot2)
library(grid)
library(gtable)
library(ggthemes)
library(gridExtra)
library(mgcv)
library(data.table)
##check for collinearity and correlation, this only applies to the explanatory variables!
source("vif.R")
source ("AED.R")
#Bin A
BinA= subset (count, Bin=='A')
expA=as.data.frame(data.table(cbind(treatment=BinA$treatment, T=BinA$T, A=BinA$A, ID=BinA$reptreat)))
cor(expA, method = "spearman")
vif_func(in_frame=expA,thresh=5,trace=T)
corvif(expA)
pairs(expA, lower.panel = panel.smooth2, upper.panel = panel.cor, diag.panel = panel.hist)
#boxplots
boxplot(CellsN~treatment, data=BinA)
boxplot(CellsN~reptreat, data=BinA)
boxplot (CellsN~Tn, data=BinA)
BA <- gamm (CellsN~s(Tn, by=treatment), method="REML", data = BinA)
BA1 <- gamm (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinA)
BA2 <- gamm (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinA) #best
BA3 <- gamm (CellsN~te(Tn, by=treatment), method="REML", data = BinA)
fBin <- CellsN~s(Tn, by=treatment, bs="cs", k=4)
BA4 <- gamm (fBin, method="REML", random=list(reptreat=~1), data = BinA) #same with BA2
BA5 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), data = BinA)
BA6 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), weights = varIdent(form=~1| reptreat),
data = BinA) #best
BA7 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), random=list(reptreat=~1),
data = BinA)
anova(BA$lme, BA1$lme, BA2$lme, BA3$lme, BA4$lme, BA5$lme, BA6$lme, BA7$lme)
A <- gam (CellsN~s(Tn, by=treatment), method="REML", data = BinA)
A1 <- gam (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinA)
A2 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinA)
A3 <- gam (CellsN~te(Tn, by=treatment), method="REML", data = BinA)
A4 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4) + s(reptreat, bs="re"), method="REML", data = BinA)
AIC (A, A1, A2, A3, A4)
#BinB
BinB= subset (count, Bin=='B')
expB=as.data.frame(data.table(cbind(treatment=BinB$treatment, T=BinB$T, A=BinB$A, ID=BinB$reptreat)))
cor(expB, method = "spearman")
vif_func(in_frame=expB,thresh=5,trace=T)
corvif(expB)
pairs(expB, lower.panel = panel.smooth2, upper.panel = panel.cor, diag.panel = panel.hist)
#boxplots
boxplot(CellsN~treatment, data=BinB)
boxplot(CellsN~reptreat, data=BinB)
boxplot (CellsN~Tn, data=BinB)
BB <- gamm (CellsN~s(Tn, by=treatment), method="REML", data = BinB)
BB1 <- gamm (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinB)
BB2 <- gamm (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinB) #best
BB3 <- gamm (CellsN~te(Tn, by=treatment), method="REML", data = BinB)
fBin <- CellsN~s(Tn, by=treatment, bs="cs", k=4)
BB4 <- gamm (fBin, method="REML", random=list(reptreat=~1), data = BinB) #same with BB2
BB5 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), data = BinB)
BB6 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), weights = varIdent(form=~1| reptreat),
data = BinB) #best
BB7 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), random=list(reptreat=~1),
data = BinB)
anova(BB$lme, BB1$lme, BB2$lme, BB3$lme, BB4$lme, BB5$lme, BB6$lme, BB7$lme)
B <- gam (CellsN~s(Tn, by=treatment), method="REML", data = BinB)
B1 <- gam (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinB)
B2 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinB)
B3 <- gam (CellsN~te(Tn, by=treatment), method="REML", data = BinB)
B4 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4) + s(reptreat, bs="re"), method="REML", data = BinB)
AIC (B, B1, B2, B3, B4)
#BinC
BinC= subset (count, Bin=='C')
expC=as.data.frame(data.table(cbind(treatment=BinC$treatment, T=BinC$T, A=BinC$A, ID=BinC$reptreat)))
cor(expC, method = "spearman")
vif_func(in_frame=expC,thresh=5,trace=T)
corvif(expC)
pairs(expC, lower.panel = panel.smooth2, upper.panel = panel.cor, diag.panel = panel.hist)
#boxplots
boxplot(CellsN~treatment, data=BinC)
boxplot(CellsN~reptreat, data=BinC)
boxplot (CellsN~Tn, data=BinC)
BC <- gamm (CellsN~s(Tn, by=treatment), method="REML", data = BinC)
BC1 <- gamm (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinC)
BC2 <- gamm (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinC) #best
BC3 <- gamm (CellsN~te(Tn, by=treatment), method="REML", data = BinC)
fBin <- CellsN~s(Tn, by=treatment, bs="cs", k=4)
BC4 <- gamm (fBin, method="REML", random=list(reptreat=~1), data = BinC) #same with BC2
BC5 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), data = BinC)
BC6 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), weights = varIdent(form=~1| reptreat),
data = BinC) #best
BC7 <- gamm (fBin, method="REML", correlation= corAR1 (form=~1|treatment/reptreat), random=list(reptreat=~1),
data = BinC)
anova(BC$lme, BC1$lme, BC2$lme, BC3$lme, BC4$lme, BC5$lme, BC6$lme, BC7$lme)
C <- gam (CellsN~s(Tn, by=treatment), method="REML", data = BinC)
C1 <- gam (CellsN~s(Tn, by=treatment, bs="cr", k=4), method="REML", data = BinC)
C2 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4), method="REML", data = BinC)
C3 <- gam (CellsN~te(Tn, by=treatment), method="REML", data = BinC)
C4 <- gam (CellsN~s(Tn, by=treatment, bs="cs", k=4) + s(reptreat, bs="re"), method="REML", data = BinC)
AIC (C, C1, C2, C3, C4)
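# --- Illustrative follow-up (added sketch; not part of the original analysis) ---
# After picking a model from the anova()/AIC() comparisons above (the comments
# mark BC6 as the preferred fit for Bin C), the treatment-specific smooths and
# the underlying mixed-model structure can be inspected like this.
summary(BC6$gam)           # approximate significance of the smooth terms
plot(BC6$gam, pages = 1)   # all estimated smooths on a single page
summary(BC6$lme)           # correlation and variance structure of the lme part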
| /Working scripts/dec 2014/count bins.R | no_license | kaye11/Some-R-scripts | R | false | false | 5,601 | r | library(ggplot2)
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/source.R
\name{new_source}
\alias{new_source}
\title{Create a source object.}
\usage{
new_source(path, text, skip = 0)
}
\arguments{
\item{path}{Either a path to a file, or a connection. Reading directly
from a file is most efficient.}
\item{text}{A character or raw vector. If a character vector, only the
first element is used.}
\item{skip}{Number of lines to skip before reading data.}
}
\description{
Create a source object.
}
\keyword{internal}
| /man/new_source.Rd | no_license | christophergandrud/readr | R | false | false | 539 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
test_that("[,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
## o Subset using indices
expect_true(is(chrs[1, 1], "Chromatogram"))
expect_equal(chrs[1, 2], ch2)
## extract a row
expect_equal(chrs[1, , drop = TRUE], list(`1` = ch, `2` = ch2))
expect_equal(chrs[1, , drop = FALSE], Chromatograms(list(ch, ch2), nrow = 1))
## Test the default
expect_equal(chrs[1, ], Chromatograms(list(ch, ch2), nrow = 1))
## extract a column
expect_equal(chrs[, 2, drop = TRUE], list(`1` = ch2, `2` = ch3))
res <- chrs[, 2, drop = FALSE]
res_exp <- Chromatograms(list(ch2, ch3), ncol = 1,
dimnames = list(NULL, "2"))
    ## Have to re-set the rownames of the pheno data, otherwise we would
    ## compare numeric against character rownames
rownames(pData(res)) <- rownames(pData(res))
expect_equal(res, res_exp)
## Repeat with colnames:
colnames(chrs) <- c("a", "b")
expect_true(is(chrs[1, 1], "Chromatogram"))
expect_equal(chrs[1, 2], ch2)
## extract a row
expect_equal(chrs[1, , drop = TRUE], list(a = ch, b = ch2))
res_exp <- Chromatograms(list(ch, ch2), nrow = 1)
colnames(res_exp) <- c("a", "b")
expect_equal(chrs[1, , drop = FALSE], res_exp)
## Test the default
expect_equal(chrs[1, ], res_exp)
## extract a column
expect_equal(chrs[, 2, drop = TRUE], list(`1` = ch2, `2` = ch3))
res_exp <- Chromatograms(list(ch2, ch3), ncol = 1)
colnames(res_exp) <- "b"
expect_equal(chrs[, 2, drop = FALSE], res_exp)
## Check also the featureData
res <- chrs[2, ]
expect_equal(rownames(res), "2")
expect_equal(featureNames(res), "2")
## o Subset using logical
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
expect_true(is(chrs[c(TRUE, FALSE), c(TRUE, FALSE)], "Chromatogram"))
expect_equal(chrs[c(TRUE, FALSE), c(FALSE, TRUE)], ch2)
## extract a row
expect_equal(chrs[c(TRUE, FALSE), , drop = TRUE], list(`1` = ch, `2` = ch2))
expect_equal(chrs[c(TRUE, FALSE), , drop = FALSE],
Chromatograms(list(ch, ch2), nrow = 1))
expect_equal(chrs[c(TRUE, FALSE), ],
Chromatograms(list(ch, ch2), nrow = 1))
## extract a column
expect_equal(chrs[, c(FALSE, TRUE), drop = TRUE], list(`1` = ch2, `2` = ch3))
res <- chrs[, c(FALSE, TRUE), drop = FALSE]
rownames(pData(res)) <- rownames(pData(res))
expect_equal(res, Chromatograms(list(ch2, ch3), ncol = 1,
dimnames = list(NULL, "2")))
## Repeat with colnames
colnames(chrs) <- c("a", "b")
expect_equal(chrs[c(TRUE, FALSE), , drop = TRUE], list(a = ch, b = ch2))
res_exp <- Chromatograms(list(ch, ch2), nrow = 1)
colnames(res_exp) <- c("a", "b")
expect_equal(chrs[c(TRUE, FALSE), , drop = FALSE], res_exp)
expect_equal(chrs[c(TRUE, FALSE), ], res_exp)
## extract a column
expect_equal(chrs[, c(FALSE, TRUE), drop = TRUE], list(`1` = ch2, `2` = ch3))
res_exp <- Chromatograms(list(ch2, ch3), ncol = 1)
colnames(res_exp) <- "b"
expect_equal(chrs[, c(FALSE, TRUE)], res_exp)
expect_equal(chrs[, c(FALSE, TRUE)], res_exp)
## Subset using names
expect_equal(chrs[, "a", drop = TRUE], list(`1` = ch, `2` = ch1))
res_exp <- Chromatograms(list(ch, ch1), ncol = 1)
colnames(res_exp) <- "a"
expect_equal(chrs[, "a", drop = FALSE], res_exp)
expect_equal(chrs[, "a"], res_exp)
## Check phenoData while subsetting.
pd <- data.frame(name = letters[1:2], idx = 1:2)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
phenoData = AnnotatedDataFrame(pd))
res <- chrs[, 2]
pd_exp <- droplevels(pd[2, ])
expect_equal(pData(res), pd_exp)
rownames(pd) <- c("g", "h")
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
phenoData = AnnotatedDataFrame(pd))
res <- chrs[, 2]
pd_exp <- droplevels(pd[2, ])
expect_equal(pData(res), pd_exp)
## Check featureData while subsetting
fd <- data.frame(a = c("first", "second"), mz = c(2, 4))
rownames(fd) <- fd$a
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
phenoData = AnnotatedDataFrame(pd),
featureData = fd)
expect_equal(rownames(chrs), rownames(fd))
expect_equal(fData(chrs[, 1]), fd)
expect_equal(fData(chrs[2, ]), droplevels(fd[2, ]))
})
test_that("[<-,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
colnames(chrs) <- c("a", "b")
ints <- abs(rnorm(94, sd = 200))
ch4 <- Chromatogram(rtime = 1:length(ints), ints)
## errors
expect_error(chrs[1:2, 1:2] <- list(ch4, ch4, ch4, ch4))
expect_error(chrs["z", ] <- list(ch4, ch4))
## Single element.
chrs[1, 2] <- ch4
expect_equal(chrs[1, 2], ch4)
chrs[, 2] <- list(ch2, ch3)
expect_equal(chrs[, 2, drop = TRUE], list(`1` = ch2, `2` = ch3))
chrs[2, 1] <- list(ch4)
expect_equal(chrs[2, 1], ch4)
chrs[, "a"] <- list(ch2, ch1)
expect_equal(chrs[, 1, drop = TRUE], list(`1` = ch2, `2` = ch1))
expect_error(chrs[, 1] <- list(ch, ch2, ch3))
chrs[, c(TRUE, FALSE)] <- list(ch4, ch4)
expect_equal(chrs[, 1, drop = TRUE], list(`1` = ch4, `2` = ch4))
})
test_that("plot,Chromatograms works", {
ints <- abs(rnorm(123, mean = 200, sd = 19))
ch1 <- Chromatogram(rtime = seq_along(ints), intensity = ints, mz = 231)
ints <- abs(rnorm(122, mean = 300, sd = 35))
ch2 <- Chromatogram(rtime = seq_along(ints), intensity = ints, mz = 231)
ints <- abs(rnorm(124, mean = 214, sd = 49))
ch3 <- Chromatogram(rtime = seq_along(ints) + 300, intensity = ints,
mz = 403)
ints <- abs(rnorm(123, mean = 530, sd = 89))
ch4 <- Chromatogram(rtime = seq_along(ints) + 300, intensity = ints,
mz = 403)
chrs <- Chromatograms(list(ch1, ch2, ch3, ch4, ch1, ch2), ncol = 2,
byrow = TRUE)
plot(chrs)
plot(chrs[1, , drop = FALSE])
plot(chrs[1, 1, drop = FALSE])
plot(chrs[1, ])
plot(chrs[1, 1])
plot(chrs[, 2])
})
test_that("colnames<-, sampleNames, sampleNames<-,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
expect_equal(colnames(chrs), as.character(1:ncol(chrs)))
expect_equal(sampleNames(chrs), as.character(1:ncol(chrs)))
colnames(chrs) <- letters[1:ncol(chrs)]
expect_equal(colnames(chrs), letters[1:ncol(chrs)])
expect_equal(rownames(pData(chrs)), letters[1:ncol(chrs)])
expect_equal(sampleNames(chrs), letters[1:ncol(chrs)])
sampleNames(chrs) <- c("b", "z")
expect_equal(colnames(chrs), c("b", "z"))
## Error
expect_error(colnames(chrs) <- 1:4)
})
test_that("phenoData,pData,pData<-,Chromatograms works", {
## Check if we can access the phenoData.
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
pd_exp <- annotatedDataFrameFrom(matrix(ncol = 2, nrow = 2), byrow = FALSE)
rownames(pData(pd_exp)) <- NULL
pd_exp <- as(pd_exp, "NAnnotatedDataFrame")
expect_equal(phenoData(chrs), pd_exp)
## Check error when assigning a phenoData with different names
pd <- data.frame(name = letters[1:2], idx = 1:2)
rownames(pd) <- letters[1:2]
expect_error(pData(chrs) <- pd)
pd <- data.frame(name = letters[1:2], idx = 1:2)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
phenoData = AnnotatedDataFrame(pd))
chrs_2 <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
phenoData = pd)
expect_equal(chrs, chrs_2)
expect_equal(phenoData(chrs), as(AnnotatedDataFrame(pd), "NAnnotatedDataFrame"))
## pData.
expect_equal(pData(chrs), pd)
pd_2 <- cbind(pd, other = 1:2)
pData(chrs) <- pd_2
expect_equal(pData(chrs), pd_2)
rownames(pd_2) <- c("g", "h")
expect_error(pData(chrs) <- pd_2)
colnames(chrs) <- c("g", "h")
pData(chrs) <- pd_2
expect_equal(pData(chrs), pd_2)
expect_equal(colnames(chrs), rownames(pd_2))
## $
expect_equal(chrs$name, factor(letters[1:2]))
expect_equal(chrs$idx, 1:2)
chrs$idx <- c(2, 1)
expect_equal(chrs$idx, c(2, 1))
chrs$new_variable <- c("it", "works")
expect_equal(chrs$new_variable, c("it", "works"))
expect_error(chrs$new_variable <- 1:4)
chrs$new_variable <- 1
})
test_that("rownames<-, featureNames, featureNames<-,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
expect_equal(rownames(chrs), as.character(1:nrow(chrs)))
expect_equal(featureNames(chrs), as.character(1:nrow(chrs)))
rownames(chrs) <- letters[1:nrow(chrs)]
expect_true(validObject(chrs))
expect_equal(rownames(chrs), letters[1:nrow(chrs)])
expect_equal(featureNames(chrs), letters[1:nrow(chrs)])
expect_error(rownames(chrs) <- letters[1:20])
featureNames(chrs) <- c("b", "z")
expect_equal(rownames(chrs), c("b", "z"))
})
test_that("featureData,fData,fData<-,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
fd_exp <- annotatedDataFrameFrom(matrix(ncol = 2, nrow = 2), byrow = TRUE)
expect_equal(featureData(chrs), fd_exp)
## Check error when assigning a featureData with different names
fd <- data.frame(name = letters[1:2], idx = 1:2)
rownames(fd) <- letters[1:2]
    expect_error(fData(chrs) <- fd)
fd <- data.frame(name = letters[1:2], idx = 1:2)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2,
featureData = fd)
expect_equal(featureData(chrs), AnnotatedDataFrame(fd))
expect_equal(fData(chrs), fd)
fd_2 <- cbind(fd, other = 1:2)
fData(chrs) <- fd_2
rownames(fd_2) <- as.character(1:2)
expect_equal(featureData(chrs), AnnotatedDataFrame(fd_2))
expect_equal(fData(chrs), fd_2)
fd_3 <- cbind(fd_2, another = 3:4)
featureData(chrs) <- fd_3
expect_equal(featureData(chrs), AnnotatedDataFrame(fd_3))
expect_equal(fvarLabels(chrs), colnames(fd_3))
})
test_that("isEmpty,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
expect_true(!isEmpty(chrs))
plot(chrs)
chrs <- Chromatograms()
expect_true(isEmpty(chrs))
expect_warning(plot(chrs))
ints <- rep(NA_real_, 105)
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- rep(NA_real_, 64)
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch1, ch2), nrow = 2)
expect_true(isEmpty(chrs))
expect_warning(plot(chrs))
## Only one row is empty.
ints <- rep(NA_real_, 105)
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(64))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch1, ch2), nrow = 2)
expect_true(!isEmpty(chrs))
expect_warning(plot(chrs))
## 2x2 first row NA
chrs <- Chromatograms(list(ch1, ch2, ch1, ch2), nrow = 2)
expect_warning(plot(chrs))
## 2x2 first col NA
chrs <- Chromatograms(list(ch1, ch1, ch2, ch2), nrow = 2)
expect_true(!isEmpty(chrs))
plot(chrs)
})
test_that(".mz_chromatograms, precursorMz etc,Chromatograms works", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
## Base function
expect_error(MSnbase:::.mz_chromatograms(chrs, mz = "other"))
chrs_f <- chrs
fData(chrs_f) <- data.frame(precursorIsolationWindowTargetMZ = c(123, 456))
res <- MSnbase:::.mz_chromatograms(chrs_f, mz = "precursorMz")
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs_f))
expect_equal(res[, "mzmin"], res[, "mzmax"])
expect_equal(res[, "mzmin"], c(123, 456))
res <- MSnbase:::.mz_chromatograms(chrs_f)
expect_true(all(is.na(res)))
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
## method implementations:
## precursorMz
expect_true(nrow(precursorMz(Chromatograms())) == 0)
## with precursor m/z data in the featureData data.frame
chrs_f <- chrs
fData(chrs_f) <- data.frame(precursorIsolationWindowTargetMZ = c(123, 456))
res <- MSnbase:::.mz_chromatograms(chrs_f, "precursorMz")
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs_f))
expect_equal(res[, "mzmin"], res[, "mzmax"])
expect_equal(res[, "mzmin"], c(123, 456))
expect_equal(precursorMz(chrs_f), res)
## Extracting precursor m/z data from the Chromatogram objects.
res <- MSnbase:::.mz_chromatograms(chrs)
expect_true(all(is.na(res)))
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
expect_equal(res, precursorMz(chrs))
## Define chrs.
chrs_2 <- chrs
chrs_2[1, 1]@precursorMz <- range(123)
expect_error(MSnbase:::.mz_chromatograms(chrs_2, "precursorMz"),
"Chromatograms in row 1 have different precursorMz")
chrs_2[1, 2]@precursorMz <- range(123)
chrs_2[2, 1]@precursorMz <- range(456)
chrs_2[2, 2]@precursorMz <- range(456)
res <- MSnbase:::.mz_chromatograms(chrs_2, "precursorMz")
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
expect_equal(res[, "mzmin"], res[, "mzmax"])
expect_equal(res[, "mzmin"], c(123, 456))
expect_equal(res, precursorMz(chrs_2))
## productMz
res <- MSnbase:::.mz_chromatograms(chrs, "productMz")
expect_true(all(is.na(res)))
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
expect_equal(res, productMz(chrs))
chrs_f <- chrs
fData(chrs_f) <- data.frame(productIsolationWindowTargetMZ = c(3, 5))
res <- MSnbase:::.mz_chromatograms(chrs_f, "productMz")
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
expect_equal(res[, "mzmin"], res[, "mzmax"])
expect_equal(res[, "mzmin"], c(3, 5))
expect_equal(res, productMz(chrs_f))
chrs_2 <- chrs
chrs_2[1, 1]@productMz <- range(5)
expect_error(MSnbase:::.mz_chromatograms(chrs_2, "productMz"),
"Chromatograms in row 1 have different productMz")
chrs_2[1, 2]@productMz <- range(5)
res <- MSnbase:::.mz_chromatograms(chrs_2, "productMz")
expect_equal(colnames(res), c("mzmin", "mzmax"))
expect_equal(nrow(res), nrow(chrs))
expect_equal(res[, "mzmin"], res[, "mzmax"])
expect_equal(res[, "mzmin"], c(5, NA))
expect_equal(res, productMz(chrs_2))
## polarity
expect_true(all(polarity(chrs) == -1))
fData(chrs)$polarity <- c(1, 1)
expect_true(all(polarity(chrs) == 1))
## With a real object.
on_disk <- microtofq_on_disk
chrs <- chromatogram(on_disk, mz = c(123.4, 123.6), rt = c(35, 48))
res <- mz(chrs)
expect_true(nrow(res) == 1)
expect_true(all(colnames(res) == c("mzmin", "mzmax")))
expect_equal(unname(res[1, "mzmin"]), 123.4)
expect_equal(unname(res[1, "mzmax"]), 123.6)
expect_true(polarity(chrs) == unique(polarity(microtofq_on_disk)))
})
test_that(".bin_Chromatograms and bin,Chromatograms work", {
ints <- abs(rnorm(12, sd = 20))
ch <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(20, sd = 14))
ch1 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(14, sd = 24))
ch2 <- Chromatogram(rtime = 1:length(ints), ints)
ints <- abs(rnorm(40, sd = 34))
ch3 <- Chromatogram(rtime = 1:length(ints), ints)
chrs <- Chromatograms(list(ch, ch1, ch2, ch3), nrow = 2)
chrsb <- MSnbase:::.bin_Chromatograms(chrs, binSize = 2)
## 1st row:
expect_equal(rtime(chrsb[1, 1]), rtime(chrsb[1, 2]))
expect_true(all(intensity(chrsb[1, 1])[rtime(chrsb[1, 1]) >
max(rtime(chrs[1, 1]))] == 0))
expect_true(max(rtime(chrsb[1, 2])) >= max(rtime(chrs[1, 2])))
expect_true(max(rtime(chrsb[1, 1])) >= max(rtime(chrs[1, 1])))
expect_equal(chrsb[1, 2], bin(chrs[1, 2], binSize = 2))
## 2nd row:
expect_equal(rtime(chrsb[2, 1]), rtime(chrsb[2, 2]))
expect_true(all(intensity(chrsb[2, 1])[rtime(chrsb[2, 1]) >
max(rtime(chrs[2, 1]))] == 0))
expect_true(max(rtime(chrsb[2, 2])) >= max(rtime(chrs[2, 2])))
expect_true(max(rtime(chrsb[2, 1])) >= max(rtime(chrs[2, 1])))
expect_equal(chrsb[2, 2], bin(chrs[2, 2], binSize = 2))
})
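# --- Condensed usage sketch (added for readability; not one of the package's tests) ---
# A minimal example of the API exercised by the tests above: build a small
# Chromatograms object, label the samples, attach a phenoData variable, and subset.
library(MSnbase)
chr_a <- Chromatogram(rtime = 1:10, intensity = abs(rnorm(10)))
chr_b <- Chromatogram(rtime = 1:10, intensity = abs(rnorm(10)))
chrs_demo <- Chromatograms(list(chr_a, chr_b), ncol = 2)
colnames(chrs_demo) <- c("sample_1", "sample_2")
chrs_demo$group <- c("control", "treated")   # stored in the phenoData, as tested above
chrs_demo[1, "sample_2"]                     # a single Chromatogram object
intensity(chrs_demo[1, 1])                   # intensities of one chromatogram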
| /tests/testthat/test_methods-Chromatograms.R | no_license | sgibb/MSnbase | R | false | false | 19,488 | r | test_that("[,Chromatograms works", {
library(tidyverse)
# IBIS Analysis and Output ------------------------------------------------
# Biomass Growth Parameters
# Biomass Turnover Parameters
# NPP
# Data taken from IBIS Inputs
branch_fraction = 0.2
root_fraction = 0.7
leaf_fraction = 0.1
# Merge IBIS Biomass with LUCAS DOM Flows -------------------------------
# Read in the forest flow pathways
forest_flows = read_csv("F:/national-assessment/data/stock-flow-model/flow-pathways.csv")
forest_dom_flows = forest_flows %>%
filter(FromStockTypeID == "DOM: Aboveground Fast", FromStateClassID == "Forest: Pinyon/Juniper Group") %>%
mutate(FromStateClassID = "Shrubland: Big Sagebrush")
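# --- Illustrative check (added sketch; not part of the original preprocessing) ---
# The three allocation fractions are assumed to partition NPP completely, so
# they should sum to 1. The NPP value below is a made-up placeholder used only
# to show how the fractions would be applied to an annual NPP estimate.
stopifnot(abs(branch_fraction + root_fraction + leaf_fraction - 1) < 1e-9)
npp_example = 500  # hypothetical NPP value, illustration only
tibble(pool = c("branch", "root", "leaf"),
       fraction = c(branch_fraction, root_fraction, leaf_fraction),
       npp_allocated = npp_example * c(branch_fraction, root_fraction, leaf_fraction))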
| /scripts/preprocessing-ibis-shrubland.R | no_license | bsleeter/california-sig | R | false | false | 666 | r |
.quotes <- "^(`|[[]|\")(.*)(`|[]]|\")$"
is_quoted <- function(x) grepl(.quotes, x)
to_upper <- function(x) {
x <- as.character(x)
is_quoted <- is_quoted(x)
x[!is_quoted] <- toupper(x[!is_quoted])
x
}
title <- function(x) h3(x, style = "text-align: left; border-bottom: 1px solid #494949; font-size: 16px; margin: 10px;")
is_try_error <- function(x) inherits(x, "try-error")
rm_null <- function(x) x[lengths(x) != 0]
rm_ext <- function(x) sub(pattern = "(.*?)\\..*$", replacement = "\\1", x)
br2 <- function() tagList(br(), br())
button <- function(id, label = "Get Data (csv)", icon = "download", status = "primary"){
tags$button(id = id,
type = "button",
class = glue::glue("btn action-button btn-md btn-{status}"),
HTML(as.character(icon(icon)), label))
}
click_js <- function(id){
glue::glue("document.getElementById('{id}').click();")
} | /R/utils.R | permissive | poissonconsulting/slobr | R | false | false | 903 | r | .quotes <- "^(`|[[]|\")(.*)(`|[]]|\")$"
check.ev.cp <- function( tab, s )
{
###
### This function checks the candidate non-basic variable to determine
### if it violates the complementary pivoting requirement
###
### Parameters
### tab = augmented tableau for LLGP with complementary pivoting
### s = the index of the non-basic variable to enter the solution basis
###
###
### get the corresponding non-basic variable
###
nonbasic.variable <- tab$col.headings[s]
###
### skip over N and P variables
###
firstLetter <- substr( nonbasic.variable, 1, 1 )
if ( firstLetter != "X" )
return( TRUE )
ev.class <- get.variable.class( tab, nonbasic.variable )
for ( i in 1:tab$objectives ) {
basic.variable <- tab$row.headings[i]
dv.class <- get.variable.class( tab, basic.variable )
if ( ev.class == dv.class ) {
return( FALSE )
}
}
return( TRUE )
}
| /goalprog/R/check.ev.cp.R | no_license | ingted/R-Examples | R | false | false | 923 | r | check.ev.cp <- function( tab, s )
{
###
### This function checks the candidate non-basic variable to determine
### if it violates the complementary pivoting requirement
###
### Parameters
### tab = augmented tableau for LLGP with complementary pivoting
### s = the index of the non-basic variable to enter the solution basis
###
###
### get the corresponding non-basic variable
###
nonbasic.variable <- tab$col.headings[s]
###
### skip over N and P variables
###
firstLetter <- substr( nonbasic.variable, 1, 1 )
if ( firstLetter != "X" )
return( TRUE )
ev.class <- get.variable.class( tab, nonbasic.variable )
for ( i in 1:tab$objectives ) {
basic.variable <- tab$row.headings[i]
dv.class <- get.variable.class( tab, basic.variable )
if ( ev.class == dv.class ) {
return( FALSE )
}
}
return( TRUE )
}
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.86444977775538e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609868085-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 831 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.86444977775538e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
install.packages("car")
library(car)
test <- read.csv('test_vif1.csv')
test
cor(test[,c('아이큐','공부시간')])
test <- test[,-1]
m1 <- lm(test$시험점수~., data=test)
summary(m1)
vif(m1) # check for multicollinearity
test2 <- read.csv('test_vif2.csv')
test2 <- test2[,-1]
test2
test_m <- lm(test2$시험점수~., data=test2)
test_m
summary(test_m)
vif(test_m)
vif(test_m) > 15 # 아이큐 (IQ) and 등급평균 (grade average) exceed the threshold
| /R_class13 ML ch6 다중공선성 확인 (200629).R | permissive | Adrian123K/R | R | false | false | 415 | r | install.packages("car")
library(car)
test <- read.csv('test_vif1.csv')
test
cor(test[,c('아이큐','공부시간')])
test <- test[,-1]
m1 <- lm(test$시험점수~., data=test)
summary(m1)
vif(m1) # check for multicollinearity
test2 <- read.csv('test_vif2.csv')
test2 <- test2[,-1]
test2
test_m <- lm(test2$시험점수~., data=test2)
test_m
summary(test_m)
vif(test_m)
vif(test_m) > 15 # 아이큐 (IQ) and 등급평균 (grade average) exceed the threshold
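# Added illustration (hypothetical data, not from test_vif1/test_vif2): two nearly
# collinear predictors produce very large VIFs, which is what the VIF > 15 screen catches.
# x1 <- rnorm(100); x2 <- x1 + rnorm(100, sd = 0.01); y <- x1 + rnorm(100)
# car::vif(lm(y ~ x1 + x2))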
|
##' @importFrom RANN nn2
NULL
##' init of knn impute, if the variable is numeric then imputes with mean of all values
##' otherwise takes the most repeated levels.
##'
##' @param var a variable with missing values as NA
##' @return the var vector with imputed values
knn_impute_init <- function(var)
{
if(is.numeric(var)){
imp <- var
imp[is.na(imp)] <- mean(var,na.rm = TRUE)
}else{
imp <- var
imp[is.na(imp)] <- most_repeated(var)
}
imp
}
##' returns the most repeated value in var
##'
##' @param var a vector
##' @return the most repeated value
most_repeated <- function(var)
{
tt <- table(var)
names(tt)[which.max(tt)]
}
##' imputes only one variable with knn
##'
##' @param var a string indicating the variable to be imputed
##' @param my_data a tibble / data.frame containing the dataset with missing values
##' @param my_data_impute a tibble / data.frame containing the dataset with filled missing values
##' @param k the number of neighbors
impute_variable <- function(var , my_data, my_data_impute, k)
{
na_idx <- my_data %>% pluck(var) %>% is.na() %>% which()
train <- my_data_impute %>% slice(-na_idx)
test <- my_data_impute %>% slice(na_idx) %>% select(-!! sym(var))
resp <- train %>% pluck(var)
train <- train %>% select(-one_of(var))
train %<>% mutate_if(is.character,list(~ factor(.)))
test %<>% mutate_if(is.character, list(~factor(.)))
knn_search <- RANN::nn2( train, query = test, k = k )
neighbor_values <- matrix(resp[knn_search$nn.idx], ncol = k)
if( is.factor(my_data[[var]])){
imputed_values <- apply(neighbor_values, 1, most_repeated)
}else{
imputed_values <- rowMeans(neighbor_values)
}
my_data_impute[[var]][na_idx] <- imputed_values
my_data_impute
}
calculate_loss <- function(var,old_var)
{
if(is.numeric(var)){
mean((var - old_var)^2)
}else{
mean(var != old_var)
}
}
##' imputes missing value in a tibble with a mice inspired
##' knn algorithm
##'
##' @param my_data a tibble / data.frame containing the dataset with missing values
##' @param k the number of neighbors
##' @param tol convergence tolerance
##' @param max_iter max number of iterations
##' @param verbose a logical value indicating if the iterations should be printed
##'
##' @return a data.frame / tibble with imputed entries where my_data has NA values
##' @export
impute_knn <- function(my_data, k , tol, max_iter ,verbose = FALSE)
{
## init by most repeated if categorical / mean if numerical
my_data_impute <- my_data %>% mutate_all( list( ~ knn_impute_init(.)))
any_na <- function(.)any(is.na(.))
vars <- my_data %>% select_if( any_na) %>% names()
old <- my_data_impute
iter <- 0
loss <- tol + 1
while( loss >= tol & iter <= max_iter) {
if(verbose) message(iter)
for(var in vars){
if(verbose) message(var)
my_data_impute <- impute_variable(var,my_data,my_data_impute,k)
}
loss <- map2_dbl(
old %>% as.list(),
my_data_impute %>% as.list(),
calculate_loss)
loss %<>% mean()
old <- my_data_impute
iter <- iter + 1
}
my_data_impute
}
| /R/knn_impute.R | no_license | welch16/rwlib | R | false | false | 3,106 | r | ##' @importFrom RANN nn2
NULL
##' init of knn impute, if the variable is numeric then imputes with mean of all values
##' otherwise takes the most repeated levels.
##'
##' @param var a variable with missing values as NA
##' @return the var vector with imputed values
knn_impute_init <- function(var)
{
if(is.numeric(var)){
imp <- var
imp[is.na(imp)] <- mean(var,na.rm = TRUE)
}else{
imp <- var
imp[is.na(imp)] <- most_repeated(var)
}
imp
}
##' returns the most repeated value in var
##'
##' @param var a vector
##' @return the most repeated value
most_repeated <- function(var)
{
tt <- table(var)
names(tt)[which.max(tt)]
}
##' imputes only one variable with knn
##'
##' @param var a string indicating the variable to be imputed
##' @param my_data a tibble / data.frame containing the dataset with missing values
##' @param my_data_impute a tibble / data.frame containing the dataset with filled missing values
##' @param k the number of neighbors
impute_variable <- function(var , my_data, my_data_impute, k)
{
na_idx <- my_data %>% pluck(var) %>% is.na() %>% which()
train <- my_data_impute %>% slice(-na_idx)
test <- my_data_impute %>% slice(na_idx) %>% select(-!! sym(var))
resp <- train %>% pluck(var)
train <- train %>% select(-one_of(var))
train %<>% mutate_if(is.character,list(~ factor(.)))
test %<>% mutate_if(is.character, list(~factor(.)))
knn_search <- RANN::nn2( train, query = test, k = k )
neighbor_values <- matrix(resp[knn_search$nn.idx], ncol = k)
if( is.factor(my_data[[var]])){
imputed_values <- apply(neighbor_values, 1, most_repeated)
}else{
imputed_values <- rowMeans(neighbor_values)
}
my_data_impute[[var]][na_idx] <- imputed_values
my_data_impute
}
calculate_loss <- function(var,old_var)
{
if(is.numeric(var)){
mean((var - old_var)^2)
}else{
mean(var != old_var)
}
}
##' imputes missing value in a tibble with a mice inspired
##' knn algorithm
##'
##' @param my_data a tibble / data.frame containing the dataset with missing values
##' @param k the number of neighbors
##' @param tol convergence tolerance
##' @param max_iter max number of iterations
##' @param verbose a logical value indicating if the iterations should be printed
##'
##' @return a data.frame / tibble with imputed entries where my_data has NA values
##' @export
impute_knn <- function(my_data, k , tol, max_iter ,verbose = FALSE)
{
## init by most repeated if categorical / mean if numerical
my_data_impute <- my_data %>% mutate_all( list( ~ knn_impute_init(.)))
any_na <- function(.)any(is.na(.))
vars <- my_data %>% select_if( any_na) %>% names()
old <- my_data_impute
iter <- 0
loss <- tol + 1
while( loss >= tol & iter <= max_iter) {
if(verbose) message(iter)
for(var in vars){
if(verbose) message(var)
my_data_impute <- impute_variable(var,my_data,my_data_impute,k)
}
loss <- map2_dbl(
old %>% as.list(),
my_data_impute %>% as.list(),
calculate_loss)
loss %<>% mean()
old <- my_data_impute
iter <- iter + 1
}
my_data_impute
}
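##' Usage sketch (added for illustration; the toy tibble and settings below are
##' made up, not part of the original file): impute_knn() initialises NAs with
##' knn_impute_init() and then loops impute_variable() over the incomplete columns
##' until the mean change falls below tol.
# library(dplyr); library(purrr); library(magrittr); library(rlang)
# toy <- tibble::tibble(a = c(1, NA, 3, 4, 5),
#                       b = c(2, 3, NA, 5, 6),
#                       c = c(1, 1, 2, 2, 3))
# impute_knn(toy, k = 2, tol = 1e-6, max_iter = 10, verbose = TRUE)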
|
## DSO106 L2 Modeling with Logistic Regression - R
# Load in libraries
library("caret")
library("magrittr")
library("dplyr")
library("tidyr")
library("lmtest")
library("popbio")
library("e1071")
#Loaded in baseball data
# Determine whether # of home runs hit in a game is a predictor of whether or
# not a team wins
# Data Wrangling
# recode the outcome variable (DV) to zeroes and ones
baseball$WinsR <- NA
baseball$WinsR[baseball$W.L == 'W'] <- 1
baseball$WinsR[baseball$W.L == 'L'] <- 0
# testing logistic regression assumptions
mylogit <- glm(WinsR ~ HR.Count, data=baseball, family='binomial')
# Predict wins and losses
probabilities <- predict(mylogit, type='response')
# Convert probabilities to have a positive or negative prediction
baseball$Predicted <- ifelse(probabilities > .5, 'pos', 'neg')
# Next, recode the predicted variable
baseball$PredictedR <- NA
baseball$PredictedR[baseball$Predicted == 'pos'] <- 1
baseball$PredictedR[baseball$Predicted == 'neg'] <- 0
# Convert our new variables to factors
baseball$PredictedR <- as.factor(baseball$PredictedR)
baseball$WinsR <- as.factor(baseball$WinsR)
# Create a confusion matrix
conf_mat <- caret::confusionMatrix(baseball$PredictedR, baseball$WinsR)
conf_mat
# Logit linearity
# calculate the logit and graph it against the predicted values
baseball1 <- baseball %>% dplyr::select_if(is.numeric)
predictors <- colnames(baseball1)
# finally, create the logit
baseball1 <- baseball1 %>%
mutate(logit=log(probabilities/(1-probabilities))) %>%
gather(key= 'predictors', value= 'predictor.value', -logit)
# With this logit in hand, you can graph to assess for linearity
ggplot(baseball1, aes(logit, predictor.value))+
geom_point(size=.5, alpha=.5)+
geom_smooth(method= 'loess')+
theme_bw()+
facet_wrap(~predictors, scales='free_y')
# can skip testing for the assumption of multicollinearity
# BC there is only 1 IV
# Testing for independent errors
plot(mylogit$residuals)
# seeing an even distribution of points across the x axis
# we have met the assumption of independent errors
# Testing for independent error - alternatives
# Durbin-Watson test to check for independence of errors
dwtest(mylogit, alternative='two.sided')
# Screening for outliers
infl <- influence.measures(mylogit)
summary(infl)
## Running Logistic Regression and Interpreting the Output
summary(mylogit)
# Graphing the logistic model
logi.hist.plot(baseball$HR.Count,baseball$WinsR, boxp=FALSE, type="hist", col="gray")
| /DS106-Machine-Learning/Modeling/Examples/DSO106-L2-Modeling-with-Logistic-Regression-R.R | no_license | valmhstn/DS-Student-Resources | R | false | false | 2,486 | r | ## DSO106 L2 Modeling with Logistic Regression - R
# Load in libraries
library("caret")
library("magrittr")
library("dplyr")
library("tidyr")
library("lmtest")
library("popbio")
library("e1071")
#Loaded in baseball data
# Determine whether # of home runs hit in a game is a predictor of whether or
# not a team wins
# Data Wrangling
# recode the outcome variable (DV) to zeroes and ones
baseball$WinsR <- NA
baseball$WinsR[baseball$W.L == 'W'] <- 1
baseball$WinsR[baseball$W.L == 'L'] <- 0
# testing logistic regression assumptions
mylogit <- glm(WinsR ~ HR.Count, data=baseball, family='binomial')
# Predict wins and losses
probabilities <- predict(mylogit, type='response')
# Convert probabilities to have a positive or negative prediction
baseball$Predicted <- ifelse(probabilities > .5, 'pos', 'neg')
# Next, recode the predicted variable
baseball$PredictedR <- NA
baseball$PredictedR[baseball$Predicted == 'pos'] <- 1
baseball$PredictedR[baseball$Predicted == 'neg'] <- 0
# Convert our new variables to factors
baseball$PredictedR <- as.factor(baseball$PredictedR)
baseball$WinsR <- as.factor(baseball$WinsR)
# Create a confusion matrix
conf_mat <- caret::confusionMatrix(baseball$PredictedR, baseball$WinsR)
conf_mat
# Logit linearity
# calculate the logit and graph it against the predicted values
baseball1 <- baseball %>% dplyr::select_if(is.numeric)
predictors <- colnames(baseball1)
# finally, create the logit
baseball1 <- baseball1 %>%
mutate(logit=log(probabilities/(1-probabilities))) %>%
gather(key= 'predictors', value= 'predictor.value', -logit)
# With this logit in hand, you can graph to assess for linearity
ggplot(baseball1, aes(logit, predictor.value))+
geom_point(size=.5, alpha=.5)+
geom_smooth(method= 'loess')+
theme_bw()+
facet_wrap(~predictors, scales='free_y')
# can skip testing for the assumption of multicollinearity
# BC there is only 1 IV
# Testing for independent errors
plot(mylogit$residuals)
# seeing an even distribution of points across the x axis
# we have met the assumption of independent errors
# Testing for independent error - alternatives
# Durbin-Watson test to check for independence of errors
dwtest(mylogit, alternative='two.sided')
# Screening for outliers
infl <- influence.measures(mylogit)
summary(infl)
## Running Logistic Regression and Interpreting the Output
summary(mylogit)
# Graphing the logistic model
logi.hist.plot(baseball$HR.Count,baseball$WinsR, boxp=FALSE, type="hist", col="gray")
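# Added illustration (toy vectors, not the baseball data): the confusion matrix that
# caret::confusionMatrix() summarises is just the cross-tab of predicted vs. true labels,
# and accuracy is the share of matching entries.
# pred   <- factor(c(1, 0, 1, 1, 0))
# actual <- factor(c(1, 0, 0, 1, 0))
# table(Predicted = pred, Actual = actual)
# mean(pred == actual)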
|
coef.CARBayesST <- function(object,...)
{
#### Return the estimated regression coefficient
if(is.null(nrow(object$samples$beta)))
{
return(NULL)
}else
{
beta <- apply(object$samples$beta, 2, median)
names(beta) <- colnames(object$X)
return(beta)
}
} | /CARBayesST/R/coef.CARBayesST.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 299 | r | coef.CARBayesST <- function(object,...)
{
#### Return the estimated regression coefficient
if(is.null(nrow(object$samples$beta)))
{
return(NULL)
}else
{
beta <- apply(object$samples$beta, 2, median)
names(beta) <- colnames(object$X)
return(beta)
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports-code.R
\name{is_error_free}
\alias{is_error_free}
\title{Does the code run without throwing an error?
See \code{\link[assertive.code]{is_error_free}}.}
\description{
Does the code run without throwing an error?
See \code{\link[assertive.code]{is_error_free}}.
}
| /man/is_error_free.Rd | no_license | cran/assertive | R | false | true | 352 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports-code.R
\name{is_error_free}
\alias{is_error_free}
\title{Does the code run without throwing an error?
See \code{\link[assertive.code]{is_error_free}}.}
\description{
Does the code run without throwing an error?
See \code{\link[assertive.code]{is_error_free}}.
}
|
buff_tail = c(10,1,37,5,12)
garden_bee = c(8, 3, 9, 6, 4)
red_tail = c(18, 9, 1, 2, 4)
carder_bee = c(8, 27, 6, 32, 23)
honey_bee = c(12, 13, 16, 9, 10)
plant_names = c("Thistle", "Vipers", "Golden rain", "Yellow alfalfa", "blackberry")
beesnames <- c("buff_tail", "garden_bee", "red_tail", "carder_bee", "honey_bee")
bees <- c(buff_tail, garden_bee, red_tail, carder_bee, honey_bee)
mat = matrix(bees, 5, 5, byrow=TRUE, dimnames=list(beesnames,plant_names))
mat
df = data.frame(buff_tail, garden_bee, red_tail, carder_bee, honey_bee, row.names = plant_names)
l = list(num = c(1L, 2L, 4L), s = c("hello", "world", "!"), real = c(4.7, 6.3, 7.7, 8.1))
l
| /Third/matrix.r | no_license | sharan98/DSR_lab | R | false | false | 653 | r | buff_tail = c(10,1,37,5,12)
garden_bee = c(8, 3, 9, 6, 4)
red_tail = c(18, 9, 1, 2, 4)
carder_bee = c(8, 27, 6, 32, 23)
honey_bee = c(12, 13, 16, 9, 10)
plant_names = c("Thistle", "Vipers", "Golden rain", "Yellow alfalfa", "blackberry")
beesnames <- c("buff_tail", "garden_bee", "red_tail", "carder_bee", "honey_bee")
bees <- c(buff_tail, garden_bee, red_tail, carder_bee, honey_bee)
mat = matrix(bees, 5, 5, byrow=TRUE, dimnames=list(beesnames,plant_names))
mat
df = data.frame(buff_tail, garden_bee, red_tail, carder_bee, honey_bee, row.names = plant_names)
l = list(num = c(1L, 2L, 4L), s = c("hello", "world", "!"), real = c(4.7, 6.3, 7.7, 8.1))
l
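# Added indexing examples (illustrative only) for the objects built above
mat["buff_tail", "Thistle"]   # a single visit count, selected by row and column name
df["Thistle", ]               # all bee counts for one plant, as a data.frame row
l$real[2]                     # second element of the numeric component of the list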
|
#' an example pttkey
#'
#' a tiny example pttkey which also works with the simulated goniometer output see\code{\link[monitorgonio]{simulate_gonio}}.
#' @docType data
#' @usage data(pttkey)
#' @format a data.frame
#' @keywords datasets
"pttkey"
| /R/pttkey.R | no_license | williamcioffi/monitorgonio | R | false | false | 246 | r | #' an example pttkey
#'
#' a tiny example pttkey which also works with the simulated goniometer output see\code{\link[monitorgonio]{simulate_gonio}}.
#' @docType data
#' @usage data(pttkey)
#' @format a data.frame
#' @keywords datasets
"pttkey"
|
testlist <- list(mu = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), impl = NULL, sigma_impl = 0)
result <- do.call(metafolio::impl_error,testlist)
str(result) | /metafolio/inst/testfiles/impl_error/libFuzzer_impl_error/impl_error_valgrind_files/1612989244-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 401 | r | testlist <- list(mu = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), impl = NULL, sigma_impl = 0)
result <- do.call(metafolio::impl_error,testlist)
str(result) |
library(aod)
library(ggplot2)
library(Rcpp)
library(RMySQL)
mydb = dbConnect(MySQL(), user='rnduser', password='hongik_gwh', dbname='falling', host='52.79.138.56')
#dbListTables(mydb)
#dbListFields(mydb, 'RelationView')
#rs = dbSendQuery(mydb,"select relationId,gbByMindCount.relMatchStatus,U.userId, U.gender, TU.gender,U.age, TU.age,CASE When U.age-TU.age>0 and U.age-TU.age<=5 Then '1' When U.age-TU.age<0 and U.age-TU.age>=-5 then '-1' When U.age-TU.age>5 and U.age-TU.age<=10 Then '2' When U.age-TU.age>10 then '3' When U.age-TU.age>=-10 and U.age-TU.age<-5 then '-2' When U.age-TU.age<-10 then '-3' When U.age-TU.age=0 Then '0' END as agegrade, U.countryCode, TU.countryCode,CASE When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=10 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>0 Then '0' When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>60 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=1440 Then '1'ELSE '2' END as Timegrade,U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, CASE When countmind=0 Then '0' ELSE '1'END as CountMind, hintCheckStatus, foundCheckStatus, CASE When countdiary=0 Then '0' ELSE '1'END as CountDiary from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId,COUNT(M.mindId) countmind ,R.matchCheck relMatchStatus,COUNT(D.diaryId) countdiary from (RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId) LEFT OUTER JOIN DiaryView D ON R.relId=D.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId where U.age is not NULL and TU.age is not NULL")
rs = dbSendQuery(mydb,"select relationId,gbByMindCount.relMatchStatus,U.userId, U.gender, TU.gender,U.age, TU.age, CASE When U.age-TU.age>0 and U.age-TU.age<=5 Then '1' When U.age-TU.age<0 and U.age-TU.age>=-5 then '-1' When U.age-TU.age>5 and U.age-TU.age<=10 Then '2' When U.age-TU.age>10 then '3' When U.age-TU.age>=-10 and U.age-TU.age<-5 then '-2' When U.age-TU.age<-10 then '-3' When U.age-TU.age=0 Then '0' END as agegrade, U.countryCode, TU.countryCode,CASE When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=10 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>0 Then '0' When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>60 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=1440 Then '1'ELSE '2' END as Timegrade,U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, CASE When countmind=0 Then '0' ELSE '1'END mindcount, hintCheckStatus, foundCheckStatus, CASE When countdiary=0 Then '0' ELSE '1'END as diarycount from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId,COUNT(M.mindId) countmind ,R.matchCheck relMatchStatus,COUNT(D.diaryId) countdiary from (RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId) LEFT OUTER JOIN DiaryView D ON R.relId=D.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId where U.age is not NULL and TU.age is not NULL")
#select relationId,gbByMindCount.relMatchStatus, U.gender, TU.gender,U.age, TU.age,U.age-TU.age as agegrade, U.countryCode, TU.countryCode,TimestampDiff(hour,U.fallingStartDate,TU.fallingStartDate) as Timegrade, U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, countMind, hintCheckStatus, foundCheckStatus from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId, COUNT(M.mindId) countMind,R.matchCheck relMatchStatus from RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId")
data = fetch(rs, n=-1)
write.csv(data,"mydata10.csv")
mydata <- read.csv("mydata10.csv")
#mydata <-read.csv("xtabtest.csv")
## view the first few rows of the data
head(mydata)
summary(mydata)
sapply(mydata, sd)
## two-way contingency table of categorical outcome and predictors
## we want to make sure there are not 0 cells
#xtabs(~ relMatchStatus +Timegrade+ agegrade+hintCheckStatus+foundCheckStatus, data = mydata)
xtabs(~ relMatchStatus +gender+agegrade+Timegrade+mindcount+hintCheckStatus+foundCheckStatus+diarycount, data = mydata)
mydata$gender <- factor(mydata$gender)
mydata$agegrade <- factor(mydata$agegrade)
mydata$Timegrade <- factor(mydata$Timegrade)
mydata$mindcount <- factor(mydata$mindcount)
mydata$hintCheckStatus <- factor(mydata$hintCheckStatus)
mydata$foundCheckStatus<- factor(mydata$foundCheckStatus)
mydata$relMatchStatus <- factor(mydata$relMatchStatus)
mydata$diarycount <- factor(mydata$diarycount)
mydata$mindcount
mylogit <- glm(relMatchStatus ~ +mindcount+gender+agegrade+Timegrade+diarycount, data = mydata, family = "binomial") ## X2 = 7.1, df = 5, P(> X2) = 0.21
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), Terms = 2:6)
mylogit <- glm(relMatchStatus ~ +gender+agegrade+Timegrade+hintCheckStatus+foundCheckStatus+mindcount+diarycount, data = mydata, family = "binomial" )
## X2 = 7.1, df = 5, P(> X2) = 0.21
#mylogit <- glm(relMatchStatus ~ + gender+agegrade+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial")## X2 = 14.6, df = 5, P(> X2) = 0.012
#mylogit <- glm(relMatchStatus ~ + gender+Timegrade+CountMind+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial") ## X2 = 25.7, df = 6, P(> X2) = 0.00025 ->> dropped to 0.27...
#mylogit <- glm(relMatchStatus ~ + gender+agegrade+Timegrade+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial")## X2 = 7.8, df = 6, P(> X2) = 0.26
#mylogit <- glm(relMatchStatus ~ +agegrade+CountMind+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial") ## X2 = 13.8, df = 5, P(> X2) = 0.017
summary(mylogit)
## CIs using profiled log-likelihood
confint(mylogit)
## CIs using standard errors
confint.default(mylogit)
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), Terms = 2:2)
l <- cbind(0,0,0,1,-1,0)
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), L = l)
## odds ratios only
exp(coef(mylogit))
## odds ratios and 95% CI
exp(cbind(OR = coef(mylogit), confint(mylogit)))
## predicted probabilities over mindcount and diarycount, with the remaining factors held at their first level
newdata1 <- with(mydata, expand.grid(
  gender = levels(gender)[1],
  agegrade = levels(agegrade)[1],
  Timegrade = levels(Timegrade)[1],
  hintCheckStatus = levels(hintCheckStatus)[1],
  foundCheckStatus = levels(foundCheckStatus)[1],
  mindcount = levels(mindcount),
  diarycount = levels(diarycount),
  stringsAsFactors = FALSE))
## view data frame
newdata1
newdata1$matchP <- predict(mylogit, newdata = newdata1, type = "response")
newdata1
## link-scale predictions with standard errors, converted to probabilities
newdata3 <- cbind(newdata1, predict(mylogit, newdata = newdata1, type = "link", se = TRUE))
newdata3 <- within(newdata3, {
  PredictedProb <- plogis(fit)
  LL <- plogis(fit - (1.96 * se.fit))
  UL <- plogis(fit + (1.96 * se.fit))
})
## view first few rows of final dataset
head(newdata3)
ggplot(newdata3, aes(x = mindcount, y = PredictedProb, colour = diarycount, group = diarycount)) +
  geom_errorbar(aes(ymin = LL, ymax = UL), width = .1) +
  geom_line() +
  geom_point(size = 2)
with(mylogit, null.deviance - deviance)
with(mylogit, df.null - df.residual)
with(mylogit, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE))
logLik(mylogit)
| /logisticregression.R | no_license | eunsooLim/GoodWillHUnting_Analysis | R | false | false | 7,995 | r | library(aod)
library(ggplot2)
library(Rcpp)
library(RMySQL)
mydb = dbConnect(MySQL(), user='rnduser', password='hongik_gwh', dbname='falling', host='52.79.138.56')
#dbListTables(mydb)
#dbListFields(mydb, 'RelationView')
#rs = dbSendQuery(mydb,"select relationId,gbByMindCount.relMatchStatus,U.userId, U.gender, TU.gender,U.age, TU.age,CASE When U.age-TU.age>0 and U.age-TU.age<=5 Then '1' When U.age-TU.age<0 and U.age-TU.age>=-5 then '-1' When U.age-TU.age>5 and U.age-TU.age<=10 Then '2' When U.age-TU.age>10 then '3' When U.age-TU.age>=-10 and U.age-TU.age<-5 then '-2' When U.age-TU.age<-10 then '-3' When U.age-TU.age=0 Then '0' END as agegrade, U.countryCode, TU.countryCode,CASE When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=10 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>0 Then '0' When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>60 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=1440 Then '1'ELSE '2' END as Timegrade,U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, CASE When countmind=0 Then '0' ELSE '1'END as CountMind, hintCheckStatus, foundCheckStatus, CASE When countdiary=0 Then '0' ELSE '1'END as CountDiary from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId,COUNT(M.mindId) countmind ,R.matchCheck relMatchStatus,COUNT(D.diaryId) countdiary from (RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId) LEFT OUTER JOIN DiaryView D ON R.relId=D.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId where U.age is not NULL and TU.age is not NULL")
rs = dbSendQuery(mydb,"select relationId,gbByMindCount.relMatchStatus,U.userId, U.gender, TU.gender,U.age, TU.age, CASE When U.age-TU.age>0 and U.age-TU.age<=5 Then '1' When U.age-TU.age<0 and U.age-TU.age>=-5 then '-1' When U.age-TU.age>5 and U.age-TU.age<=10 Then '2' When U.age-TU.age>10 then '3' When U.age-TU.age>=-10 and U.age-TU.age<-5 then '-2' When U.age-TU.age<-10 then '-3' When U.age-TU.age=0 Then '0' END as agegrade, U.countryCode, TU.countryCode,CASE When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=10 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>0 Then '0' When TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)>60 and TimestampDiff(minute,U.fallingStartDate,TU.fallingStartDate)<=1440 Then '1'ELSE '2' END as Timegrade,U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, CASE When countmind=0 Then '0' ELSE '1'END mindcount, hintCheckStatus, foundCheckStatus, CASE When countdiary=0 Then '0' ELSE '1'END as diarycount from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId,COUNT(M.mindId) countmind ,R.matchCheck relMatchStatus,COUNT(D.diaryId) countdiary from (RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId) LEFT OUTER JOIN DiaryView D ON R.relId=D.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId where U.age is not NULL and TU.age is not NULL")
#select relationId,gbByMindCount.relMatchStatus, U.gender, TU.gender,U.age, TU.age,U.age-TU.age as agegrade, U.countryCode, TU.countryCode,TimestampDiff(hour,U.fallingStartDate,TU.fallingStartDate) as Timegrade, U.fallingStartDate, TU.fallingStartDate, U.invitedDateByTarget, TU.invitedDateByTarget, U.facebookFriendsCount, TU.facebookFriendsCount, U.emotionId, TU.emotionId, matchDate, inviteCheck, countMind, hintCheckStatus, foundCheckStatus from (select M.foundCheck foundCheckStatus, M.hintCheck hintCheckStatus, R.mainUserId mainUserId,R.matchDate, R.inviteCheck, R.relId relationId, R.targetUserId, COUNT(M.mindId) countMind,R.matchCheck relMatchStatus from RelationView R LEFT OUTER JOIN MindView M ON R.relId=M.relId group by R.relId) as gbByMindCount JOIN UserView as U on gbByMindCount.mainUserId=U.userId JOIN UserView as TU ON TU.userId=gbByMindCount.targetUserId")
data = fetch(rs, n=-1)
write.csv(data,"mydata10.csv")
mydata <- read.csv("mydata10.csv")
#mydata <-read.csv("xtabtest.csv")
## view the first few rows of the data
head(mydata)
summary(mydata)
sapply(mydata, sd)
## two-way contingency table of categorical outcome and predictors
## we want to make sure there are not 0 cells
#xtabs(~ relMatchStatus +Timegrade+ agegrade+hintCheckStatus+foundCheckStatus, data = mydata)
xtabs(~ relMatchStatus +gender+agegrade+Timegrade+mindcount+hintCheckStatus+foundCheckStatus+diarycount, data = mydata)
mydata$gender <- factor(mydata$gender)
mydata$agegrade <- factor(mydata$agegrade)
mydata$Timegrade <- factor(mydata$Timegrade)
mydata$mindcount <- factor(mydata$mindcount)
mydata$hintCheckStatus <- factor(mydata$hintCheckStatus)
mydata$foundCheckStatus<- factor(mydata$foundCheckStatus)
mydata$relMatchStatus <- factor(mydata$relMatchStatus)
mydata$diarycount <- factor(mydata$diarycount)
mydata$mindcount
mylogit <- glm(relMatchStatus ~ +mindcount+gender+agegrade+Timegrade+diarycount, data = mydata, family = "binomial") ## X2 = 7.1, df = 5, P(> X2) = 0.21
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), Terms = 2:6)
mylogit <- glm(relMatchStatus ~ +gender+agegrade+Timegrade+hintCheckStatus+foundCheckStatus+mindcount+diarycount, data = mydata, family = "binomial" )
## X2 = 7.1, df = 5, P(> X2) = 0.21
#mylogit <- glm(relMatchStatus ~ + gender+agegrade+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial")## X2 = 14.6, df = 5, P(> X2) = 0.012
#mylogit <- glm(relMatchStatus ~ + gender+Timegrade+CountMind+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial") ## X2 = 25.7, df = 6, P(> X2) = 0.00025 ->> dropped to 0.27...
#mylogit <- glm(relMatchStatus ~ + gender+agegrade+Timegrade+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial")## X2 = 7.8, df = 6, P(> X2) = 0.26
#mylogit <- glm(relMatchStatus ~ +agegrade+CountMind+hintCheckStatus+foundCheckStatus+CountDiary, data = mydata, family = "binomial") ## X2 = 13.8, df = 5, P(> X2) = 0.017
summary(mylogit)
## CIs using profiled log-likelihood
confint(mylogit)
## CIs using standard errors
confint.default(mylogit)
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), Terms = 2:2)
l <- cbind(0,0,0,1,-1,0)
wald.test(b = coef(mylogit), Sigma = vcov(mylogit), L = l)
## odds ratios only
exp(coef(mylogit))
## odds ratios and 95% CI
exp(cbind(OR = coef(mylogit), confint(mylogit)))
## predicted probabilities over mindcount and diarycount, with the remaining factors held at their first level
newdata1 <- with(mydata, expand.grid(
  gender = levels(gender)[1],
  agegrade = levels(agegrade)[1],
  Timegrade = levels(Timegrade)[1],
  hintCheckStatus = levels(hintCheckStatus)[1],
  foundCheckStatus = levels(foundCheckStatus)[1],
  mindcount = levels(mindcount),
  diarycount = levels(diarycount),
  stringsAsFactors = FALSE))
## view data frame
newdata1
newdata1$matchP <- predict(mylogit, newdata = newdata1, type = "response")
newdata1
## link-scale predictions with standard errors, converted to probabilities
newdata3 <- cbind(newdata1, predict(mylogit, newdata = newdata1, type = "link", se = TRUE))
newdata3 <- within(newdata3, {
  PredictedProb <- plogis(fit)
  LL <- plogis(fit - (1.96 * se.fit))
  UL <- plogis(fit + (1.96 * se.fit))
})
## view first few rows of final dataset
head(newdata3)
ggplot(newdata3, aes(x = mindcount, y = PredictedProb, colour = diarycount, group = diarycount)) +
  geom_errorbar(aes(ymin = LL, ymax = UL), width = .1) +
  geom_line() +
  geom_point(size = 2)
with(mylogit, null.deviance - deviance)
with(mylogit, df.null - df.residual)
with(mylogit, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE))
logLik(mylogit)
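## Added sketch (illustrative, not in the original script): McFadden's pseudo R-squared
## from the deviances already computed above (1 minus residual over null deviance)
# with(mylogit, 1 - deviance / null.deviance)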
|
library(data.table)
library(ggplot2)
library(ggthemes)
options(warn = -1)  # turn warnings off
setwd(".")
get_error_info <- function(filename)
{
df <- read.table(filename,header=TRUE,sep=",")
order_indices <- order(df$labels)
error_df <- df[order_indices,]
return(error_df)
}
get_r2 <- function(a,b)
{
v <- cor(a,b,method="pearson")^2
return(v)
}
get_mse <- function(a,b)
{
v <- sum((a-b)^2)/length(a);
return(v)
}
get_mae <- function(a,b)
{
v <- sum(abs(a-b))/length(a);
return(v)
}
get_pearson <- function(a,b)
{
v <- cor(a,b,method="pearson");
return(v)
}
get_correlation_plot <- function(df,method)
{
par(mfrow=c(1,2))
plot(x=df$labels,y=df$predictions,
xlab="True Pchembl Values",ylab ="Predicted Pchembl Values",col="red",ylim=c(3,11),xlim=c(3,11))
abline(0,1,col="black")
legend(3.2,11, legend=paste0("Linear Fit of ",method),
col="black",cex=0.8)
df_r2 <- get_r2(df$predictions,df$labels)
df_mse <- get_mse(df$predictions,df$labels)
df_mae <- get_mae(df$predictions,df$labels)
df_pearson <- get_pearson(df$predictions,df$labels)
text(9,3.0,labels=sprintf("R2=%.3f",df_r2),cex=1.2)
text(9,3.5,labels=sprintf("Pearson r=%.3f",df_pearson),cex=1.2)
text(9,4.0,labels=sprintf("MAE=%.3f",df_mae),cex=1.2)
text(9,4.5,labels=sprintf("MSE=%.3f",df_mse),cex=1.2)
residuals <- df$predictions-df$labels
h <- hist(residuals,breaks=20,main="",xlab=paste0("Regular Residual of ",method),ylab="Count",freq=T)
multiplier <- h$counts/h$density
df_density <- density(residuals)
df_density$y <- df_density$y*multiplier
myx <- seq(min(residuals), max(residuals), length.out= 100)
df_normal <- dnorm(x=myx,mean=mean(residuals),sd=sd(residuals))
lines(myx, df_normal * multiplier[1], col = "blue", lwd = 2)
}
conver_character <- function(df)
{
df$uniprot_accession <- as.character(as.vector(df$uniprot_accession))
df$standard_inchi_key <- as.character(as.vector(df$standard_inchi_key))
return(df)
}
#Get data frames with errors
rf_error_df <- get_error_info("../results/RF_supervised_test_predictions.csv")
xgb_error_df <- get_error_info("../results/XGB_supervised_test_predictions.csv")
svm_error_df <- get_error_info("../results/SVM_supervised_test_predictions.csv")
cnn_error_df <- get_error_info("../results/cnn_supervised_test_predictions.csv")
lstm_error_df <- get_error_info("../results/lstm_supervised_test_predictions.csv")
cnn_lstm_error_df <- get_error_info("../results/cnn_lstm_supervised_test_predictions.csv")
gan_cnn_error_df <- get_error_info("../results/GAT_model_prediction_on_Test_set.csv")
rf_error_df <- conver_character(rf_error_df)
svm_error_df <- conver_character(svm_error_df)
xgb_error_df <- conver_character(xgb_error_df)
cnn_error_df <- conver_character(cnn_error_df)
lstm_error_df <- conver_character(lstm_error_df)
cnn_lstm_error_df <- conver_character(cnn_lstm_error_df)
gan_cnn_error_df <- conver_character(gan_cnn_error_df)
rf_error_df <- rf_error_df[order(rf_error_df[,1],rf_error_df[,2]),]
svm_error_df <- svm_error_df[order(svm_error_df[,1],svm_error_df[,2]),]
xgb_error_df <- xgb_error_df[order(xgb_error_df[,1],xgb_error_df[,2]),]
cnn_error_df <- cnn_error_df[order(cnn_error_df[,1],cnn_error_df[,2]),]
lstm_error_df <- lstm_error_df[order(lstm_error_df[,1],lstm_error_df[,2]),]
cnn_lstm_error_df <- cnn_lstm_error_df[order(cnn_lstm_error_df[,1],cnn_lstm_error_df[,2]),]
gan_cnn_error_df <- gan_cnn_error_df[order(gan_cnn_error_df[,1],gan_cnn_error_df[,2]),]
#Make data frame with predictions
N <- nrow(cnn_error_df)
predictions_df <- data.frame(Method = c(rep("True",N),rep("SVM",N),rep("XGB",N),
rep("CNN",N),rep("LSTM",N),rep("CNN-LSTM",N),rep("GA-CNN",N)),
Values = c(cnn_error_df$labels,svm_error_df$predictions,
xgb_error_df$predictions,
cnn_error_df$predictions,
lstm_error_df$predictions,
cnn_lstm_error_df$predictions,gan_cnn_error_df$predictions),
Range = c(c(1:N),c(1:N),c(1:N),c(1:N),c(1:N),c(1:N),c(1:N)))
predictions_df$Values <- as.numeric(as.vector(predictions_df$Values))
predictions_df$Range <- as.numeric(as.vector(predictions_df$Range))
sample <- seq(1,N,30)
predictions_df_revised <- predictions_df[predictions_df$Range%in% sample,]
g3 <- ggplot(predictions_df_revised,aes(Range,Values,colour=Method)) + geom_point() +
geom_smooth(se=FALSE,method=lm,formula=y ~ splines::bs(x, 12)) + #scale_colour_wsj("colors6") +
xlab("Test Samples") + ylab("Pchembl Value") + theme_bw() +
theme(axis.text.x = element_text(size=10),axis.text.y = element_text(size=10),
axis.title.x = element_text(size=14),
axis.title.y = element_text(size=14))
#Save the image on disk
ggsave(filename="../results/Fitting_plot_for_pchembl_values.pdf",plot = g3,
device=pdf(),height=8,width=10,dpi = 300)
dev.off()
pdf(file="../results/RF_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(rf_error_df,"RF")
dev.off()
pdf(file="../results/SVM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(svm_error_df,"SVM")
dev.off()
pdf(file="../results/XGB_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(xgb_error_df,"XGB")
dev.off()
pdf(file="../results/CNN_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(cnn_error_df,"CNN")
dev.off()
pdf(file="../results/LSTM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(lstm_error_df,"LSTM")
dev.off()
pdf(file="../results/CNN_LSTM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(cnn_lstm_error_df,"CNN-LSTM")
dev.off()
pdf(file="../results/GAN_CNN_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(gan_cnn_error_df,"GAT-CNN")
dev.off()
| /scripts/make_error_correlation_plots.R | no_license | elbasir/Drug-Repurposing | R | false | false | 5,992 | r | library(data.table)
library(ggplot2)
library(ggthemes)
options(warn = -1)  # turn warnings off
setwd(".")
get_error_info <- function(filename)
{
df <- read.table(filename,header=TRUE,sep=",")
order_indices <- order(df$labels)
error_df <- df[order_indices,]
return(error_df)
}
get_r2 <- function(a,b)
{
v <- cor(a,b,method="pearson")^2
return(v)
}
get_mse <- function(a,b)
{
v <- sum((a-b)^2)/length(a);
return(v)
}
get_mae <- function(a,b)
{
v <- sum(abs(a-b))/length(a);
return(v)
}
get_pearson <- function(a,b)
{
v <- cor(a,b,method="pearson");
return(v)
}
get_correlation_plot <- function(df,method)
{
par(mfrow=c(1,2))
plot(x=df$labels,y=df$predictions,
xlab="True Pchembl Values",ylab ="Predicted Pchembl Values",col="red",ylim=c(3,11),xlim=c(3,11))
abline(0,1,col="black")
legend(3.2,11, legend=paste0("Linear Fit of ",method),
col="black",cex=0.8)
df_r2 <- get_r2(df$predictions,df$labels)
df_mse <- get_mse(df$predictions,df$labels)
df_mae <- get_mae(df$predictions,df$labels)
df_pearson <- get_pearson(df$predictions,df$labels)
text(9,3.0,labels=sprintf("R2=%.3f",df_r2),cex=1.2)
text(9,3.5,labels=sprintf("Pearson r=%.3f",df_pearson),cex=1.2)
text(9,4.0,labels=sprintf("MAE=%.3f",df_mae),cex=1.2)
text(9,4.5,labels=sprintf("MSE=%.3f",df_mse),cex=1.2)
residuals <- df$predictions-df$labels
h <- hist(residuals,breaks=20,main="",xlab=paste0("Regular Residual of ",method),ylab="Count",freq=T)
multiplier <- h$counts/h$density
df_density <- density(residuals)
df_density$y <- df_density$y*multiplier
myx <- seq(min(residuals), max(residuals), length.out= 100)
df_normal <- dnorm(x=myx,mean=mean(residuals),sd=sd(residuals))
lines(myx, df_normal * multiplier[1], col = "blue", lwd = 2)
}
conver_character <- function(df)
{
df$uniprot_accession <- as.character(as.vector(df$uniprot_accession))
df$standard_inchi_key <- as.character(as.vector(df$standard_inchi_key))
return(df)
}
#Get data frames with errors
rf_error_df <- get_error_info("../results/RF_supervised_test_predictions.csv")
xgb_error_df <- get_error_info("../results/XGB_supervised_test_predictions.csv")
svm_error_df <- get_error_info("../results/SVM_supervised_test_predictions.csv")
cnn_error_df <- get_error_info("../results/cnn_supervised_test_predictions.csv")
lstm_error_df <- get_error_info("../results/lstm_supervised_test_predictions.csv")
cnn_lstm_error_df <- get_error_info("../results/cnn_lstm_supervised_test_predictions.csv")
gan_cnn_error_df <- get_error_info("../results/GAT_model_prediction_on_Test_set.csv")
rf_error_df <- conver_character(rf_error_df)
svm_error_df <- conver_character(svm_error_df)
xgb_error_df <- conver_character(xgb_error_df)
cnn_error_df <- conver_character(cnn_error_df)
lstm_error_df <- conver_character(lstm_error_df)
cnn_lstm_error_df <- conver_character(cnn_lstm_error_df)
gan_cnn_error_df <- conver_character(gan_cnn_error_df)
rf_error_df <- rf_error_df[order(rf_error_df[,1],rf_error_df[,2]),]
svm_error_df <- svm_error_df[order(svm_error_df[,1],svm_error_df[,2]),]
xgb_error_df <- xgb_error_df[order(xgb_error_df[,1],xgb_error_df[,2]),]
cnn_error_df <- cnn_error_df[order(cnn_error_df[,1],cnn_error_df[,2]),]
lstm_error_df <- lstm_error_df[order(lstm_error_df[,1],lstm_error_df[,2]),]
cnn_lstm_error_df <- cnn_lstm_error_df[order(cnn_lstm_error_df[,1],cnn_lstm_error_df[,2]),]
gan_cnn_error_df <- gan_cnn_error_df[order(gan_cnn_error_df[,1],gan_cnn_error_df[,2]),]
#Make data frame with predictions
N <- nrow(cnn_error_df)
predictions_df <- data.frame(Method = c(rep("True",N),rep("SVM",N),rep("XGB",N),
rep("CNN",N),rep("LSTM",N),rep("CNN-LSTM",N),rep("GA-CNN",N)),
Values = c(cnn_error_df$labels,svm_error_df$predictions,
xgb_error_df$predictions,
cnn_error_df$predictions,
lstm_error_df$predictions,
cnn_lstm_error_df$predictions,gan_cnn_error_df$predictions),
Range = c(c(1:N),c(1:N),c(1:N),c(1:N),c(1:N),c(1:N),c(1:N)))
predictions_df$Values <- as.numeric(as.vector(predictions_df$Values))
predictions_df$Range <- as.numeric(as.vector(predictions_df$Range))
sample <- seq(1,N,30)
predictions_df_revised <- predictions_df[predictions_df$Range%in% sample,]
g3 <- ggplot(predictions_df_revised,aes(Range,Values,colour=Method)) + geom_point() +
geom_smooth(se=FALSE,method=lm,formula=y ~ splines::bs(x, 12)) + #scale_colour_wsj("colors6") +
xlab("Test Samples") + ylab("Pchembl Value") + theme_bw() +
theme(axis.text.x = element_text(size=10),axis.text.y = element_text(size=10),
axis.title.x = element_text(size=14),
axis.title.y = element_text(size=14))
#Save the image on disk
ggsave(filename="../results/Fitting_plot_for_pchembl_values.pdf",plot = g3,
device=pdf(),height=8,width=10,dpi = 300)
dev.off()
pdf(file="../results/RF_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(rf_error_df,"RF")
dev.off()
pdf(file="../results/SVM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(svm_error_df,"SVM")
dev.off()
pdf(file="../results/XGB_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(xgb_error_df,"XGB")
dev.off()
pdf(file="../results/CNN_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(cnn_error_df,"CNN")
dev.off()
pdf(file="../results/LSTM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(lstm_error_df,"LSTM")
dev.off()
pdf(file="../results/CNN_LSTM_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(cnn_lstm_error_df,"CNN-LSTM")
dev.off()
pdf(file="../results/GAN_CNN_Residual_plot_for_pchembl_values.pdf",width = 12,height=7,pointsize=16)
get_correlation_plot(gan_cnn_error_df,"GAT-CNN")
dev.off()
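# Added sanity check (toy vectors, illustrative only): the error helpers defined above
# a <- c(1, 2, 3, 4); b <- c(1.1, 1.9, 3.2, 4.3)
# get_r2(a, b); get_pearson(a, b); get_mse(a, b); get_mae(a, b)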
|
\name{LNHP}
\alias{LNHP}
\alias{LN.prop}
\docType{data}
\title{London house price and hedonic data (SpatialPointsDataFrame)}
\description{
This data set is sampled from a house price data set provided by the Nationwide
Building Society of the UK, and combined with various hedonic variables.
}
\usage{data(LNHP)}
\format{
A \dQuote{SpatialPointsDataFrame} object named \dQuote{LN.prop}.
}
\details{
In this data set, attributes are interpreted as follows:
PURCHASE: sale price in GBP
FLOORSZ: floor size of the property in square metres;
BATH2: 1 if the property has 2 or more bathrooms, 0 otherwise;
BEDS2: 1 if the property has 2 or more bedrooms, 0 otherwise;
CENTHEAT: 1 if the property has central heating, 0 otherwise;
GARAGE1: 1 if the property has one or more garages, 0 otherwise;
BLDPWW1: 1 if the property was built prior to 1914, 0 otherwise;
BLDINTW: 1 if the property was built between 1914 and 1939, 0 otherwise;
BLD60S: 1 if the property was built between 1960 and 1969, 0 otherwise;
BLD70S: 1 if the property was built between 1970 and 1979, 0 otherwise;
BLD80S: 1 if the property was built between 1980 and 1989, 0 otherwise;
BLD90S: 1 if the property was built between 1990 and 2000, 0 otherwise;
TYPEDETCH: 1 if the property is detached (i.e. it is stand-alone), 0 otherwise;
TYPETRRD: 1 if the property is in a terrace of similar houses, 0 otherwise;
TYPEFLAT: 1 if the property is a flat or apartment, 0 otherwise;
PROF: percentage of the workforce in professional or managerial occupations in
the census enumeration district in which the house is located.
UNEMPLOY: percentage of unemployment in the census enumeration district
in which the house is located.
X: X coordinate.
Y: Y coordinate.
}
\references{
Lu, B., M. Charlton, P. Harris & A. S. Fotheringham (2014) Geographically weighted
regression with a non-Euclidean distance metric: a case study using hedonic house
price data. International Journal of Geographical Information Science, 28, 660-681.
}
\keyword{Point data, example}
| /man/LNHP.Rd | no_license | SunYa0/shp2graph | R | false | false | 2,152 | rd | \name{LNHP}
\alias{LNHP}
\alias{LN.prop}
\docType{data}
\title{London house price and hedonic data (SpatialPointsDataFrame)}
\description{
This data set is sampled from a house price data set provided by the Nationwide
Building Society of the UK, and combined with various hedonic variables.
}
\usage{data(LNHP)}
\format{
A \dQuote{SpatialPointsDataFrame} object named \dQuote{LN.prop}.
}
\details{
In this data set, attributes are interpreted as follows:
PURCHASE: sale price in GBP
FLOORSZ: floor size of the property in square metres;
BATH2: 1 if the property has 2 or more bathrooms, 0 otherwise;
BEDS2: 1 if the property has 2 or more bedrooms, 0 otherwise;
CENTHEAT: 1 if the property has central heating, 0 otherwise;
GARAGE1: 1 if the property has one or more garages, 0 otherwise;
BLDPWW1: 1 if the property was built prior to 1914, 0 otherwise;
BLDINTW: 1 if the property was built between 1914 and 1939, 0 otherwise;
BLD60S: 1 if the property was built between 1960 and 1969, 0 otherwise;
BLD70S: 1 if the property was built between 1970 and 1979, 0 otherwise;
BLD80S: 1 if the property was built between 1980 and 1989, 0 otherwise;
BLD90S: 1 if the property was built between 1990 and 2000, 0 otherwise;
TYPEDETCH: 1 if the property is detached (i.e. it is stand-alone), 0 otherwise;
TYPETRRD: 1 if the property is in a terrace of similar houses, 0 otherwise;
TYPEFLAT: 1 if the property is a flat or apartment, 0 otherwise;
PROF: percentage of the workforce in professional or managerial occupations in
the census enumeration district in which the house is located.
UNEMPLOY: percentage of unemployment in the census enumeration district
in which the house is located.
X: X coordinate.
Y: Y coordinate.
}
\references{
Lu, B., M. Charlton, P. Harris & A. S. Fotheringham (2014) Geographically weighted
regression with a non-Euclidean distance metric: a case study using hedonic house
price data. International Journal of Geographical Information Science, 28, 660-681.
}
\keyword{Point data, example}
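% Added illustrative example (not in the original file); assumes the package exports
% the LNHP dataset and the LN.prop object exactly as documented above.
\examples{
data(LNHP)
summary(LN.prop)
}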
|
#' ---
#' title: "Cubic Spline Model"
#' author: "Michael Clark"
#' date: ""
#' ---
#'
#'
#' See Wood (2017) Generalized Additive Models or my [document](https://m-clark.github.io/generalized-additive-models/).
library(tidyverse) # for processing and plotting
#' # Create the data
size = c(1.42,1.58,1.78,1.99,1.99,1.99,2.13,2.13,2.13,
2.32,2.32,2.32,2.32,2.32,2.43,2.43,2.78,2.98,2.98)
wear = c(4.0,4.2,2.5,2.6,2.8,2.4,3.2,2.4,2.6,4.8,2.9,
3.8,3.0,2.7,3.1,3.3,3.0,2.8,1.7)
x = size - min(size)
x = x / max(x)
d = data.frame(wear, x)
#' Cubic spline function
rk <- function(x, z) {
((z-0.5)^2 - 1/12) * ((x-0.5)^2 - 1/12)/4 -
((abs(x-z)-0.5)^4 - (abs(x-z)-0.5)^2/2 + 7/240) / 24
}
#' Generate the model matrix.
splX <- function(x, knots) {
q = length(knots) + 2 # number of parameters
n = length(x) # number of observations
X = matrix(1, n, q) # initialized model matrix
X[ ,2] = x # set second column to x
X[ ,3:q] = outer(x, knots, FUN = rk) # remaining to cubic spline basis
X
}
splS <- function(knots) {
q = length(knots) + 2
S = matrix(0, q, q) # initialize matrix
S[3:q, 3:q] = outer(knots, knots, FUN = rk) # fill in non-zero part
S
}
#' Matrix square root function. Note that there are various packages with their own.
matSqrt <- function(S) {
d = eigen(S, symmetric = T)
rS = d$vectors %*% diag(d$values^.5) %*% t(d$vectors)
rS
}
#' Penalized fitting function.
prsFit <- function(y, x, knots, lambda) {
q = length(knots) + 2 # dimension of basis
n = length(x) # number of observations
Xa = rbind(splX(x, knots), matSqrt(splS(knots))*sqrt(lambda)) # augmented model matrix
y[(n+1):(n+q)] = 0 # augment the data vector
lm(y ~ Xa - 1) # fit and return penalized regression spline
}
#' # Example 1
#' Unpenalized
#'
knots = 1:4/5
X = splX(x, knots) # generate model matrix
mod1 = lm(wear ~ X - 1) # fit model
xp = 0:100/100 # x values for prediction
Xp = splX(xp, knots) # prediction matrix
#' Visualize
ggplot(aes(x = x, y = wear), data = data.frame(x, wear)) +
geom_point(color = "#FF5500") +
geom_line(aes(x = xp, y = Xp %*% coef(mod1)),
data = data.frame(xp, Xp),
color = "#00AAFF") +
labs(x = 'Scaled Engine size', y = 'Wear Index') +
theme_minimal()
#' # Example 2
# Add penalty lambda
knots = 1:7/8
d2 = data.frame(x = xp)
for (i in c(.1, .01, .001, .0001, .00001, .000001)){
# fit penalized regression
mod2 = prsFit(
y = wear,
x = x,
knots = knots,
lambda = i
)
# spline choosing lambda
Xp = splX(xp, knots) # matrix to map parameters to fitted values at xp
LP = Xp %*% coef(mod2)
d2[, paste0('lambda = ', i)] = LP[, 1]
}
#' Examine
# head(d2)
#' Visualize via ggplot
d3 = d2 %>%
pivot_longer(cols = -x,
names_to = 'lambda',
values_to = 'value') %>%
mutate(lambda = fct_inorder(lambda))
ggplot(d3) +
geom_point(aes(x = x, y = wear), col = '#FF5500', data = d) +
geom_line(aes(x = x, y = value), col = "#00AAFF") +
facet_wrap(~lambda) +
theme_minimal()
| /ModelFitting/cubicsplines.R | no_license | carliedario/Miscellaneous-R-Code | R | false | false | 3,200 | r | #' ---
#' title: "Cubic Spline Model"
#' author: "Michael Clark"
#' date: ""
#' ---
#'
#'
#' See Wood (2017) Generalized Additive Models or my [document](https://m-clark.github.io/generalized-additive-models/).
library(tidyverse) # for processing and plotting
#' # Create the data
size = c(1.42,1.58,1.78,1.99,1.99,1.99,2.13,2.13,2.13,
2.32,2.32,2.32,2.32,2.32,2.43,2.43,2.78,2.98,2.98)
wear = c(4.0,4.2,2.5,2.6,2.8,2.4,3.2,2.4,2.6,4.8,2.9,
3.8,3.0,2.7,3.1,3.3,3.0,2.8,1.7)
x = size - min(size)
x = x / max(x)
d = data.frame(wear, x)
#' Cubic spline function
rk <- function(x, z) {
((z-0.5)^2 - 1/12) * ((x-0.5)^2 - 1/12)/4 -
((abs(x-z)-0.5)^4 - (abs(x-z)-0.5)^2/2 + 7/240) / 24
}
#' Generate the model matrix.
splX <- function(x, knots) {
q = length(knots) + 2 # number of parameters
n = length(x) # number of observations
X = matrix(1, n, q) # initialized model matrix
X[ ,2] = x # set second column to x
X[ ,3:q] = outer(x, knots, FUN = rk) # remaining to cubic spline basis
X
}
splS <- function(knots) {
q = length(knots) + 2
S = matrix(0, q, q) # initialize matrix
S[3:q, 3:q] = outer(knots, knots, FUN = rk) # fill in non-zero part
S
}
#' Matrix square root function. Note that there are various packages with their own.
matSqrt <- function(S) {
d = eigen(S, symmetric = T)
rS = d$vectors %*% diag(d$values^.5) %*% t(d$vectors)
rS
}
#' Penalized fitting function.
prsFit <- function(y, x, knots, lambda) {
q = length(knots) + 2 # dimension of basis
n = length(x) # number of observations
Xa = rbind(splX(x, knots), matSqrt(splS(knots))*sqrt(lambda)) # augmented model matrix
y[(n+1):(n+q)] = 0 # augment the data vector
lm(y ~ Xa - 1) # fit and return penalized regression spline
}
#' # Example 1
#' Unpenalized
#'
knots = 1:4/5
X = splX(x, knots) # generate model matrix
mod1 = lm(wear ~ X - 1) # fit model
xp = 0:100/100 # x values for prediction
Xp = splX(xp, knots) # prediction matrix
#' Visualize
ggplot(aes(x = x, y = wear), data = data.frame(x, wear)) +
geom_point(color = "#FF5500") +
geom_line(aes(x = xp, y = Xp %*% coef(mod1)),
data = data.frame(xp, Xp),
color = "#00AAFF") +
labs(x = 'Scaled Engine size', y = 'Wear Index') +
theme_minimal()
#' # Example 2
# Add penalty lambda
knots = 1:7/8
d2 = data.frame(x = xp)
for (i in c(.1, .01, .001, .0001, .00001, .000001)){
# fit penalized regression
mod2 = prsFit(
y = wear,
x = x,
knots = knots,
lambda = i
)
# spline choosing lambda
Xp = splX(xp, knots) # matrix to map parameters to fitted values at xp
LP = Xp %*% coef(mod2)
d2[, paste0('lambda = ', i)] = LP[, 1]
}
#' Examine
# head(d2)
#' Visualize via ggplot
d3 = d2 %>%
pivot_longer(cols = -x,
names_to = 'lambda',
values_to = 'value') %>%
mutate(lambda = fct_inorder(lambda))
ggplot(d3) +
geom_point(aes(x = x, y = wear), col = '#FF5500', data = d) +
geom_line(aes(x = x, y = value), col = "#00AAFF") +
facet_wrap(~lambda) +
theme_minimal()
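#' A quick sanity check (added; not in the original write-up): with `lambda = 0` the
#' augmented rows in `prsFit()` are all zero, so the penalized fit should reproduce the
#' unpenalized coefficients of Example 1.
# mod0 <- prsFit(y = wear, x = x, knots = 1:4/5, lambda = 0)
# all.equal(unname(coef(mod0)), unname(coef(mod1)))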
|
# Netherlands Biodiversity API
#
# Access to the digitised Natural History collection at the Naturalis Biodiversity Center
#
# OpenAPI spec version: v2
# Contact: support@naturalis.nl
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' SourceSystem Class
#'
#'
#'
#' @field code character
#' @field name character
#'
#' @section Methods:
#' \describe{
#'
#' \item{\code{$new()}}{
#'
#' Constructor SourceSystem object.
#'
#' }
#' \item{\code{$fromList(SourceSystemList)}}{
#'
#' Create SourceSystem object from list.
#'
#' }
#'
#' \item{\code{$toList()}}{
#'
#' Get list representation of SourceSystem.
#'
#' }
#' \item{\code{fromJSONString(SourceSystemJson)}}{
#'
#' Create SourceSystem object from JSON string.
#'
#' }
#' \item{\code{toJSONString(pretty=TRUE)}}{
#'
#' Get JSON representation of SourceSystem.
#'
#' }
#' }
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
SourceSystem <- R6::R6Class(
"SourceSystem",
public = list(
`code` = NULL,
`name` = NULL,
initialize = function(
`code`,
`name`) {
if (!missing(`code`)) {
stopifnot(
is.character(`code`),
length(`code`) == 1
)
self[["code"]] <- `code`
}
if (!missing(`name`)) {
stopifnot(
is.character(`name`),
length(`name`) == 1
)
self[["name"]] <- `name`
}
},
toList = function() {
SourceSystemList <- list()
if (!is.null(self[["code"]])) {
SourceSystemList[["code"]] <-
self[["code"]]
}
if (!is.null(self[["name"]])) {
SourceSystemList[["name"]] <-
self[["name"]]
}
## omit empty nested lists in returned list
SourceSystemList[vapply(
SourceSystemList,
length,
FUN.VALUE = integer(1)
) > 0]
},
fromList = function(SourceSystemList,
typeMapping = NULL) {
self[["code"]] <-
SourceSystemList[["code"]]
self[["name"]] <-
SourceSystemList[["name"]]
invisible(self)
},
toJSONString = function(pretty = TRUE) {
jsonlite::toJSON(
self$toList(),
simplifyVector = TRUE,
auto_unbox = TRUE,
pretty = pretty
)
},
fromJSONString = function(SourceSystemJson,
typeMapping = NULL) {
SourceSystemList <- jsonlite::fromJSON(
SourceSystemJson,
simplifyVector = FALSE
)
self[["code"]] <-
SourceSystemList[["code"]]
self[["name"]] <-
SourceSystemList[["name"]]
invisible(self)
}
)
)
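# Illustrative usage sketch (not part of the generated class; the field values below
# are invented for demonstration): round-trip a SourceSystem through its JSON form.
# ss <- SourceSystem$new(code = "COL", name = "Catalogue of Life")
# ss_json <- ss$toJSONString()              # serialize to JSON
# ss_copy <- SourceSystem$new()
# ss_copy$fromJSONString(ss_json)           # rebuild the object from the JSON string
# identical(ss$toList(), ss_copy$toList())  # TRUE if both fields survived the round trip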
| /R/SourceSystem.r | no_license | mbjoseph/nbaR | R | false | false | 2,711 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_and_reporting.R
\name{umxUnexplainedCausalNexus}
\alias{umxUnexplainedCausalNexus}
\title{umxUnexplainedCausalNexus}
\usage{
umxUnexplainedCausalNexus(from, delta, to, model = NULL)
}
\arguments{
\item{from}{A variable in the model for which you want to compute the effect of a change.}
\item{delta}{The amount to simulate changing \sQuote{from} by.}
\item{to}{The dependent variable that you want to watch changing.}
\item{model}{The model containing variables from and to.}
}
\description{
umxUnexplainedCausalNexus reports the effect of a change (delta) in a variable (from) on an output (to).
}
\examples{
\dontrun{
umxUnexplainedCausalNexus(from="yrsEd", delta = .5, to = "income35", model)
}
}
\references{
\itemize{
\item https://github.com/tbates/umx/
}
}
\seealso{
\itemize{
\item \code{\link[=mxCheckIdentification]{mxCheckIdentification()}}, \code{\link[=mxCompare]{mxCompare()}}
}
Other Modify or Compare Models:
\code{\link{umxEquate}()},
\code{\link{umxFixAll}()},
\code{\link{umxMI}()},
\code{\link{umxModify}()},
\code{\link{umxSetParameters}()},
\code{\link{umx}}
}
\concept{Modify or Compare Models}
| /man/umxUnexplainedCausalNexus.Rd | no_license | jishanling/umx | R | false | true | 1,205 | rd |
# ------------------------------------------------------------------------------
# percentile graphics with application rates
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# load data
# ------------------------------------------------------------------------------
library(gtable)
# also used below (these may already be attached by earlier scripts in this pipeline)
library(ggplot2)   # ggplot() and friends
library(scales)    # trans_breaks(), trans_format(), math_format()
library(dplyr)     # %>% and select_if()
library(grid)      # grid.newpage()
# cowplot and egg are called below with explicit :: qualification
# pwc output array
# recall: this was created in 03write_update_run_pwc and saved in 05_write_output_into_df
# recall: this is an array of all of the output_FOL002_parent_only_Custom_Parent_daily.csv files
load(paste(pwcdir, "io/pwcout.RData", sep = ""))
dim(pwcoutdf)
# subset Ave.Conc.H2O
pwc_h2_output <- pwcoutdf[,2,1:Nsims] #1depth, 2Ave.Conc.H20, 3Ave.Conc.benth, 4Peak.Conc.H20
dim(pwc_h2_output) #days*simulations
# subset Ave.Conc.benth
pwc_ben_output <- pwcoutdf[,3,1:Nsims] #1depth, 2Ave.Conc.H20, 3Ave.Conc.benth, 4Peak.Conc.H20
dim(pwc_ben_output) #days*simulations
# przm output array
# recall: this was created in 03write_update_run_pwc and saved in 05_write_output_into_df
# recall: this is an array of all of the output.zts files (dim = num_of_days*output_cols*sims)
load(paste(pwcdir, "io/przmout.RData", sep = ""))
dim(outputdf)
# subset RUNF0
przm_h2_output <- outputdf[,4,1:Nsims] #"YYYY","MM","DD","RUNF0","ESLS0","RFLX1","EFLX1","DCON1","INFL0"
dim(przm_h2_output) #days*simulations
# ------------------------------------------------------------------------------
# percentile plot: pwc Ave.Conc.H2O
# ------------------------------------------------------------------------------
# --------------------------------
# data set-up
# --------------------------------
dim(pwc_h2_output) #days*sims
# create blank matrix to fill with percentiles
percentiles <- matrix(data=NA, nrow=dim(pwc_h2_output)[1], ncol=8)
colnames(percentiles) <- c("day", "percent.001", "percent.023", "percent.159", "percent.5",
"percent.841", "percent.977", "percent.999")
percentiles <- as.data.frame(percentiles)
# date format
percentiles$day <- seq(as.Date("2008-01-01"), as.Date("2014-12-31"), by="days")
# compute percentiles
for (i in 1:dim(percentiles)[1]){
p001 <- quantile(pwc_h2_output[i,], probs=.001, na.rm=T)
percentiles[i,2] <- p001
p023 <- quantile(pwc_h2_output[i,], probs=.023, na.rm=T)
percentiles[i,3] <- p023
p159 <- quantile(pwc_h2_output[i,], probs=.159, na.rm=T)
percentiles[i,4] <- p159
p5 <- quantile(pwc_h2_output[i,], probs=.5, na.rm=T)
percentiles[i,5] <- p5
p841 <- quantile(pwc_h2_output[i,], probs=.841, na.rm=T)
percentiles[i,6] <- p841
p977 <- quantile(pwc_h2_output[i,], probs=.977, na.rm=T)
percentiles[i,7] <- p977
p999 <- quantile(pwc_h2_output[i,], probs=.999, na.rm=T)
percentiles[i,8] <- p999
}
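# More compact equivalent (illustrative sketch only; its results are not used below):
# the same seven quantiles for every day in a single apply() over the rows of
# pwc_h2_output, rather than the explicit loop above.
probs_sketch <- c(.001, .023, .159, .5, .841, .977, .999)
percentiles_sketch <- t(apply(pwc_h2_output, 1, quantile, probs = probs_sketch, na.rm = TRUE))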
percentiles$percent.001 <- percentiles$percent.001*1000000 #convert units to ug/L
percentiles$percent.023 <- percentiles$percent.023*1000000
percentiles$percent.159 <- percentiles$percent.159*1000000
percentiles$percent.5 <- percentiles$percent.5*1000000
percentiles$percent.841 <- percentiles$percent.841*1000000
percentiles$percent.977 <- percentiles$percent.977*1000000
percentiles$percent.999 <- percentiles$percent.999*1000000
# read in deterministic output
determ <- read.csv("C:/Users/echelsvi/git/yuan_urban_pesticides/deterministic/input/FOL002/outputs/output_FOL002_parent_only_Custom_Parent_daily.csv",
header= FALSE, sep= ",", skip = 5, stringsAsFactors = FALSE, row.names=NULL)
colnames(determ) <- c("Depth(m)","Ave.Conc.H20","Ave.Conc.benth","Peak.Conc.H20")
determ <- as.data.frame(determ)
# subset Ave.conc.H20, add to percentiles df
percentiles$deterministic <- determ$Ave.Conc.H20*1000000 #convert units to ug/L
# impose a false zero: floor values below 1e-8 so the lower ribbon stays plottable on the log10 axis
for (i in 1:dim(percentiles)[1]){
if (percentiles[i,2] < 1e-8){
percentiles[i,2] <- 1e-8
}
}
# --------------------------------
# read in observed data
# --------------------------------
obs_water <- read.csv(file="C:/Users/echelsvi/git/yuan_urban_pesticides/observed_concentrations/cdpr_stormdrain_bifenthrin_water_09-14_all.csv",
header=T, sep=",")
# change column formats
obs_water$date <- as.Date(obs_water$Sample.Date, format="%d-%b-%Y")
obs_water$Result <- as.numeric(levels(obs_water$Result))[obs_water$Result]
# subset folsom sites
obs_water_folsom <- obs_water[which(obs_water$Site.ID == "FOL002"), ]
# --------------------------------
# plot percentile data + observed data + tox rates
# --------------------------------
# set colors
sd3 <- "#08519c"
sd2 <- "#4292c6"
sd1 <- "#9ecae1"
med <- "#08519c"
det <- "#d9f0a3"
obs <- "#e31a1c"
fish <- "#fee391"
invert <- "#ec7014"
# plot
pwc_pplot <- ggplot(percentiles, aes(x=day, group=1)) +
geom_ribbon(aes(ymin=percent.001, ymax=percent.999, fill="3 SD")) +
geom_ribbon(aes(ymin=percent.023, ymax=percent.977, fill="2 SD")) +
geom_ribbon(aes(ymin=percent.159, ymax=percent.841, fill="1 SD")) +
geom_hline(aes(yintercept=0.075, color="Acute/Chronic Fish"), linetype="dashed", size=1) + #aquatic life benchmarks for fish and invertebrates
geom_hline(yintercept=0.04 , linetype="dashed", color=fish, size=1) +
geom_hline(yintercept=0.8, linetype="dashed", color=invert, size=1) +
geom_hline(aes(yintercept=0.0013 , color="Acute/Chronic Invertebrate"),linetype="dashed", size=1) +
geom_line(aes(y=percent.5, color="Probabilistic Median"), linetype="solid", size=1) + #probabilistic
geom_line(aes(y=deterministic, color="Deterministic"), linetype="solid", size=1) + #deterministic
geom_point(data=obs_water_folsom, aes(x=date, y=Result, color="CDPR Observed"), size=3)+ # CDPR observed data
scale_x_date(date_breaks="1 year", labels=NULL, limits=as.Date(c('2011-01-01', '2014-12-31')), expand=c(0.01,0.01)) +
scale_y_continuous(trans="log10", breaks=trans_breaks("log10", function(x) 10^x),
labels=trans_format("log10", math_format(10^.x)), limits=c(NA,10)) +
labs(title = "", x = "", y = "Bifenthrin Concentration \n in the Water Column (ug/L) (log10)", color = "") +
theme_bw() +
#theme(legend.justification=c(0,0), legend.position=c(0.01,0.01), legend.box="horizontal")+
#guides(colour = guide_legend(nrow = 1))+
theme(legend.position="bottom")+
scale_fill_manual(name="", values=c("3 SD"=sd3, "2 SD"=sd2, "1 SD" =sd1))+
scale_color_manual(name="", values=c("CDPR Observed"=obs, "Deterministic"=det,"Probabilistic Median" =med,
"Acute/Chronic Invertebrate"=invert, "Acute/Chronic Fish" = fish))+
theme(axis.text.y = element_text(size = 12))+ #axis text size
theme(axis.text.x = element_text(size = 12))+
theme(axis.title.y = element_text(size = 12, margin = margin(t = 0, r = 10, b = 0, l = 0)))+ #y axis label
theme(plot.title = element_text(size = 14))+
theme(legend.text = element_text(size = 10))
print(pwc_pplot)
# read in app rate data
calpip_s <- read.table("C:/Users/echelsvi/git/yuan_urban_pesticides/bifenthrin_application_rates/CALPIP/output_for_pwc_with_ma_folsom.txt",
header=F, sep= ",")
calpip_s$date <- seq(as.Date("2009-01-01"), as.Date("2014-12-31"), by="day")#format 1961-01-01
calpip_s <- calpip_s[,c("V6", "date")]
names(calpip_s) <- c("app_rate", "date")
a_plot <- ggplot(data=calpip_s, aes(x=date, y=app_rate)) +
geom_bar(stat="identity", fill="#525252") +
labs(title = "", x = "", y = "Bifenthrin Application (kg/ha)", color = "") +
theme_bw() +
theme(legend.position = "none") +
scale_x_date(date_breaks="1 year", date_labels="%Y", limits=as.Date(c('2011-01-01', '2014-12-31')), expand=c(0.01,0.01)) +
theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+
theme(axis.text.y = element_text(size = 12))+ #axis text size
theme(axis.text.x = element_text(size = 12))+
theme(axis.title.y = element_text(size = 12))
print(a_plot)
# read in weather file
precip <- read.table(file=paste(pwcdir_weather, "17484_grid_folsom.wea", sep=""), header=FALSE, sep=",")
colnames(precip) <- c("month", "day", "year", "precip_cm", "et_cm", "temp_c", "windspeed_cms", "solar_la")
precip$date <- seq(as.Date("2008-01-01"), as.Date("2014-12-31"), by="days")
p_plot <- ggplot(precip, aes(x=date,y=precip_cm))+
geom_bar(stat="identity", fill="black")+
theme_bw()+
labs(title = "", x = "", y = "Precipitation (cm)", color = "") +
scale_y_reverse() +
scale_x_date(date_breaks="1 year",labels=NULL, limits=as.Date(c('2011-01-01', '2014-12-31')), expand=c(0.01,0.01)) +
theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank())+
theme(axis.text.y = element_text(size = 12))+ #axis text size
theme(axis.text.x = element_text(size = 12))+
theme(axis.title.y = element_text(size = 12, margin = margin(t = 0, r = 25, b = 0, l = 0)))
print(p_plot)
# plot everything together
grid.newpage()
png(filename= paste(pwcdir, "figures/percentile_11-14_pwc_ave_h2_panel.png", sep=""),width=20, height=10, units="in",res=300)
# panel_plot <- cowplot::plot_grid(p_plot, pwc_pplot, a_plot, align = "h", nrow = 3, rel_heights = c(0.25, 0.5, 0.25)) # superseded by the egg::ggarrange() call below
panel_plot <- egg::ggarrange(p_plot, pwc_pplot, a_plot, heights = c(0.25, 0.5, 0.25))
print(panel_plot)
dev.off()
# ------------------------------------------------------------------------------
# percentile plot: pwc Ave.Conc.benth
# ------------------------------------------------------------------------------
# --------------------------------
# data set-up
# --------------------------------
load(paste(pwcdir, "io/con_fac_output.RData", sep = ""))
dim(con_fac_output) #sims*1
dim(pwc_ben_output) #days*sims
# convert to sediment concentrations
sed_output <- pwc_ben_output
for (c in 1:dim(sed_output)[2]){
this_con_fac <- con_fac_output[c,]
for (r in 1:dim(sed_output)[1]){
sed_output[r,c] <- sed_output[r,c]*this_con_fac
}
}
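# Equivalent vectorized form (illustrative sketch only; not used below): scale each
# simulation's column by its conversion factor in one sweep() call, assuming
# con_fac_output is the sims x 1 object loaded above.
sed_output_sketch <- sweep(pwc_ben_output, 2, con_fac_output[, 1], "*")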
# create blank matrix to fill with percentiles
percentiles <- matrix(data=NA, nrow=dim(sed_output)[1], ncol=8)
colnames(percentiles) <- c("day", "percent.001", "percent.023", "percent.159", "percent.5",
"percent.841", "percent.977", "percent.999")
percentiles <- as.data.frame(percentiles)
# date format
percentiles$day <- seq(as.Date("2008-01-01"), as.Date("2014-12-31"), by="days")
# compute percentiles
for (i in 1:dim(percentiles)[1]){
p001 <- quantile(sed_output[i,], probs=.001, na.rm=T)
percentiles[i,2] <- p001
p023 <- quantile(sed_output[i,], probs=.023, na.rm=T)
percentiles[i,3] <- p023
p159 <- quantile(sed_output[i,], probs=.159, na.rm=T)
percentiles[i,4] <- p159
p5 <- quantile(sed_output[i,], probs=.5, na.rm=T)
percentiles[i,5] <- p5
p841 <- quantile(sed_output[i,], probs=.841, na.rm=T)
percentiles[i,6] <- p841
p977 <- quantile(sed_output[i,], probs=.977, na.rm=T)
percentiles[i,7] <- p977
p999 <- quantile(sed_output[i,], probs=.999, na.rm=T)
percentiles[i,8] <- p999
}
percentiles$percent.001 <- percentiles$percent.001*1000000 #convert units to ug/L
percentiles$percent.023 <- percentiles$percent.023*1000000
percentiles$percent.159 <- percentiles$percent.159*1000000
percentiles$percent.5 <- percentiles$percent.5*1000000
percentiles$percent.841 <- percentiles$percent.841*1000000
percentiles$percent.977 <- percentiles$percent.977*1000000
percentiles$percent.999 <- percentiles$percent.999*1000000
# read in deterministic output
determ <- read.csv("C:/Users/echelsvi/git/yuan_urban_pesticides/deterministic/input/FOL002/outputs/output_FOL002_parent_only_Custom_Parent_daily.csv",
header= FALSE, sep= ",", skip = 5, stringsAsFactors = FALSE, row.names=NULL)
colnames(determ) <- c("Depth(m)","Ave.Conc.H20","Ave.Conc.benth","Peak.Conc.H20")
determ <- as.data.frame(determ)
# read conversion factor from output
con <- file("C:/Users/echelsvi/git/yuan_urban_pesticides/deterministic/input/FOL002/outputs/output_FOL002_parent_only_Custom_Parent.txt")
open(con)
con_fac_line <- read.table(con,skip=15,nrow=1) #16-th line
con_fac <- as.numeric(con_fac_line%>%select_if(is.numeric))
print(con_fac)
close(con)
# subset Ave.conc.benth, conversions, add to percentiles df
percentiles$deterministic <- determ$Ave.Conc.benth*1000000*con_fac #convert units to ug/L
# impose a false zero: floor values below 1e-8 so the lower ribbon stays plottable on the log10 axis
for (i in 1:dim(percentiles)[1]){
if (percentiles[i,2] < 1e-8){
percentiles[i,2] <- 1e-8
}
}
# --------------------------------
# read in observed data
# --------------------------------
obs_sed <- read.csv(file="C:/Users/echelsvi/git/yuan_urban_pesticides/observed_concentrations/cdpr_stormdrain_bifenthrin_sediment_09-14_all.csv",
header=T, sep=",")
# subset folsom sites
obs_sed_folsom <- obs_sed[which(obs_sed$Site.ID == "FOL002"), ]
# change column formats
obs_sed_folsom$date <- as.Date(obs_sed_folsom$Sample.Date, format="%d-%b-%y")
# --------------------------------
# plot percentiles
# --------------------------------
# set colors
sd3 <- "#6a51a3"
sd2 <- "#807dba"
sd1 <- "#bcbddc"
med <- "#6a51a3"
det <- "#d9f0a3"
fish <- "#fee391"
invert <- "#ec7014"
# plot
pwc_pplot <- ggplot(percentiles, aes(x=day, group=1)) +
geom_ribbon(aes(ymin=percent.001, ymax=percent.999, fill="3 SD")) +
geom_ribbon(aes(ymin=percent.023, ymax=percent.977, fill="2 SD")) +
geom_ribbon(aes(ymin=percent.159, ymax=percent.841, fill="1 SD")) +
geom_hline(yintercept=200, linetype="dashed", color=invert, size=1) +
geom_hline(aes(yintercept=0.13 , color="Acute/Chronic Invertebrate"),linetype="dashed", size=1) +
geom_line(aes(y=percent.5, color="Probabilistic Median"), linetype="solid", size=1) +
geom_line(aes(y=deterministic, color="Deterministic"), linetype="solid", size=1) +
geom_point(data=obs_sed_folsom, aes(x=date, y=Result, color="CDPR Observed"), size=3)+ # CDPR observed data
scale_x_date(date_breaks="1 year", labels=NULL, limits=as.Date(c('2011-01-01', '2014-12-31')), expand=c(0.01,0.01)) +
scale_y_continuous(trans="log10", breaks=trans_breaks("log10", function(x) 10^x),
labels=trans_format("log10", math_format(10^.x)), limits=c(NA,10000)) +
labs(title = "", x = "", y = "Bifenthrin Sediment Concentration \n (total mass, ug)/(dry sed mass,kg) (log10)", color = "") +
theme_bw() +
#theme(legend.justification=c(0,0), legend.position=c(0.01,0.01), legend.box="horizontal")+
#guides(colour = guide_legend(nrow = 1))+
theme(legend.position = "bottom")+
scale_fill_manual(name="", values=c("3 SD"=sd3, "2 SD"=sd2, "1 SD" =sd1))+
scale_color_manual(name="", values=c("CDPR Observed"=obs, "Deterministic"=det,"Probabilistic Median" =med,
"Acute/Chronic Invertebrate"=invert))+
theme(axis.text.y = element_text(size = 12))+ #axis text size
theme(axis.text.x = element_text(size = 12))+
theme(axis.title.y = element_text(size = 12, margin = margin(t = 0, r = 10, b = 0, l = 0)))+
theme(plot.title = element_text(size = 14))+
theme(legend.text = element_text(size = 10))
print(pwc_pplot)
dev.off()
# plot everything together
grid.newpage()
png(filename= paste(pwcdir, "figures/percentile_11-14_pwc_ave_benthic_panel.png", sep=""),width=20, height=10, units="in",res=300)
# panel_plot <- cowplot::plot_grid(p_plot, pwc_pplot, a_plot, align = "h", nrow = 3, rel_heights = c(0.25, 0.5, 0.25)) # superseded by the egg::ggarrange() call below
panel_plot <- egg::ggarrange(p_plot, pwc_pplot, a_plot, heights = c(0.25, 0.5, 0.25))
print(panel_plot)
dev.off()
# ------------------------------------------------------------------------------
# the end
# ------------------------------------------------------------------------------ | /probabilistic/FOL002/src/08_percentile_graphics_with_observed_data_and_alb_panels.R | no_license | puruckertom/yuan_urban_pesticides | R | false | false | 15,696 | r |
# This module takes in Rich Holowczak's option quote data and returns implied volatilities in a standard format.
# It relies on Black-Scholes helpers -- BSFormula(), BSImpliedVolCall() and BSImpliedVolPut() --
# which are not defined in this file and must be sourced beforehand.
mthCodes <- c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S",
"T", "U", "V", "W", "X");
findMonth <- function(mthCode){(which(mthCodes==mthCode)-1)%%12+1;}
callPut <- function(mthCode){((which(mthCodes==mthCode))<=12);}
generateIvols <- function(spxData){
startDate <- as.Date(unique(spxData$Date),format="%m/%d/%y");
year <- spxData$Expiration_Year+2000;
mthCode <-spxData$Expiration_Month_Code;
month <- sapply(mthCode,findMonth);
day <- spxData$Expiration_Day_of_Month-1;
expiry <- as.Date(paste(year,month,day,sep="/"));
days <- difftime(expiry,startDate);
t <- as.numeric(days)/365.25; # Better code would compute business time
cp <- sapply(mthCode,callPut); # Call or put?
out2 <- NULL;
for (numdays in sort(unique(days))){ #One expiration at a time
vdc <- spxData[(days==numdays)&cp,];# Select just this expiration
vdp <- spxData[(days == numdays)&(!cp),];# Select just this expiration
expiration <- unique(expiry[(days == numdays)]);
callStrikes <- sort(unique(vdc$Strike_Price));
putStrikes <- sort(unique(vdp$Strike_Price));
strikes <- callStrikes[callStrikes%in%putStrikes];
nK <- length(strikes);
vols <- numeric(nK);
imid <- numeric(nK);
ca <- numeric(nK);
cb <- numeric(nK);
pa <- numeric(nK);
pb <- numeric(nK);
if((nK>=6)&(numdays>0)){
cbb <- vdc$Option_Bid_Price;
pbb <- vdp$Option_Bid_Price;
cba <- vdc$Option_Offer_Price;
pba <- vdp$Option_Offer_Price;
for (i in 1:nK){
k <- strikes[i];
cb[i] <- mean(cbb[vdc$Strike_Price ==k]);
pb[i] <- mean(pbb[vdp$Strike_Price ==k]);
ca[i] <- mean(cba[vdc$Strike_Price ==k]);
pa[i] <- mean(pba[vdp$Strike_Price ==k]);
ibid <- cb[i]-pb[i];
iask <- ca[i]-pa[i];
imid[i] <- (ibid+iask)/2;
}
pvGuess <- 1;
fGuess <- mean(imid+strikes);
nearTheMoneyStrikes <- strikes[order(abs(imid))][1:6];
include <- (strikes%in%nearTheMoneyStrikes); #Optimize only on near-the-money strikes
obj <- function(params){
f <- params[1]; pv <- params[2];
ifit <- pv*(f-strikes); # This is a vector length nk
errmid <- (ifit-imid)*include;
return(sum(errmid^2));
}
fit <- optim(c(fGuess,pvGuess),obj,method="L-BFGS-B", lower=c(min(strikes),0.5),upper=c(max(strikes),2));
ffit <- fit$par[1];
pvfit <- fit$par[2];
# Get implieds for OTM options
texp <- numdays/365.25;
ivolcbid <- BSImpliedVolCall(ffit, strikes, texp, 0,cb/pvfit);
ivolcask <- BSImpliedVolCall(ffit, strikes, texp, 0,ca/pvfit);
ivolpbid <- BSImpliedVolPut(ffit, strikes, texp, 0,pb/pvfit);
ivolpask <- BSImpliedVolPut(ffit, strikes, texp, 0,pa/pvfit);
ivolbid <- ivolcbid*(strikes>ffit)+ivolpbid*(strikes<=ffit); # This version outputs OTM vols
ivolask <- ivolcask*(strikes>ffit)+ivolpask*(strikes<=ffit); # This version outputs OTM vols
callBid <- BSFormula(ffit, strikes, texp, 0, ivolbid);
callAsk <- BSFormula(ffit, strikes, texp, 0, ivolask);
exclude <- (cb==0)|(pb==0)
callMid <- (callBid+callAsk)/2;
out <- data.frame(expiration,texp,strikes,ivolbid,ivolask,ffit,callMid);
out$callMid[exclude]<-NA;
out$ivolbid[exclude]<-NA;
colnames(out) <- c("Expiry","Texp","Strike","Bid","Ask","Fwd","CallMid");
out2 <- rbind(out2,out);
} # end of if()
} # End of for{}
out2$Bid[(out2$Bid<10^-8)]<-NA;
out3 <- out2[order(out2$Texp,out2$Strike),];# Sort columns for output
return(out3);
} # End of function
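# Usage sketch (the file name and helper script below are assumed, not part of this
# module): spxData must carry the Holowczak quote columns used above (Date,
# Expiration_Year, Expiration_Month_Code, Expiration_Day_of_Month, Strike_Price,
# Option_Bid_Price, Option_Offer_Price).
# source("BlackScholes.R"); # defines BSFormula, BSImpliedVolCall, BSImpliedVolPut
# spxData <- read.csv("spx_quotes_one_day.csv", stringsAsFactors=FALSE);
# ivols <- generateIvols(spxData);
# head(ivols); # columns: Expiry, Texp, Strike, Bid, Ask, Fwd, CallMid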
| /ipynb/computeImpliedVols.R | no_license | rongxinyu/capstone | R | false | false | 3,776 | r |
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{geom_segment}
\alias{geom_segment}
\title{Single line segments.}
\usage{
geom_segment(mapping = NULL, data = NULL, stat = "identity",
position = "identity", arrow = NULL, lineend = "butt", na.rm = FALSE,
...)
}
\arguments{
\item{arrow}{specification for arrow heads, as created by
arrow()}
\item{lineend}{Line end style (round, butt, square)}
\item{mapping}{The aesthetic mapping, usually constructed
with \code{\link{aes}} or \code{\link{aes_string}}. Only
needs to be set at the layer level if you are overriding
the plot defaults.}
\item{data}{A layer specific dataset - only needed if you
want to override the plot defaults.}
\item{stat}{The statistical transformation to use on the
data for this layer.}
\item{position}{The position adjustment to use for
overlapping points on this layer}
\item{na.rm}{If \code{FALSE} (the default), removes
missing values with a warning. If \code{TRUE} silently
removes missing values.}
\item{...}{other arguments passed on to
\code{\link{layer}}. This can include aesthetics whose
values you want to set, not map. See \code{\link{layer}}
for more details.}
}
\description{
Single line segments.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom",
"segment")}
}
\examples{
library(grid) # needed for arrow function
p <- ggplot(seals, aes(x = long, y = lat))
(p <- p + geom_segment(aes(xend = long + delta_long, yend = lat + delta_lat), arrow = arrow(length = unit(0.1,"cm"))))
if (require("maps")) {
xlim <- range(seals$long)
ylim <- range(seals$lat)
usamap <- data.frame(map("world", xlim = xlim, ylim = ylim, plot =
FALSE)[c("x","y")])
usamap <- rbind(usamap, NA, data.frame(map('state', xlim = xlim, ylim
= ylim, plot = FALSE)[c("x","y")]))
names(usamap) <- c("long", "lat")
p + geom_path(data = usamap) + scale_x_continuous(limits = xlim)
}
# You can also use geom_segment to recreate plot(type = "h") :
counts <- as.data.frame(table(x = rpois(100,5)))
counts$x <- as.numeric(as.character(counts$x))
with(counts, plot(x, Freq, type = "h", lwd = 10))
qplot(x, Freq, data = counts, geom = "segment",
yend = 0, xend = x, size = I(10))
# Adding line segments
library(grid) # needed for arrow function
b <- ggplot(mtcars, aes(wt, mpg)) + geom_point()
b + geom_segment(aes(x = 2, y = 15, xend = 2, yend = 25))
b + geom_segment(aes(x = 2, y = 15, xend = 3, yend = 15))
b + geom_segment(aes(x = 5, y = 30, xend = 3.5, yend = 25), arrow = arrow(length = unit(0.5, "cm")))
}
\seealso{
\code{\link{geom_path}} and \code{\link{geom_line}} for multi-
segment lines and paths.
}
| /man/geom_segment.Rd | no_license | nietzsche1993/ggplot2 | R | false | false | 2,669 | rd |
#params <- list(atr=30,p=0.89,series=10)
# Relies on TTR (ATR) and xts/quantmod (last), which are normally attached by the
# backtesting framework that sources this strategy file.
maxRows <- 1100
getOrders <- function(store, newRowList, currentPos, params) {
#cat("currentPos", formatC(currentPos,3),"\n")
  # (not implemented in this version) check whether current inventory is above a limit
  # and, if so, exit completely with a market order
if (is.null(store)) store <- initStore(newRowList,params$series)
store <- updateStore(store, newRowList, params$series)
marketOrders <- rep(0,length(newRowList))
limitOrders1 <- rep(0,length(newRowList))
limitOrders2 <- rep(0,length(newRowList))
limitPrices1 <- rep(0,length(newRowList))
limitPrices2 <- rep(0,length(newRowList))
  # use the range (High-Low) as an indicator of a reasonable "spread" for
  # this pseudo market-making strategy
if (store$iter > params$atr) {
startIndexatr <- store$iter - params$atr - 1
for (i in 1:length(params$series)) {
High<-store$high[startIndexatr:store$iter,i]
Low<-store$low[startIndexatr:store$iter,i]
Close<-store$cl[startIndexatr:store$iter,i]
HLC<-cbind(High,Low,Close)
atr<-last(as.matrix(ATR(HLC,n=params$atr))[,'atr'])
limitOrders1 <- c(0,0,0,0,0,0,0,0,0,params$posSizes[i])
#limitOrders1 <- rep(1,length(newRowList)) # BUY LIMIT ORDERS
limitPrices1 <- sapply(1:length(newRowList),function(i)
newRowList[[i]]$High - atr*params$p)
limitOrders2 <- c(0,0,0,0,0,0,0,0,0,-params$posSizes[i])
#limitOrders2 <-rep(-1,length(newRowList)) # SELL LIMIT ORDERS
limitPrices2 <- sapply(1:length(newRowList),function(i)
newRowList[[i]]$Low + atr*params$p)
}
}
return(list(store=store,marketOrders=marketOrders,
limitOrders1=limitOrders1,
limitPrices1=limitPrices1,
limitOrders2=limitOrders2,
limitPrices2=limitPrices2))
}
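# Example of the params list this strategy expects (a sketch; the values are
# illustrative). Note that getOrders() also reads params$posSizes, which the
# commented example at the top of this file does not include.
# params <- list(atr = 30, p = 0.89, series = 10, posSizes = 1)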
initClStore <- function(newRowList,series) {
clStore <- matrix(0,nrow=maxRows,ncol=length(series))
return(clStore)
}
updateClStore <- function(clStore, newRowList, series, iter) {
for (i in 1:length(series))
clStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Close)
return(clStore)
}
initOpenStore <- function(newRowList,series) {
openStore <- matrix(0,nrow=maxRows,ncol=length(series))
return(openStore)
}
updateOpenStore <- function(openStore, newRowList, series, iter) {
for (i in 1:length(series))
openStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Open)
return(openStore)
}
initHighStore <- function(newRowList,series) {
highStore <- matrix(0,nrow=maxRows,ncol=length(series))
return(highStore)
}
updateHighStore <- function(highStore, newRowList, series, iter) {
for (i in 1:length(series))
highStore[iter,i] <- as.numeric(newRowList[[series[i]]]$High)
return(highStore)
}
initLowStore <- function(newRowList,series) {
lowStore <- matrix(0,nrow=maxRows,ncol=length(series))
return(lowStore)
}
updateLowStore <- function(lowStore, newRowList, series, iter) {
for (i in 1:length(series))
lowStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Low)
return(lowStore)
}
initVolumeStore <- function(newRowList,series) {
volumeStore <- matrix(0,nrow=maxRows,ncol=length(series))
return(volumeStore)
}
updateVolumeStore <- function(volumeStore, newRowList, series, iter) {
for (i in 1:length(series))
volumeStore[iter,i] <- as.numeric(newRowList[[series[i]]]$Volume)
return(volumeStore)
}
initStore <- function(newRowList,series) {
return(list(iter=0,cl=initClStore(newRowList,series),
open=initOpenStore(newRowList,series),
high=initHighStore(newRowList,series),
low=initLowStore(newRowList,series),
volume=initVolumeStore(newRowList,series),
entry=rep(1,10)))
}
updateStore <- function(store, newRowList, series) {
store$iter <- store$iter + 1
store$cl <- updateClStore(store$cl,newRowList,series,store$iter)
store$open<-updateOpenStore(store$open,newRowList,series,store$iter)
store$high<-updateHighStore(store$high,newRowList,series,store$iter)
store$low<-updateLowStore(store$low,newRowList,series,store$iter)
store$volume<-updateVolumeStore(store$volume,newRowList,series,store$iter)
store$entry <- store$entry
return(store)
}
| /simple_limit.R | no_license | heatherhu93/COMP396 | R | false | false | 4,430 | r |
#' Render graph with visNetwork
#'
#' Render a graph object with the visNetwork R package.
#'
#' @inheritParams render_graph
#' @examples
#' \dontrun{
#' # Create a node data frame (ndf)
#' ndf <-
#' create_node_df(
#' n = 6,
#' label = TRUE,
#' fillcolor = c("lightgrey", "red", "orange",
#' "pink", "aqua", "yellow"),
#' shape = "dot",
#' size = c(20, 80, 40, 10, 30, 50),
#' type = c("1", "1", "1", "2", "2", "2"))
#'
#' # Create an edge data frame (edf)
#' edf <-
#' create_edge_df(
#' from = c(1, 2, 3, 4, 6, 5),
#' to = c(4, 3, 1, 3, 1, 4),
#' color = c("green", "green", "grey",
#' "grey", "blue", "blue"),
#' rel = "leading_to")
#'
#' # Create a graph object
#' graph <-
#' create_graph(
#' nodes_df = ndf,
#' edges_df = edf)
#'
#' visnetwork(graph)
#' }
#'
#' @import visNetwork
#' @export
visnetwork <- function(graph) {
# Extract node and edge data frames from the graph object
nodes <- graph %>% get_node_df()
edges <- graph %>% get_edge_df()
# Render an empty graph if no nodes or edges exist
if (graph %>% is_graph_empty()) {
nodes <- create_node_df(n = 1)
nodes <- nodes[-1, ]
edges <- create_edge_df(from = 1, to = 1)
edges <- edges[-1, ]
}
# Remove the 'pos' column, if it exists
if ("pos" %in% colnames(nodes)) {
nodes <- nodes[, -(which(colnames(nodes) %in% "pos"))]
}
# Modify names of columns in `nodes` for compatibility with
# visNetwork data frames for nodes
colnames(nodes)[which(colnames(nodes) == "nodes")] <- "id"
colnames(nodes)[which(colnames(nodes) == "type")] <- "group"
colnames(nodes)[which(colnames(nodes) == "tooltip")] <- "title"
colnames(nodes)[which(colnames(nodes) == "fillcolor")] <- "color"
# Modify names of columns in 'edges' for compatibility with
# visNetwork data frames for edges
colnames(edges)[which(colnames(edges) == "rel")] <- "label"
colnames(edges)[which(colnames(edges) == "tooltip")] <- "title"
colnames(edges)[which(colnames(edges) == "penwidth")] <- "width"
# Obtain `fontcolor` values if the column exists in `edges`
if ("fontcolor" %in% colnames(edges)) {
fontcolor <- edges[, -(which(colnames(edges) %in% "fontcolor"))]
}
# Create the visNetwork object
if (all(c("x", "y") %in% colnames(nodes)) == FALSE) {
if (nrow(graph$edges_df) == 0) {
vn_obj <- visNetwork(nodes = nodes)
}
if (nrow(graph$edges_df) > 0) {
vn_obj <- visNetwork(nodes = nodes, edges = edges)
if (is_graph_directed(graph)) {
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to = list(
enabled = TRUE,
scaleFactor = 1)))
}
if (is_graph_undirected(graph)) {
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to = list(
enabled = FALSE,
scaleFactor = 1)))
}
}
vn_obj <-
visPhysics(
graph = vn_obj,
solver = "barnesHut",
stabilization = list(
enabled = TRUE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visLayout(
graph = vn_obj,
improvedLayout = TRUE)
}
if (all(c("x", "y") %in% colnames(nodes))) {
# Reverse y values
nodes$y <- -as.numeric(nodes$y)
if (is.null(graph$edges_df)) {
vn_obj <- visNetwork(nodes = nodes)
vn_obj <-
visNodes(
graph = vn_obj,
physics = FALSE,
fixed = FALSE)
vn_obj <-
visPhysics(
graph = vn_obj,
stabilization = list(
enabled = FALSE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visInteraction(
graph = vn_obj,
dragNodes = FALSE)
}
if (nrow(graph$edges_df) > 0) {
if ("arrow" %in% colnames(edges)) {
if (all(edges[which(colnames(edges) %in% "arrow")] == FALSE)) {
arrows_for_edges <- FALSE
} else {
arrows_for_edges <- FALSE
}
} else {
arrows_for_edges <- FALSE
}
vn_obj <-
visNetwork(
nodes = nodes,
edges = edges)
vn_obj <-
visNodes(
graph = vn_obj,
physics = FALSE,
fixed = FALSE)
vn_obj <-
visEdges(
graph = vn_obj,
arrows = list(
to =
list(
enabled = ifelse(arrows_for_edges, TRUE, FALSE),
scaleFactor = 1)),
smooth = FALSE,
font = list(
color = "#343434",
size = 14,
face = "arial",
background = NULL,
strokeWidth = 2,
strokeColor = "#ffffff",
align = "middle"))
vn_obj <-
visPhysics(
graph = vn_obj,
stabilization = list(
enabled = TRUE,
onlyDynamicEdges = FALSE,
fit = TRUE))
vn_obj <-
visInteraction(
graph = vn_obj,
dragNodes = FALSE)
}
}
vn_obj
}
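# Usage note (illustrative, mirrors the @examples above): when the node data frame carries
# `x` and `y` columns the function takes the fixed-coordinate branch, e.g.
#   ndf <- create_node_df(n = 3, x = c(0, 1, 2), y = c(0, 1, 0))
#   edf <- create_edge_df(from = c(1, 2), to = c(2, 3))
#   visnetwork(create_graph(nodes_df = ndf, edges_df = edf))
# physics is then disabled and the supplied y values are flipped before rendering.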
| /R/visnetwork.R | permissive | wush978/DiagrammeR | R | false | false | 5,173 | r |
|
directory = '/Volumes/X/out/GR/100604GR'
setwd(directory)
source("/Volumes/X/code/R/Functions.R")
load(paste(directory, "/d.Rfile", sep=""))
load(paste(directory, "/gr6.Rfile", sep="")) #growth rate based on 6 hours
load( paste(directory, "/md.Rfile", sep=""))
cf <- md$cenfield
md <- md$mindist
x <- md; x[] <- 0
x[which(md < 35)] <- 1
x[which(cf == 0)] <- 1
gr$colparam[which(x == 1),1] <- NA
a <- log(d$areas[,1:6]) #log areas matrix
b <- a
for(i in 1:(ncol(a)-1)){
b[,i] <- a[,i+1] - a[,i]
}
b <- b[,1:5]
b[which(is.infinite(b))] <- NA
b[which(is.nan(b))] <- NA
n <- which(is.na(gr$colparam[,1]))
f <- d$fint[,1:6]
f <- f/d$areas[,1:6]
fm <- apply(f, 1, mean, na.rm=T)
fm[which(is.nan(fm))] <- NA
b[n,] <- NA; a[n,] <- NA; f[n,] <- NA
n <- unique(d$condition.names)
n <- n[which(! n == 0)]
w <- which(d$condition.names == n[1])
wl <- NULL
for(j in 1:length(w)){
wl <- c(wl, d$well.list[[names(w)[j]]])
}
tsl <- wl
w <- which(d$condition.names == n[2])
wl <- NULL
for(j in 1:length(w)){
wl <- c(wl, d$well.list[[names(w)[j]]])
}
tma <- wl
#binning TSL1
bins <- c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.7)
cp <- gr$colparam[,1]
wl <- list()
wl[[1]] <- which(! is.na(cp) & 1:length(cp) %in% tsl)
for(i in 1:(length(bins) - 1)){
wl[[i+1]] <- which(cp >= bins[i] & cp < bins[i +1] & 1:length(cp) %in% tsl)
names(wl)[i+1] <- paste(bins[i],"-",bins[i+1])
}
names(wl)[1] <- "All"
tsl.colors <- colorRampPalette(c(colors()[258],"grey", "dark magenta"))
source("/Volumes/X/code/R/Barplot.R")
pdf(file = "~/Documents/Papers/Bethedging/figureparts/fig2c.pdf", height = 6, width = 5.6)
par(mai = c(2.3,1.5,.5,.5), lwd = 2)
i=6
Barplot(f[,i]*0.45, wl, asterix = T, cex.names = 1.3, ylim = c(200, 375), comparison = 1, space = 0.2, color = c("grey", tsl.colors(6)), density = NA, ylab = expression("TSL1-GFP " (counts / mu*m^2)), xlab = expression("Specific growth rate " (h^-1)), horizontal.labels = F, xlab.adjustment = 4, ylab.adjustment = -1, cex.lab = 1.3, asterix.bins = c(.01, 1e-5, 1e-10, 0))
dev.off()
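# (sketch) the manual binning loop above could equivalently be built with cut(), assuming the
# same `bins`, `cp` and `tsl` objects:
#   grp    <- cut(cp[tsl], breaks = bins, right = FALSE)
#   wl_alt <- split(tsl, grp)   # plot indices per growth-rate bin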
| /LevySiegalData/Lawless/Fig2c.R | no_license | lwlss/discstoch | R | false | false | 2,014 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zxx.R
\name{lunique}
\alias{lunique}
\title{Number of unique values}
\usage{
lunique(x, na.rm = FALSE)
}
\arguments{
\item{x}{a vector}
\item{na.rm}{logical; if \code{TRUE}, \code{NA} will not be counted as a
unique level (default is to include \code{NA}s)}
}
\description{
Number of unique values
}
\examples{
x <- c(1:5, NA)
lunique(factor(x))
lunique(x, TRUE)
}
| /man/lunique.Rd | no_license | raredd/rawr | R | false | true | 445 | rd |
|
#' Creates the Inventory Detail Data Table
#'
#' This function creates an Inventory Detail Data Table.
#'
#' Requires: DT, data.table
#'
#' @param inv_table The inventory table to generate the report.
#' @param rem_flag The flag to indicate whether the Edit Button should be removed from output.
#' @return An Inventory Detail Data Table.
#' @export
#' @examples
#'
Inventory_Table <- function(inv_table, rem_flag = FALSE) {
if(nrow(inv_table) == 0) { inv_DT <- NULL ; return(inv_DT) }
#
vals <- reactiveValues()
button_def <- "<button id=\"XXXX\" type=\"button\" class=\"btn btn-default action-button\" style=\"color: #565455; background-color: #fbe7e9; border-color: #565455;\"
                onclick=\"Shiny.onInputChange('lastClick', this.id)\">View/Edit</button>"
t_data <- data.table::data.table(inv_table %>%
mutate(edit_btns = button_def) %>%
select(39, 2, 5, 3, 4, 6:38))
for (i in 1:nrow(t_data)) {
t_data$edit_btns[i] <- gsub("XXXX", paste0("editbtn_", t_data$sku[i]), t_data$edit_btns[i])
}
if(rem_flag) { t_data$edit_btns <- "" }
vals$Data <- t_data
###
file_name <- paste("Inventory-Table", as.character(Sys.Date()), sep = "-")
dt_col_names <- c("",
"SKU", "Title", "Status", "Date Added", "Description", "Department",
"Category", "Sub-Category", "Brand", "Color", "Size", "New With Tags",
"Item Source", "Bin Number", "Item Cost", "Listing Price", "List Poshmark", "Poshmark Date",
"List Mercari", "Mercari Date", "List Ebay", "Ebay Date", "List Thredup", "Thredup Date",
"List Tradesy", "Tradesy Date", "Sold Date", "Sold Site", "Bundled", "Sale Price",
"Earnings", "Profit", "Other Info", "Last Update", "Last Action", "Remove Date",
"Remove Reason")
DT <- vals$Data
inv_DT <- DT::datatable(DT,
escape = FALSE,
selection = 'single',
rownames = FALSE,
editable = FALSE,
filter = list(position = 'top', plain = TRUE, clear = TRUE),
colnames = dt_col_names,
extensions = c('Buttons', 'FixedColumns'),
options = list(dom = 'Blfrtip',
columnDefs = list(
list(
targets = c(5, 33),
render = DT::JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 30 ?",
"'<span title=\"' + data + '\">' + data.substr(0, 30) + '...</span>' : data;",
"}")
),
list(
searchable = FALSE,
targets = c(0)
)
),
initComplete = DT::JS("function(settings, json)
{","$(this.api().table().header()).css({'background-color': '#fbe7e9', 'color': '#404041'});","}"),
scroller = TRUE,
scrollX = TRUE,
scrollY = 700,
fixedColumns = list(leftColumns = 4),
buttons = list(list(extend = 'copy'),
list(extend = 'csv', filename = file_name),
list(extend = 'excel', filename = file_name),
list(extend = 'print')),
lengthMenu = list(c(20, 40, 60, -1), c(20, 40, 60, "All"))),
class = "display nowrap") %>%
DT::formatCurrency(c('item_cost', 'listing_price', 'sell_price', 'net_earnings', 'net_profit'))
###
return(inv_DT)
}
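# Minimal usage sketch (assumed server-side wiring; `inventory_data` is hypothetical):
#   output$inv_table <- DT::renderDT(Inventory_Table(inventory_data))
#   # and in the UI: DT::DTOutput("inv_table")
# The rem_flag = TRUE form is intended for read-only views where the View/Edit buttons are
# stripped from the first column.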
| /rsellDT/R/Inventory_Table.R | no_license | t2tech-corp/Rsell-Packages | R | false | false | 4,787 | r |
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SingleCellRNASeq.R
\name{remove_unwanted_confounders}
\alias{remove_unwanted_confounders}
\title{Removing unwanted confounders using limma}
\usage{
remove_unwanted_confounders(
object,
residualModelFormulaStr = "~UMI_count+percent_mito",
preserved_feature = "",
block.gene.size = 2000
)
}
\arguments{
\item{object}{The SingCellaR object.}
\item{residualModelFormulaStr}{The model for removing confounder variables.}
\item{preserved_feature}{A variable to preserve when removing confounders, such as a cell genotype.}
\item{block.gene.size}{The number of genes processed in each block.}
}
\description{
Removing unwanted confounders using limma
}
| /man/remove_unwanted_confounders.Rd | no_license | supatt-lab/SingCellaR | R | false | true | 721 | rd |
|
/plugins/MacAU/VinylDither/VinylDither.r | permissive | airwindows/airwindows | R | false | false | 3,261 | r | ||
0ab81a1ab1e9919ab894c5521f77ba2c dungeon_i10-m10-u5-v0.pddl_planlen=129.qdimacs 22196 69188 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m10-u5-v0.pddl_planlen=129/dungeon_i10-m10-u5-v0.pddl_planlen=129.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | 0ab81a1ab1e9919ab894c5521f77ba2c dungeon_i10-m10-u5-v0.pddl_planlen=129.qdimacs 22196 69188 |
## The two functions defined below demonstrate how to use R's lexical scoping
## features to cache outputs from time-consuming computations. In particular
## the functions below demonstrate the computation, caching, and retrieval of
## the inverse of a matrix.
## makeCacheMatrix creates a special "matrix". It creates a list
## containing functions for {setting|getting} the {value|inverse} of
## the matrix
makeCacheMatrix <- function(x = matrix()) {
invX <- NULL
set <- function(y) {
x <<- y # set the matrix value
invX <<- NULL
}
get <- function() x # get the matrix value
setInv <- function(invY) invX <<- invY # set the inverse
getInv <- function() invX # get the inverse
list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## cacheSolve will take the "matrix" produced by makeCacheMatrix and
## will return the inverse of the matrix. If the inverse has already
## been computed and cached, cacheSolve will simply return this
## cached inverse; otherwise, it will solve for the inverse, and then
## cache its value for future use.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
# Try getting the cached version
invX <- x$getInv()
if(!is.null(invX)) {
message("getting cached inverse")
return(invX)
}
# cached version doesn't exist so, solve for the inverse of 'x'.
data <- x$get()
# Note: Assumes 'x' is invertible. Also, pass optional arguments via '...'
invX <- solve(data,...)
x$setInv(invX)
invX
}
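## Example (illustrative): the second call returns the cached inverse without recomputing.
##   m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
##   cacheSolve(m)   # computes, caches, and returns the inverse
##   cacheSolve(m)   # prints "getting cached inverse" and returns the cached value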
| /cachematrix.R | no_license | blubarry/ProgrammingAssignment2 | R | false | false | 1,569 | r |
|
## ui.R - Prediction App
library(shiny)
shinyUI(
pageWithSidebar(
#Title
headerPanel("Pitching Metrics Calculator"),
#Inputs
sidebarPanel(
p(h5("Enter the pitcher's statistics for the game and hit 'Submit' to calculate the metrics.")),
numericInput('outs','Outs Recorded',1,min=0,max=5000,step=1),
numericInput('runs','Earned Runs',0,min=0,max=5000,step=1),
numericInput('hits','Hits Allowed',0,min=0,max=5000,step=1),
numericInput('bb','Base on Balls',0,min=0,max=5000,step=1),
numericInput('k','Strike-outs',0,min=0,max=5000,step=1),
numericInput('pitches','Pitches Thrown',0,min=0,max=5000,step=1),
submitButton('Submit'),
br(),
img(src="baseball.jpg")
),
#Outputs
mainPanel(
h3('Results'),
h4('Innings pitched:'),
verbatimTextOutput("innings"),
h4('Earned Run Average:'),
h5('Total runs allowed per 9 innings'),
verbatimTextOutput("era"),
h4('WHIP:'),
h5('Total walks and hits allowed per inning'),
verbatimTextOutput("whip"),
h4('Power Finesse Ratio:'),
h5('Total strike-outs and walks allowed per inning'),
verbatimTextOutput("pfr"),
h4('Was this a Quality Start?'),
h5('Quality start is an appearance of at least 6 innings, while allowing no more than 3 runs.'),
verbatimTextOutput("qualitystart")
)
| /ui.R | no_license | brianychan/dataproducts | R | false | false | 1,469 | r |
# This contains the main fitting functions
# Class definition
#setOldClass("nls")
#wFit<-setClass("wFit",
# slots = list(
# accuracy = "numeric",
# fit = "nls",
# df = "data.frame",
# w = "numeric",
# fitinfo = "list",
# modeltype = "character"
# ))
#
wFit<-setClass("wFit",
slots = list(
data = "list"
))
# show method
setMethod("show",
"wFit",
function(object) {
cat("Object of class",class(object),"(Weber Fit) \n")
cat("Model type:", object@data$modeltype,"\n")
cat("Accuracy:", object@data$accuracy,"\n")
cat("Weber fraction:", object@data$w)
})
setMethod("$", signature = "wFit",
function(x, name) {
returnval = x@data[[name]]
names(returnval) = name
return(returnval)}
)
setMethod("names", signature = "wFit",
function(x) { return(names(x@data)) }
)
GeneratePlotData <- function(w,x) 0.5 * (1 + VGAM::erf((x - 1) / (sqrt(2) * w * sqrt( (x^2) + 1))))
#setMethod("$",
# "wFit",
# function(x,name){
# if(name == "w"){
# returnval = x@w
# names(returnval) = "w"
# return(returnval)
# }
# if(name == "accuracy"){
# returnval = x@accuracy
# names(returnval) = "accuracy"
# return(returnval)
# }
# })
#
# function that does the actual fitting
FitLinearModel<-function(ratio, outcomes, start.values = c(1,4), lower.limit = .55){
`%>%` = tidyr::`%>%`
# calculate the accuracy
df = data.frame(x = ratio, y = outcomes)
accuracy = df %>% dplyr::mutate(acc = dplyr::case_when(x < 1 ~ 1 - y, x > 1 ~ y)) %>% dplyr::pull(acc) %>% mean(na.rm = T)
fit = 'No Fit'
class(fit) <- "nls"
range = NULL
vari = NULL
w = NA_real_
if(accuracy > lower.limit && accuracy < 1){
fits <- purrr::map(start.values, function(s) try(expr = {
nls(formula = y ~ 0.5 * (1 + VGAM::erf((x - 1) / (sqrt(2) * w * sqrt( (x^2) + 1)))), start = c(w = s), trace = F,
data = df, control = list(maxiter = 50000))
},silent = T))
fits <- fits[purrr::map_lgl(fits, function(x) length(x) > 1)]
fit <- fits[[which.min(purrr::map_dbl(fits, function(x) deviance(x)))]] # take the one that fits best
range = purrr::map_dbl(fits, function(x) coef(x)) %>% range() %>% diff()
vari = purrr::map_dbl(fits, function(x) coef(x)) %>% sd()
w = coef(fit)
}
new(Class = 'wFit',
data = list(accuracy = accuracy,
fit = fit,
df = df,
w = w,
fitinfo = list(range = range,
variability = vari,
lower.limit = lower.limit,
start.values = start.values),
modeltype = "linear model"))
}
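# Illustrative use (variable names are made up; outcomes are 0/1 "judged larger" responses):
#   ratios  <- c(0.8, 0.9, 1.1, 1.25, 0.8, 1.25)
#   answers <- c(0, 0, 1, 1, 0, 1)
#   fit <- FitLinearModel(ratios, answers)
#   fit$w; fit$accuracy                                  # Weber fraction and overall accuracy
#   curve(GeneratePlotData(fit$w, x), from = 0.5, to = 2) # fitted psychometric curve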
| /WeberFit/R/fitting_funcs.R | no_license | ljcolling/weberfit | R | false | false | 3,025 | r |
|
#' Imbalance distance calculation
#'
#' This function helps knn_flow() use the imbalance distance when measuring similarity
#' among traffic flows.
#'
#' The imbalance distance treats the values of a flow in the flow base differently
#' depending on whether they lie above or below the object flow, because over- and
#' under-forecasts should not be punished equally: a forecast that is too high is more
#' tolerable than one that is too low.
#'
#' @param obj The object flow (fragment) to be forecasted.
#' @param base The flow base (fragment) used by KNN method.
#' @return The distance between object flow and every flow in flow base as a dataframe with
#' the same order of flow in flow base.
#' @export
dist_imbalance <- function(obj,base){
n <- nrow(base)
s <- matrix(NA,n,1)
for (i in 1:nrow(base)) {
if(sum(base[i,(obj[1,]>base[i,])])>0){
s[i,1] <- sqrt(sum((base[i,(obj[1,]>base[i,])]-obj[1,(obj[1,]>base[i,])])**2))
} else{
s[i,1] = 0
}
}
rownames(s) <- rownames(base)
return(s)
}
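# Illustrative example (hypothetical numbers): only the elements of a base flow that fall
# below the object flow contribute to its distance, so flowB (never below obj) scores 0.
#   obj  <- matrix(c(10, 12, 8), nrow = 1)
#   base <- rbind(flowA = c(9, 13, 7), flowB = c(11, 12, 9))
#   dist_imbalance(obj, base)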
| /R/dist_imbalance.R | no_license | ahorawzy/TFTSA | R | false | false | 1,066 | r |
|
\name{domPrior}
\alias{domPrior}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ ~~function to do ... ~~ }
\description{
~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
domPrior(params)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{params}{ ~~Describe \code{params} here~~ }
}
\details{
~~ If necessary, more details than the description above ~~
}
\value{
~Describe the value returned
If it is a LIST, use
\item{comp1 }{Description of 'comp1'}
\item{comp2 }{Description of 'comp2'}
...
}
\references{ ~put references to the literature/web site here ~ }
\author{ ~~who you are~~ }
\note{ ~~further notes~~
~Make other sections like Warning with \section{Warning }{....} ~
}
\seealso{ ~~objects to See Also as \code{\link{help}}, ~~~ }
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (params)
{
prior <- c(params["null", 3] * params["null", 3], params["neg",
3] * params["null", 3], params["neg", 3] * params["neg",
3], params["pos", 3] * params["null", 3], params["pos",
3] * params["pos", 3])
return(normalizePrior(prior))
}
}
\keyword{ ~kwd1 }% at least one, from doc/KEYWORDS
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /KnockoutNets/man/domPrior.Rd | permissive | sverchkov/vaske-fgnem | R | false | false | 1,419 | rd |
|
rm(list=ls())
library(data.table)
library(quickPlot)
library(ggplot2)
InvTree <- data.table(read.csv("J:/!Workgrp/Inventory/MPB regeneration_WenliGrp/compiled data/From Erafor/Erafor_layer.csv"))
InvStand <- data.table(read.csv("J:/!Workgrp/Inventory/MPB regeneration_WenliGrp/compiled data/From Erafor/InvStand.csv"))
##remove non-regeneration plots in InvTree and InvStand
n <- InvStand[Regen %in% "0", PlotNum]
InvStand <- InvStand[Regen %in% "1"]
InvTree <- InvTree[!PlotNum %in% n]
##set color for species
## use a named character vector (rather than a one-row data.table) so it can be passed
## directly to scale_fill_manual(values = ...)
SPcolor <- c(PL = "#56B4E9",
             S = "#E69F00",
             AT = "#D55E00",
             E = "#999999",
             B = "#F0E442",
             F = "#009E73",
             AC = "#CC79A7")
##############
## SBSmk #####
##############
SBSmkTree <- InvTree[BEC_sub_all %in% "SBSmk"]
SBSmkStand <- InvStand[BEC_sub_all %in% "SBSmk"]
SBSmk_Pine_PlotNum <- SBSmkTree[Status %in% 2003 & SP %in% "PL" & PCT >= 70, PlotNum]
SBSmkTree_Pine <- SBSmkTree[PlotNum %in% SBSmk_Pine_PlotNum]
SBSmkStand_Pine <- SBSmkStand[PlotNum %in% SBSmk_Pine_PlotNum]
SBSmkStand_Pine[, mean(TPH_R,na.rm = TRUE)]
#[1] 1928.713
SBSmkStand_Pine[, range(TPH_R,na.rm = TRUE)]
#[1] 200 9200
SBSmkStand_Pine[,sqrt(var(TPH_R, na.rm = TRUE))]
#[1] 1651.565
#Plot species combination
SBSmk_spcomp2003 <- ggplot(data = SBSmkTree_Pine[Status %in% "2003"])+
geom_bar(aes(x = as.character(PlotNum), y = PCT, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(x = "Plot", y = "PCT by volume")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSmk_spcomp2019 <- ggplot(data = SBSmkTree_Pine[Status %in% "2019"])+
geom_bar(aes(x = as.character(PlotNum), y = PCT, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "VRI 2019 overstory species composition (SBSmk)", x = "Plot", y = "PCT by basal area")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSmk_spregen <- ggplot(data = SBSmkTree_Pine[Status %in% "Regen"])+
geom_bar(aes(x = as.character(PlotNum), y = Count, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "Post-survey regeneration composition (SBSmk)", x = "Plot", y = "PCT by stems tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank(),
axis.text.x = element_blank())
## note: this plot belongs to the SBSmk section and its title says SBSmk, so the SBSmk data and
## a non-clashing object name are used here (the original reused SBSdwTree_Pine / SBSdw_spcomp2019,
## which are only defined in the SBSdw section below)
SBSmk_spcomp_post <- ggplot(data = SBSmkTree_Pine[Status %in% "Post-survey"])+
geom_bar(aes(x = as.character(PlotNum), y = Count, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "Post-survey overstory species composition (SBSmk)", x = "Plot", y = "PCT by Stem tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
data <- SBSdw[Status %in% "2003",.(SP = paste(SP,PCT)),by= Plot2]
data[, SPcomp := Reduce(paste, SP), by=Plot2]
data[,SP := NULL]
data <- unique(data)
data <- data[, .N, by = SPcomp]
setorder(data,-N)
#density, age, height of regeneration
SBSmkTree_Pine[Status %in% "Regen", sum(TPH, na.rm = TRUE)/101, by = SP]
SBSmkTree_Pine[Status %in% "Regen", range(TPH, na.rm = TRUE), by = SP]
SBSmkTree_Pine[Status %in% "Regen", mean(Age, na.rm = TRUE), by = SP]
SBSmkTree_Pine[Status %in% "Regen", range(Age, na.rm = TRUE), by = SP]
SBSmkTree_Pine[Status %in% "Regen", mean(Ht, na.rm = TRUE), by = SP]
SBSmkTree_Pine[Status %in% "Regen", range(Ht, na.rm = TRUE), by = SP]
SBSmk_regenage <- ggplot(data = SBSmkTree_Pine[Status %in% "Regen"])+
geom_histogram(aes(x = Age, color = SP), fill = "white")+
scale_fill_manual(values = SPcolor)+
labs(title = "Histogram of Regen age (SBSmk)", x = "Age", y = "Frequency")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSmk_regenht <- ggplot(data = SBSmkTree_Pine[Status %in% "Regen"])+
geom_histogram(aes(x = Ht, color = SP), fill = "white")+
scale_fill_manual(values = SPcolor)+
labs(title = "Histogram of Regen ht (SBSmk)", x = "Age", y = "Frequency")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
##############
## SBSdw #####
##############
SBSdwTree <- InvTree[BEC_sub_all %in% "SBSdw"]
SBSdwStand <- InvStand[BEC_sub_all %in% "SBSdw"]
##Pine dominant stands
SBSdw_Pine_PlotNum <- SBSdwTree[Status %in% 2003 & SP %in% "PL" & PCT >= 70, PlotNum]
SBSdwTree_Pine <- SBSdwTree[PlotNum %in% SBSdw_Pine_PlotNum]
SBSdwStand_Pine <- SBSdwStand[PlotNum %in% SBSdw_Pine_PlotNum]
SBSdwStand_Pine[, mean(TPH_R,na.rm = TRUE)]
#[1] 2178.313
SBSdwStand_Pine[, range(TPH_R,na.rm = TRUE)]
#[1] 200 12600
SBSdwStand_Pine[,sqrt(var(TPH_R))]
#[1] 2156.784
#Plot species composition
SBSdw_spcomp2003 <- ggplot(data = SBSdwTree_Pine[Status %in% "2003"])+
geom_bar(aes(x = as.character(PlotNum), y = PCT, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(x = "Plot", y = "PCT by volume")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSdw_spcomp2019 <- ggplot(data = SBSdwTree_Pine[Status %in% "2019"])+
geom_bar(aes(x = as.character(PlotNum), y = PCT, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "VRI 2019 overstory species composition (SBSdw)", x = "Plot", y = "PCT by basal area")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSdw_spregen <- ggplot(data = SBSdwTree_Pine[Status %in% "Regen"])+
geom_bar(aes(x = as.character(PlotNum), y = Count, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "Post-survey regeneration composition (SBSdw)", x = "Plot", y = "PCT by stems tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSdw_spcomp2019 <- ggplot(data = SBSdwTree_Pine[Status %in% "Post-survey"])+
geom_bar(aes(x = as.character(PlotNum), y = Count, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(x = "Plot", y = "PCT by Stem tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
#density, age, height of regeneration
SBSdwTree_Pine[Status %in% "Regen", sum(TPH, na.rm = TRUE)/83, by = SP]
SBSdwTree_Pine[Status %in% "Regen", range(TPH, na.rm = TRUE), by = SP]
SBSdwTree_Pine[Status %in% "Regen", mean(Age, na.rm = TRUE), by = SP]
SBSdwTree_Pine[Status %in% "Regen", range(Age, na.rm = TRUE), by = SP]
SBSdwTree_Pine[Status %in% "Regen", mean(Ht, na.rm = TRUE), by = SP]
SBSdwTree_Pine[Status %in% "Regen", range(Ht, na.rm = TRUE), by = SP]
SBSdw_regenage <- ggplot(data = SBSdwTree_Pine[Status %in% "Regen"])+
geom_histogram(aes(x = Age, color = SP), fill = "white")+
scale_fill_manual(values = SPcolor)+
labs(title = "Histogram of Regen age (SBSdw)", x = "Age", y = "Frequency")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
SBSdw_regenht <- ggplot(data = SBSdwTree_Pine[Status %in% "Regen"])+
geom_histogram(aes(x = Ht, color = SP), fill = "white")+
scale_fill_manual(values = SPcolor)+
labs(title = "Histogram of Regen ht (SBSdw)", x = "Age", y = "Frequency")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
###post-MPB overstory species composition vs regeneration species composition
#SBSmk
a <- SBSmkTree_Pine[Status %in% "Regen", .(mean = sum(TPH, na.rm = TRUE)/101, Status = "Regen"), by = SP] # add Status so rbind(a, b) below has matching columns
b <- SBSmkTree_Pine[Status %in% "Post-survey", .(mean = sum(TPH, na.rm = TRUE)/101, Status = "Post-survey"), by = SP]
c <- rbind(a, b)
test <- ggplot(data = c)+
geom_bar(aes(x = as.character(Status), y = mean, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "Post-survey canopy versus regeneration species composition (SBSmk)", x = "Status", y = "PCT by stems tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
#SBSdw
d <- SBSdwTree_Pine[Status %in% "Regen", .(mean = sum(TPH, na.rm = TRUE)/101, Status = "Regen"), by = SP]
e <- SBSdwTree_Pine[Status %in% "Post-survey", .(mean = sum(TPH, na.rm = TRUE)/101, Status = "Post-survey"), by = SP]
f <- rbind(d,e)
test <- ggplot(data = f)+
geom_bar(aes(x = as.character(Status), y = mean, fill = SP), stat = "identity", position = "fill")+
scale_fill_manual(values = SPcolor)+
labs(title = "Post-survey canopy versus regeneration species composition (SBSdw)", x = "Status", y = "PCT by stems tallied")+
theme(panel.background = element_blank(),
panel.grid = element_blank())
| /rcodes/1.6. general information for overstory and understory attributes.R | permissive | Tubbz-alt/MPB-regeneration | R | false | false | 8,751 | r |
|
hmsc.BinomialLogit <-
function(data,param=NULL,priors=NULL,ncount,niter=2000,nburn=1000,thin=1,verbose=TRUE){
#### F. Guillaume Blanchet - March 2017
##########################################################################################
### General checks
if(niter < nburn){
stop("'niter' should be equal to or larger than 'nburn'")
}
### Handle verbose arguments
verbose<-iniVerbose(verbose,niter)
### A few basic objects
nsp<-ncol(data$Y)
nsite<-nrow(data$Y)
### Transform each data into a matrix
Y<-as.matrix(data$Y)
if(any(names(data)=="X")){
nparamX<-ncol(data$X)
X<-as.matrix(data$X)
}
if(any(names(data)=="Tr")){
Tr<-as.matrix(data$Tr)
}
if(any(names(data)=="Phylo")){
Phylo<-as.matrix(data$Phylo)
### Construct inverse correlation phylogeny matrix
iPhylo<-cov2cor(chol2inv(chol(Phylo)))
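### (note) chol2inv(chol(M)) is simply a fast inverse for a symmetric positive-definite matrix,
### so the line above is equivalent to cov2cor(solve(Phylo)), just cheaper and more stable.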
}
#====================================================
### Initiate prior values if they have not been given
#====================================================
if(is.null(priors)){
priors<-as.HMSCprior(data,family="probit")
}
#=================================================================
### Initiate starting parameter values if they have not been given
#=================================================================
if(is.null(param)){
param<-as.HMSCparam(data,priors)
}
### Degrees of freedom for Wishart distribution to update precX and varX (removed from the for loop)
if(!is.null(param)){
varXDf<-priors$param$varXDf+nsp
}
#====================================================
### Initiate basic objects to define latent variables
#====================================================
if(any(names(data)=="Random")){
### Some basic objects about Random
nRandom<-ncol(data$Random) #nr
nRandomLev<-mapply(nlevels,data$Random) #np
Random <- lapply(data$Random,function(x) factor(x, levels=unique(x)))
Random <- sapply(Random,as.numeric)-1
### Initial number of latent variables
nLatent<-sapply(param$param$latent,ncol)
### Parameters for the adaptation when calculating the number and importance of latent variables
adapt<-c(1,0.0005) # c(b0,b1)
### redund[1] (prop) : Proportion of redundant elements within factor loadings
### redund [2] (epsilon) : Proportion of redundant elements within factor loadings
redund<-c(1,0.001) # c(prop,epsilon)
}
#===================================================================
### Initiate basic objects to define autocorrelated latent variables
#===================================================================
if(any(names(data)=="Auto")){
### Some basic objects about RandomAuto
nAuto<-length(data$Auto)
nLevelAuto<-sapply(data$Auto,function(x) nlevels(x[,1]))
### Construct AutoCoord to be used as Auto in the mcmc functions
AutoCoord<-vector("list",length=nAuto)
for(i in 1:nAuto){
nAutoCoord<-ncol(data$Auto[[i]])-1
AutoCoordMean<-matrix(NA,nrow=nLevelAuto[i],ncol=nAutoCoord)
for(j in 1:nAutoCoord){
AutoCoordMean[,j]<-tapply(data$Auto[[i]][,j+1],data$Auto[[i]][,1],mean)
}
AutoCoord[[i]]<-AutoCoordMean
}
### Construct RandomAuto
RandomAuto<-vector("list",length=nAuto)
for(i in 1:nAuto){
RandomAuto[[i]]<-data$Auto[[i]][,1]
}
### Calculate the number of levels in
nAutoLev<-mapply(nlevels,RandomAuto) #np
### Reorganize RandomAuto so that it can be used in the mcmc function
RandomAuto <- lapply(RandomAuto,function(x) factor(x, levels=unique(x)))
RandomAuto<-sapply(RandomAuto,as.numeric)-1
### Initial number of latent variables
nLatentAuto<-sapply(param$param$latentAuto,ncol)
### Parameters for the adaptation when calculating the number and importance of latent variables
adapt<-c(1,0.0005) # c(b0,b1)
### redund[1] (prop) : Proportion of redundant elements within factor loadings
### redund [2] (epsilon) : Proportion of redundant elements within factor loadings
redund<-c(1,0.001) # c(prop,epsilon)
}
#=================================
### Initiate a latent Y for probit
#=================================
Ylatent<-iniYlatent(data,param,family="logit",ncount)
#======================
### Construct the model
#======================
### Find the data type in the data object
dataType<-names(data)
### Remove "Y"
dataType<-dataType[-which(dataType=="Y")]
### Number of datatypes
nDataType<-length(dataType)
if(nDataType==1){
### Construct model with X
if(dataType %in% "X"){
result<-mcmcBinomialLogitX(Y,
Ylatent,
X,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$residVar,
priors$param$meansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
nsp,
nsite,
nparamX,
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with Random
if(dataType %in% "Random"){
result<-mcmcBinomialLogitLatent(Y,
Ylatent,
Random,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with Auto
if(dataType %in% "Auto"){
result<-mcmcBinomialLogitAuto(Y,
Ylatent,
AutoCoord,
RandomAuto,
param$param$residVar,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocalAuto,
priors$param$shrinkOverallAuto[1],
priors$param$shrinkOverallAuto[2],
priors$param$shrinkSpeedAuto[1],
priors$param$shrinkSpeedAuto[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nsp,
nsite,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
}
if(nDataType==2){
### Construct model with X and Tr
if(all(dataType %in% c("X","Tr"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTr(Y,
Ylatent,
X,
Tr,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$residVar,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
nsp,
nsite,
nparamX,
nTr,
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X and Random
if(all(dataType %in% c("X","Random"))){
result<-mcmcBinomialLogitXLatent(Y,
Ylatent,
X,
Random,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$residVar,
priors$param$meansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X and Auto
if(all(dataType %in% c("X","Auto"))){
result<-mcmcBinomialLogitXAuto(Y,
Ylatent,
X,
AutoCoord,
RandomAuto,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$residVar,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$meansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocalAuto,
priors$param$shrinkOverallAuto[1],
priors$param$shrinkOverallAuto[2],
priors$param$shrinkSpeedAuto[1],
priors$param$shrinkSpeedAuto[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nsp,
nsite,
nparamX,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X and Phylo
if(all(dataType %in% c("X","Phylo"))){
result<-mcmcBinomialLogitXPhylo(Y,
Ylatent,
X,
Phylo,
iPhylo,
param$param$paramX,
param$param$meansParamX,
param$param$paramPhylo,
param$param$precX,
param$param$residVar,
priors$param$meansParamX,
priors$param$varMeansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
nsp,
nsite,
nparamX,
nrow(priors$param$paramPhylo),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with Random and Auto
if(all(dataType %in% c("Auto","Random"))){
result<-mcmcBinomialLogitAutoLatent(Y,
Ylatent,
AutoCoord,
RandomAuto,
Random,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
}
if(nDataType==3){
### Construct model with X, Tr and Random
if(all(dataType %in% c("X","Tr","Random"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrLatent(Y,
Ylatent,
X,
Tr,
Random,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$residVar,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nTr,
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Tr and Auto
if(all(dataType %in% c("X","Tr","Auto"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrAuto(Y,
Ylatent,
X,
Tr,
AutoCoord,
RandomAuto,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$residVar,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocalAuto,
priors$param$shrinkOverallAuto[1],
priors$param$shrinkOverallAuto[2],
priors$param$shrinkSpeedAuto[1],
priors$param$shrinkSpeedAuto[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Tr and Phylo
if(all(dataType %in% c("X","Tr","Phylo"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrPhylo(Y,
Ylatent,
X,
Tr,
Phylo,
iPhylo,
param$param$paramX,
param$param$paramTr,
param$param$paramPhylo,
param$param$precX,
param$param$residVar,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramPhylo),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Phylo and Random
if(all(dataType %in% c("X","Phylo","Random"))){
result<-mcmcBinomialLogitXPhyloLatent(Y,
Ylatent,
X,
Phylo,
iPhylo,
Random,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$paramPhylo,
param$param$residVar,
priors$param$meansParamX,
priors$param$varMeansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nrow(priors$param$paramPhylo),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Phylo and Auto
if(all(dataType %in% c("X","Phylo","Auto"))){
result<-mcmcBinomialLogitXPhyloAuto(Y,
Ylatent,
X,
Phylo,
iPhylo,
AutoCoord,
RandomAuto,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$paramPhylo,
param$param$residVar,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$meansParamX,
priors$param$varMeansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocalAuto,
priors$param$shrinkOverallAuto[1],
priors$param$shrinkOverallAuto[2],
priors$param$shrinkSpeedAuto[1],
priors$param$shrinkSpeedAuto[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nsp,
nsite,
nparamX,
nrow(priors$param$paramPhylo),
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Auto and Random
if(all(dataType %in% c("X","Auto","Random"))){
result<-mcmcBinomialLogitXAutoLatent(Y,
Ylatent,
X,
AutoCoord,
RandomAuto,
Random,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$meansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
}
if(nDataType==4){
### Construct model with X, Tr, Auto and Random
if(all(dataType %in% c("X","Tr","Auto","Random"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrAutoLatent(Y,
Ylatent,
X,
Tr,
AutoCoord,
RandomAuto,
Random,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Phylo, Auto and Random
if(all(dataType %in% c("X","Phylo","Auto","Random"))){
result<-mcmcBinomialLogitXPhyloAutoLatent(Y,
Ylatent,
X,
Phylo,
iPhylo,
AutoCoord,
RandomAuto,
Random,
param$param$paramX,
param$param$meansParamX,
param$param$precX,
param$param$paramPhylo,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$meansParamX,
priors$param$varMeansParamX,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nrow(priors$param$paramAutoWeight),
nrow(priors$param$paramPhylo),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Tr, Phylo and Random
if(all(dataType %in% c("X","Tr","Phylo","Random"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrPhyloLatent(Y,
Ylatent,
X,
Tr,
Phylo,
iPhylo,
Random,
param$param$paramX,
param$param$paramTr,
param$param$paramPhylo,
param$param$precX,
param$param$residVar,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramPhylo),
niter,
nburn,
thin,
ncount,
verbose)
}
### Construct model with X, Tr, Phylo and Auto
if(all(dataType %in% c("X","Tr","Phylo","Auto"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrPhyloAuto(Y,
Ylatent,
X,
Tr,
Phylo,
iPhylo,
AutoCoord,
RandomAuto,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$paramPhylo,
param$param$residVar,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocalAuto,
priors$param$shrinkOverallAuto[1],
priors$param$shrinkOverallAuto[2],
priors$param$shrinkSpeedAuto[1],
priors$param$shrinkSpeedAuto[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramPhylo),
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
}
if(nDataType==5){
### Construct model with X, Tr, Phylo, Auto and Random
if(all(dataType %in% c("X","Tr","Phylo","Auto","Random"))){
### Basic objects
nTr<-nrow(data$Tr)
result<-mcmcBinomialLogitXTrPhyloAutoLatent(Y,
Ylatent,
X,
Tr,
Phylo,
iPhylo,
AutoCoord,
RandomAuto,
Random,
param$param$paramX,
param$param$paramTr,
param$param$precX,
param$param$paramPhylo,
param$param$residVar,
param$param$latent,
param$param$paramLatent,
param$param$shrinkLocal,
param$param$paramShrinkGlobal,
param$param$paramAuto,
param$param$latentAuto,
param$param$paramLatentAuto,
param$param$shrinkLocalAuto,
param$param$paramShrinkGlobalAuto,
priors$param$paramTr,
priors$param$varTr,
priors$param$varXScaleMat,
priors$param$varXDf,
priors$param$residVar[1],
priors$param$residVar[2],
matrix(priors$param$paramPhylo[,2],ncol=1),
priors$param$paramPhylo[,1],
priors$param$paramAutoWeight,
priors$param$paramAutoDist,
priors$param$shrinkLocal,
priors$param$shrinkOverall[1],
priors$param$shrinkOverall[2],
priors$param$shrinkSpeed[1],
priors$param$shrinkSpeed[2],
adapt,
redund,
nAuto,
nAutoLev,
nLatentAuto,
nRandom,
nRandomLev,
nLatent,
nsp,
nsite,
nparamX,
nTr,
nrow(priors$param$paramPhylo),
nrow(priors$param$paramAutoWeight),
niter,
nburn,
thin,
ncount,
verbose)
}
}
### Name all parts of the result
result<-nameResult(data,result,niter,nburn,thin, family = "logit")
#=================
### Output results
#=================
res<-list(results=result,data=data)
class(res)<-c("hmsc","logit")
return(res)
}
| /R/hmsc.BinomialLogit.R | no_license | guiblanchet/HMSC | R | false | false | 28,530 | r |
|
#22/08/19
setwd("~/OneDrive - University of Warwick/WORK/Results/Proteomics/FINAL Result/Analysis - R-Script /")
# regulation = read.csv("./SubtiWiki Exports /regulations.csv", header = T)
# regulation_freq = table(regulation$regulator)
# write.csv(regulation_freq, "./SubtiWiki Exports /regulation_freq.csv", row.names = F)
regulation_freq = read.csv("./SubtiWiki Exports /regulation_freq.csv", header = T)
colnames(regulation_freq) = c("BSU_regulators_all", "No_of_total_targets")
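# find_ratio(): merge the regulators found in a result table with the SubtiWiki-wide counts,
# then express the number of observed targets as a percentage of each regulator's total
# number of known targets, sorted in decreasing order of that percentage.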
find_ratio = function(regulation_freq, my_table_freq) {
merged_table = merge(my_table_freq, regulation_freq, by.x="regulator", by.y="BSU_regulators_all", all.x = TRUE)
print(merged_table)
merged_percentage_table = transform(merged_table, percentage = 100 * merged_table$No_of_targets/merged_table$No_of_total_targets)
merged_percentage_table = merged_percentage_table[order(merged_percentage_table$percentage, decreasing = TRUE), ]
return(merged_percentage_table)
}
#DPP_LB
DPP_LB_reg_freq = read.csv("./Data/combined/output/geneRegulations/old/regulators/csv/DPP_LB_regulators.csv", header = T)
colnames(DPP_LB_reg_freq) = c("regulator", "No_of_targets")
DPP_LB_reg_freq = DPP_LB_reg_freq[!DPP_LB_reg_freq$regulator == "",]
DPP_LB_reg_percentage = find_ratio(regulation_freq, DPP_LB_reg_freq)
write.csv(DPP_LB_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/DPP_LB_regulators.csv", row.names = FALSE)
View(DPP_LB_reg_percentage)
#DPP_M9
DPP_M9_reg_freq = read.csv("./Data/combined/output/geneRegulations/old/regulators/csv/DPP_M9_regulators.csv", header = T)
colnames(DPP_M9_reg_freq) = c("regulator", "No_of_targets")
DPP_M9_reg_freq = DPP_M9_reg_freq[!DPP_M9_reg_freq$regulator == "",]
DPP_M9_reg_percentage = find_ratio(regulation_freq, DPP_M9_reg_freq)
write.csv(DPP_M9_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/DPP_M9_regulators.csv", row.names = FALSE)
View(DPP_M9_reg_percentage)
#DPP_SH2
DPP_SH2_reg_freq = read.csv("./Data/combined/output/geneRegulations/old/regulators/csv/DPP_SH2_regulators.csv", header = T)
colnames(DPP_SH2_reg_freq) = c("regulator", "No_of_targets")
DPP_SH2_reg_freq = DPP_SH2_reg_freq[!DPP_SH2_reg_freq$regulator == "",]
DPP_SH2_reg_percentage = find_ratio(regulation_freq, DPP_SH2_reg_freq)
write.csv(DPP_SH2_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/DPP_SH2_regulators.csv", row.names = FALSE)
View(DPP_SH2_reg_percentage)
#DPP_SH5
DPP_SH5_reg_freq = read.csv("./Data/combined/output/geneRegulations/old/regulators/csv/DPP_SH5_regulators.csv", header = T)
colnames(DPP_SH5_reg_freq) = c("regulator", "No_of_targets")
DPP_SH5_reg_freq = DPP_SH5_reg_freq[!DPP_SH5_reg_freq$regulator == "",]
DPP_SH5_reg_percentage = find_ratio(regulation_freq, DPP_SH5_reg_freq)
write.csv(DPP_SH5_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/DPP_SH5_regulators.csv", row.names = FALSE)
View(DPP_SH5_reg_percentage)
#DPP_SH5_vs_SH2
DPP_SH5_vs_SH2_reg_freq = read.csv("./Data/combined/output/geneRegulations/old/regulators/csv/DPP_SH5_vs_SH2_regulators.csv", header = T)
colnames(DPP_SH5_vs_SH2_reg_freq) = c("regulator", "No_of_targets")
DPP_SH5_vs_SH2_reg_freq = DPP_SH5_vs_SH2_reg_freq[!DPP_SH5_vs_SH2_reg_freq$regulator == "",]
DPP_SH5_vs_SH2_reg_percentage = find_ratio(regulation_freq, DPP_SH5_vs_SH2_reg_freq)
write.csv(DPP_SH5_vs_SH2_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/DPP_SH5_vs_SH2_regulators.csv", row.names = FALSE)
View(DPP_SH5_vs_SH2_reg_percentage)
| /intersect_TP/intersect_TP_regulator_percentage_SM.R | no_license | mhptr/phd_code | R | false | false | 3,647 | r |
|
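# addMetadata(): store one metadata value per element of x (e.g. a list of spectra) in
# slot 'pos' of each element's metaData, then return x as a plain list.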
addMetadata <- function(x, metadata, pos){
for (i in 1:length(x)){
metaData(x[[i]])[[pos]] <- as.character(metadata[i])
}
return(as.list(x))
}
| /R/addMetadata.R | no_license | Japal/MALDIrppa | R | false | false | 156 | r |
|
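# Shiny module pair: PrevalenceOutput() builds the "Prevalence" tab UI (calculate and
# download buttons plus a results table), and Prevalence() is the matching server logic
# that runs CalculatePrev() on the z-scored data and serves the result as a CSV download.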
PrevalenceOutput <- function(id){
ns <- NS(id)
tabPanel(
'Prevalence', icon = icon("bar-chart"),
fluidRow(
shinyjs::useShinyjs(),
shinyjs::disabled(
actionButton(
ns('button_prev'),
label = "Click to calculate prevalences")
),
shinyjs::disabled(
downloadButton(
ns('dl_prevalence'),
label = "Download prevalences")
),
tags$style(type='text/css', "#dl_prevalence {background-color:LightGrey; float:right; margin-bottom: 15px;}")
),
DT::dataTableOutput(ns('prev')),
style = 'width: 95%'
)
}
Prevalence <- function(input, output, session, ...){
# Run prevalence calculations
## Only enable calculation button once z-score calculation button has been clicked
observeEvent(input$button_z, shinyjs::enable("button_prev"))
## Only run calculations once button explicitly pressed
observeEvent(input$button_prev, {
withProgress(message = 'Calculation in progress',
detail = 'Please be patient - This may take a while...',
value = 0.1, {
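# Results are assigned with <<- so that the download handler defined below (and code
# outside this module) can reach df_prevs after the calculation finishes.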
df_prevs <<-
# CalculatePrev(data = df_zscores, sex = sex(), age = age(), age.month = FALSE,
# weight = weight(), lenhei = lenhei(), lenhei_unit = lenhei_unit(),
# sw = sw(), cluster = cluster(), strata = strata(), typeres = typeres(),
# gregion = gregion(), wealthq = wealthq(), mothered = mothered(), othergr = othergr(),
# headc = headc(), armc = armc(), triskin = triskin(), subskin = subskin(), oedema = oedema()
CalculatePrev(
data = df_zscores, sex = list_map_vars[['sex']](),
age = list_map_vars[['age']](), age.month = FALSE,
weight = list_map_vars[['weight']](), lenhei = list_map_vars[['lenhei']](),
lenhei_unit = list_map_vars[['lenhei_unit']](), sw = list_map_vars[['sw']](),
cluster = list_map_vars[['cluster']](), strata = list_map_vars[['strata']](),
typeres = list_map_vars[['typeres']](), gregion = list_map_vars[['gregion']](),
wealthq = list_map_vars[['wealthq']](), mothered = list_map_vars[['mothered']](),
othergr = list_map_vars[['othergr']](), headc = list_map_vars[['headc']](),
armc = list_map_vars[['armc']](), triskin = list_map_vars[['triskin']](),
subskin = list_map_vars[['subskin']](), oedema = list_map_vars[['oedema']]()
)
incProgress(0.5)
output$prev <-
DT::renderDataTable(df_prevs, options = list(paging = FALSE))
})
})
# Download table
## Only enable download button once z-score calculation button has been clicked
observeEvent(input$button_prev, shinyjs::enable("dl_prevalence"))
output$dl_prevalence <- downloadHandler(
filename = 'prevalences.csv',
# content = function(file) write.csv(df_prevs, file, row.names = FALSE)
content = function(file) readr::write_csv(df_prevs, file)
)
}
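# A minimal usage sketch, not part of the original module file: the module id "prev", the
# navbarPage wrapper and the globals the server relies on (df_zscores, list_map_vars,
# CalculatePrev) are illustrative assumptions.
if (interactive()) {
  library(shiny)
  ui <- navbarPage(
    "Survey app",
    PrevalenceOutput("prev")          # the "Prevalence" tabPanel defined above
  )
  server <- function(input, output, session) {
    callModule(Prevalence, "prev")    # classic (pre-moduleServer) module server call
  }
  shinyApp(ui, server)
}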
| /app/modules/prevalence.R | no_license | jpolonsky/yellow_fever | R | false | false | 3,449 | r |
|
# COURSE PROJECT 1
setwd("C:/Users/lsurampudi/Documents/Coursera/ExploratoryDataAnalysis/CourseProject1")
unzip("exdata_household_power_consumption.zip")
list.files()
# Read input data
data <- read.table("household_power_consumption.txt", header=TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE)
format(object.size(data), units="Gb")
#dim(data)
#str(data)
#head(data)
#summary(data)
# Working with variables within the data frame
dat <- na.omit(data)
#dim(data)[1] - dim(dat)[1]
dat$Date <- as.Date(dat$Date, "%d/%m/%Y")
df <- subset(dat, Date >= '2007-02-01' & Date <= '2007-02-02')
df$Time <- strptime(paste(df$Date,df$Time), "%Y-%m-%d %H:%M:%S")
names(df$Time) <- "datetime"
names(df)[match("Time",names(df))] <- "datetime"
#str(df)
#summary(df)
# Convert variables to numeric
df$Global_active_power <- as.numeric(df$Global_active_power)
df$Global_reactive_power <- as.numeric(df$Global_reactive_power)
df$Voltage <- as.numeric(df$Voltage)
df$Global_intensity <- as.numeric(df$Global_intensity)
df$Sub_metering_1 <- as.numeric(df$Sub_metering_1)
df$Sub_metering_2 <- as.numeric(df$Sub_metering_2)
df$Sub_metering_3 <- as.numeric(df$Sub_metering_3)
#summary(df)
#summary(df$Global_active_power)
# PLOT 3
with(df, plot(datetime, Sub_metering_1, type='l', xlab = "", ylab = "Energy sub metering", col="black"))
with(df, lines(datetime, Sub_metering_2, col="red"))
with(df, lines(datetime, Sub_metering_3, col="blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file="plot3.png", width = 480, height = 480)
dev.off()
rm(list = ls()) | /ExploratoryDataAnalysis/CourseProject1/plot3.R | no_license | lalitsurampudi/Coursera---Data-Science-Specialization | R | false | false | 1,646 | r |
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853833880L, 2030715618L, -1261966754L, -129171080L, -642760964L, 779827246L, 1878602521L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result) | /IntervalSurgeon/inst/testfiles/rcpp_depth/AFL_rcpp_depth/rcpp_depth_valgrind_files/1609856874-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 713 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PSstableSLwithWeights.R
\name{PSstableSLwithWeights}
\alias{PSstableSLwithWeights}
\title{PS stable self-training}
\usage{
PSstableSLwithWeights(
newdat,
weights,
plotName = NULL,
ratioRange = c(0.1, 0.9),
stepby = 0.05,
classProbCut = 0.9,
PShighGroup = "PShigh",
PSlowGroup = "PSlow",
breaks = 50,
imputeNA = FALSE,
byrow = TRUE,
imputeValue = c("median", "mean")
)
}
\arguments{
\item{newdat}{an input data matrix or data frame, with columns for samples and rows for features}
\item{weights}{a numeric vector with selected features (as names of the vector) and their weights}
\item{plotName}{a pdf file name with full path, ending with ".pdf", which is used to save multiple pages
of PS histograms with distribution densities. The default value is NULL, in which case no plot is saved.}
\item{ratioRange}{a numeric vector with two numbers, which indicates ratio search range. The default is
c(0.1, 0.9) for the current function. If your classification is very
unbalanced, such as when one group is much smaller than the other, and/or sample variation is quite big,
and/or classification results are far away from what you expect, you might want to change the default values.
c(0.15, 0.85) is recommended as an alternative setting. In an extremely rare situation, c(0.4, 0.6) could be a good try.}
\item{stepby}{a numeric parameter giving the distance between percentage searching steps; it should be within (0,1). The default value is 0.05,
but a user can change it to other values such as 0.01}
\item{classProbCut}{a numeric variable within (0,1), which is a cutoff of Empirical Bayesian probability;
often used values are 0.8 and 0.9, and the default value is 0.9. Only one value is used for both groups;
samples that are not included in either group will be assigned as UNCLASS}
\item{PShighGroup}{a string to indicate group name with high PS score}
\item{PSlowGroup}{a string to indicate group name with low PS score}
\item{breaks}{an integer to indicate the number of bins in the histogram, default is 50}
\item{imputeNA}{a logical variable to indicate if NA imputation is needed; if it is TRUE, NA imputation is
processed before any other steps. The default is FALSE}
\item{byrow}{a logical variable to indicate the direction for imputation; the default is TRUE,
which will use the row data for imputation}
\item{imputeValue}{a character variable to indicate which value is used to replace NA; the default is "median",
meaning the median value of the chosen direction (see "byrow") is used}
}
\value{
A list with two items is returned: PS parameters for selected features, PS scores and classifications for the given samples.
\item{PS_pars}{a list of 3 items: the 1st item is a data frame with the weight of each selected feature for the PS
calculation, the 2nd item is a numeric vector containing the PS mean and sd for the two groups, and the 3rd item is a data frame that contains
the group means for each group and the mean of these two means for each feature, based on the stable classes}
\item{PS_test}{a data frame of PS score and classification with natural 0 cutoff}
}
\description{
This function is to calculate PS (Prediction Strength) scores and make binary classification calls
for a testing data set without a PS training object. It involves a self-training process with the given features and their weights.
}
\details{
This function tries to obtain a reasonable PS based classification without a training data set, but with
selected features and their weights. The actual steps are as follows:
1) assume that we have a pool of group ratio priors such as seq(0.05, 0.95, by = 0.05) for the default ratioRange = c(0.05, 0.95)
2) with the given features and their weights
  a) for each prior in 1), call PSSLwithWeightsPrior with the given features and weights to obtain PS scores,
     then apply EM on the PS scores with Mclust to get a 2 group classification
  b) define the samples that are always in the same classes across the searching range as stable classes
3) repeat step 2) but this time with opposite signs on the given weights, resulting in another set of stable classes
4) get the final stable classes that are common to 2) and 3)
5) use the final stable classes to get group means and sds for each feature and for each group
6) calculate PS scores
7) once we have PS scores, we could use the theoretical natural cutoff 0 to make classification calls, which may or may not be appropriate.
Alternatively, with two groups based on stable classes and assuming that the PS score is a mixture of two normal distributions,
we can get Empirical Bayesian probabilities and make calls
}
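\examples{
# Editor's illustrative sketch only (not from the package authors): simulated
# data with hypothetical feature names and made-up weights, just to show the
# expected input shapes (rows = features, columns = samples; weights named by feature).
\dontrun{
set.seed(1)
dat <- matrix(rnorm(200 * 20), nrow = 200,
              dimnames = list(paste0("f", 1:200), paste0("s", 1:20)))
w <- c(f1 = 0.8, f2 = -0.6, f3 = 1.1, f4 = -0.9)
res <- PSstableSLwithWeights(newdat = dat, weights = w)
head(res$PS_test)
}
}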
\references{
Golub TR, Slonim DK, Tamayo P, Huard C, Gaasenbeek M, Mesirov JP, et al. Molecular classification of cancer:
class discovery and class prediction by gene expression monitoring. Science. 1999;286:531–7
Ultsch, A., Thrun, M.C., Hansen-Goos, O., Loetsch, J.: Identification of Molecular Fingerprints
in Human Heat Pain Thresholds by Use of an Interactive Mixture Model R Toolbox(AdaptGauss),
International Journal of Molecular Sciences, doi:10.3390/ijms161025897, 2015.
Scrucca L., Fop M., Murphy T. B. and Raftery A. E. (2016) mclust 5: clustering, classification and
density estimation using Gaussian finite mixture models, The R Journal, 8/1, pp. 205-233.
}
\author{
Aixiang Jiang
}
\keyword{EM}
\keyword{PS}
\keyword{self-learning}
| /man/PSstableSLwithWeights.Rd | no_license | ajiangsfu/PRPS | R | false | true | 5,346 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{read_plain_mode}
\alias{read_plain_mode}
\title{Read plain mode data}
\usage{
read_plain_mode(filename, tz = "UTC")
}
\arguments{
\item{filename}{character or connection}
\item{tz}{character, the time zone for the location and time requested}
}
\value{
list with
\itemize{
\item{location, the name of the location}
\item{lonlat, numeric two element vector of [lon,lat]}
\item{file, the name of the output file}
\item{status, status code from calling the function - either zero (success) or non-zero}
\item{moon, tibble of moon rise/set, possibly with 0 rows}
\item{sun, tibble of sun rise/set, possibly with 0 rows}
\item{tide, tibble of tide height and stage, possibly with 0 rows}
}
}
\description{
Read plain mode data
}
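\examples{
# Editor's hypothetical usage sketch; the file name and time zone below are
# assumptions, not files shipped with the package.
\dontrun{
x <- read_plain_mode("annual.txt", tz = "America/New_York")
x$location
head(x$tide)
}
}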
| /man/read_plain_mode.Rd | permissive | BigelowLab/maree | R | false | true | 834 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\docType{class}
\name{Trough-class}
\alias{Trough-class}
\title{An S4 class to represent trough fill elements}
\description{
An instance of the class \code{Trough} contains \eqn{n} trough fills (with
\eqn{0 \leq n}). The troughs are defined as truncated ellipsoids.
}
\details{
Note that the third object position coordinate (z) corresponds to the
object top elevation.
The truncation ratio \code{rH} is defined as \eqn{rH \times H = c}.
The fills of the troughs are defined by several troughs of smaller size.
}
\section{Slots}{
\describe{
\item{\code{version}}{A character vector indicating the version of CBRDM}
\item{\code{id}}{A length-\eqn{n} integer vector specifying a unique id for each
trough.}
\item{\code{pos}}{A \eqn{n \times 3} numeric matrix defining the object position
coordinates.}
\item{\code{L}}{A length-\eqn{n} numeric vector specifying the object lengths.}
\item{\code{W}}{A length-\eqn{n} numeric vector specifying the object widths.}
\item{\code{H}}{A length-\eqn{n} numeric vector specifying the object heights.}
\item{\code{theta}}{A length-\eqn{n} numeric vector specifying the object
orientation (horizontal angle in radian).}
\item{\code{rH}}{A length-\eqn{n} numeric vector specifying the truncation ratio.}
\item{\code{fill}}{A length-\eqn{n} list specifying the object fills}
}}
\seealso{
\code{\link{Deposits-class}}, \code{\link{TrEllipsoid-class}}
}
| /man/Trough-class.Rd | no_license | richardhaslam/CBRDM | R | false | true | 1,467 | rd |
# Some code to check whether the true effect size of the positive controls holds for the case-control design:
allControls <- read.csv(file.path(outputFolder, "allControls.csv"))
i <- 244
nesting <- FALSE
allControls$trueOddsRatio <- NA
allControls$trueRelativeRisk <- NA
allControls[i, ]
if (!nesting) {
caseData <- CaseControl::loadCaseData(file.path(outputFolder, "caseControl", "caseData_cd1"))
} else {
loadedData <- ""
allControls <- allControls[order(allControls$nestingId), ]
}
for (i in 1:nrow(allControls)) {
print(i)
if (allControls$targetEffectSize[i] == 1) {
allControls$trueOddsRatio[i] <- 1.0
allControls$trueRelativeRisk[i] <- 1.0
} else {
if (nesting) {
fileName <- file.path(outputFolder, "caseControl", sprintf("caseData_cd2_n%s", allControls$nestingId[i]))
if (loadedData != fileName) {
caseData <- CaseControl::loadCaseData(fileName)
loadedData <- fileName
}
}
caseControlsNc <- CaseControl::selectControls(caseData = caseData,
outcomeId = allControls$oldOutcomeId[i],
firstOutcomeOnly = TRUE,
washoutPeriod = 365,
controlsPerCase = 0,
removedUnmatchedCases = FALSE)
caseControlsPc <- CaseControl::selectControls(caseData = caseData,
outcomeId = allControls$outcomeId[i],
firstOutcomeOnly = TRUE,
washoutPeriod = 365,
controlsPerCase = 0,
removedUnmatchedCases = FALSE)
exposureDataNc <- CaseControl::getDbExposureData(caseControlsNc, exposureIds = allControls$targetId[i], caseData = caseData)
exposureDataPc <- CaseControl::getDbExposureData(caseControlsPc, exposureIds = allControls$targetId[i], caseData = caseData)
ccdNc <- CaseControl::createCaseControlData(caseControlsExposure = exposureDataNc, exposureId = allControls$targetId[i], firstExposureOnly = FALSE, riskWindowStart = 0, riskWindowEnd = 0, exposureWashoutPeriod = 365)
ccdPc <- CaseControl::createCaseControlData(caseControlsExposure = exposureDataPc, exposureId = allControls$targetId[i], firstExposureOnly = FALSE, riskWindowStart = 0, riskWindowEnd = 0, exposureWashoutPeriod = 365)
# exposures <- caseData$exposures[caseData$exposures$exposureId == allControls$targetId[i], ]
# exposures <- merge(exposures, caseData$nestingCohorts)
# exposures <- ff::as.ram(exposures)
# exposures$startDate <- exposures$startDate + 365
# exposures <- exposures[exposures$exposureEndDate >= exposures$startDate, ]
# exposures$exposureStartDate[exposures$exposureStartDate < exposures$startDate] <- exposures$startDate[exposures$exposureStartDate < exposures$startDate]
# exposureDays <- as.integer(sum(exposures$exposureEndDate - exposures$exposureStartDate))
# or <- (sum(ccdPc$exposed) / (exposureDays - sum(ccdPc$exposed))) / (sum(ccdNc$exposed) / (exposureDays - sum(ccdNc$exposed)))
# allControls$trueOddsRatio[i] <- or
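      # Descriptive note (editor's addition): trueRelativeRisk is taken as the ratio of
      # exposed cases for the positive-control outcome to exposed cases for its
      # negative-control counterpart.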
rr <- sum(ccdPc$exposed) / sum(ccdNc$exposed)
allControls$trueRelativeRisk[i] <- rr
}
}
ggplot2::ggplot(allControls, ggplot2::aes(x = trueEffectSize, y = trueRelativeRisk)) + ggplot2::geom_point(alpha = 0.25)
allControls$trueEffectSize[i]
allControls$targetEffectSize[i]
allControls$trueEffectSizeFirstExposure[i]
ccd <- rbind(ccdNc[ccdNc$exposed, ], ccdPc[ccdPc$exposed, ])
ccd$isCase <- c(rep(0, sum(ccdNc$exposed)), rep(1, sum(ccdPc$exposed)))
or <- (sum(ccd$isCase & ccd$exposed) / sum(!ccd$isCase & ccd$exposed)) / (sum(ccd$isCase & !ccd$exposed) / sum(!ccd$isCase & !ccd$exposed))
or
injectionSummary <- readRDS(file.path(outputFolder, "injectionSummary.rds"))
injectionSummary[injectionSummary$newOutcomeId == allControls$outcomeId[i], ]
exposures <- caseData$exposures[caseData$exposures$exposureId == allControls$targetId[i], ]
exposures <- merge(exposures, caseData$nestingCohorts)
exposures <- ff::as.ram(exposures)
exposures$startDate <- exposures$startDate + 365
# exposures <- exposures[exposures$exposureEndDate >= exposures$startDate & exposures$exposureStartDate < exposures$endDate, ]
exposures <- exposures[exposures$exposureStartDate >= exposures$startDate & exposures$exposureStartDate < exposures$endDate, ]
exposures$exposureStartDate[exposures$exposureStartDate < exposures$startDate] <- exposures$startDate[exposures$exposureStartDate < exposures$startDate]
nrow(exposures)
outcomes <- caseData$cases[caseData$cases$outcomeId == allControls$oldOutcomeId[i], ]
outcomes <- ff::as.ram(outcomes)
outcomes <- merge(outcomes, exposures)
exposedOutcomes <- outcomes[outcomes$indexDate >= outcomes$exposureStartDate & outcomes$indexDate <= outcomes$exposureEndDate, ]
nrow(exposedOutcomes)
names(outcomes)
| /extras/ComputeRealEffectSizes.R | permissive | giginghn/MethodsLibraryPleEvaluation | R | false | false | 5,250 | r |
context("Face Emotion Detection")
test_that("Face Emotion Detection API returns proper answer", {
fer_set <- c("Angry", "Sad", "Neutral", "Surprise", "Fear", "Happy")
test_image <- matrix(runif(48*48, 0, 1), nrow = 48)
emotion <- face_emotion(test_image)
expect_is(emotion, "list")
expect_true(all(names(emotion) %in% fer_set))
expect_equal(sum(unlist(emotion)), 1)
})
test_that("Throws error on empty or wrong image", {
expect_error(face_emotion())
expect_error(face_emotion(array(10)))
})
| /tests/testthat/test-face_emotion.r | no_license | cran/indicoio | R | false | false | 515 | r |
#' @export
summarise.tbl_svy <- function(.data, ..., .groups = NULL, .unpack = TRUE) {
.dots <- rlang::quos(...)
if (is_lazy_svy(.data)) .data <- localize_lazy_svy(.data, .dots)
# Set current_svy so available to svy stat functions
old <- set_current_svy(list(full = .data, split = split_for_context(.data)))
on.exit(set_current_svy(old), add = TRUE)
out <- dplyr::summarise(.data$variables, ..., .groups = .groups)
# srvyr predates dplyr's data.frame columns so default to unpacking
# them wide
if (.unpack) out <- unpack_cols(out)
out
}
#' @export
summarise_.tbl_svy <- function(.data, ..., .dots) {
dots <- compat_lazy_dots(.dots, caller_env(), ...)
summarise(.data, !!!dots)
}
#' @export
summarise.grouped_svy <- function(.data, ..., .groups = NULL, .unpack = TRUE) {
.dots <- rlang::quos(...)
if (is_lazy_svy(.data)) .data <- localize_lazy_svy(.data, .dots)
# Set current_svy so available to svy stat functions
old <- set_current_svy(list(full = .data, split = split_for_context(.data)))
on.exit(set_current_svy(old), add = TRUE)
out <- dplyr::summarise(.data$variables, !!!.dots, .groups = .groups)
# Remove interaction variables if present
out <- uninteract(out)
# srvyr predates dplyr's data.frame columns so default to unpacking
# them wide
if (.unpack) out <- unpack_cols(out)
out
}
unpack_cols <- function(results) {
old_groups <- group_vars(results)
is_rowwise <- inherits(results, "rowwise_df")
# Top level renames
var_names <- names(results)[vapply(results, is_srvyr_result_df, logical(1))]
out <- tidyr::unpack(
results,
dplyr::all_of(var_names),
# ugly regex hack to get around https://github.com/tidyverse/tidyr/issues/1161
# __SRVYR_COEF__ is to allow the possibility of legacy srvyr extensions
names_sep = "___SRVYR_SEP___",
names_repair = ~gsub("___SRVYR_SEP___(coef)?(__SRVYR_COEF__)?", "", .)
)
# Also check if there are some nested srvyr results (recursively)
var_names <- names(out)[vapply(out, is.data.frame, logical(1))]
out <- dplyr::mutate(out, dplyr::across(dplyr::all_of(var_names), unpack_cols))
# restore grouping/rowwise (dplyr unpacking can remove rowwise sometimes)
if (length(old_groups) > 0 & !is_rowwise) {
out <- group_by(out, !!!rlang::syms(old_groups))
} else if (length(old_groups) > 0 & is_rowwise) {
out <- dplyr::rowwise(out, !!!rlang::syms(old_groups))
} else if (is_rowwise) {
out <- dplyr::rowwise(out)
}
out
}
#' @export
summarise_.grouped_svy <- function(.data, ..., .dots) {
dots <- compat_lazy_dots(.dots, caller_env(), ...)
summarise(.data, !!!dots)
}
#' Summarise multiple values to a single value.
#'
#' Summarise multiple values to a single value.
#'
#'
#' @param .data tbl A \code{tbl_svy} object
#' @param ... Name-value pairs of summarizing expressions, see details
#' @param .groups Defaults to "drop_last" in srvyr meaning that the last group is peeled
#' off, but if there are more groups they will be preserved. Other options are "drop", which
#' drops all groups, "keep" which keeps all of them and "rowwise" which converts the object
#' to a rowwise object (meaning calculations will be performed on each row).
#' @param .unpack Whether to "unpack" named \code{data.frame} columns. \code{srvyr} predates
#' \code{dplyr}'s support for data.frame columns so it does not treat them the same way by
#' default.
#'
#' @details
#' Summarise for \code{tbl_svy} objects accepts several specialized functions.
#' Each of the functions takes a variable (or two, in the case of
#' \code{survey_ratio}) from the data.frame and defaults to providing the measure
#' and its standard error.
#'
#' The argument \code{vartype} can choose one or more measures of uncertainty,
#' \code{se} for standard error, \code{ci} for confidence interval, \code{var}
#' for variance, and \code{cv} for coefficient of variation. \code{level}
#' specifies the level for the confidence interval.
#'
#' The other arguments correspond to the analogous function arguments from the
#' survey package.
#'
#' The available functions from srvyr are:
#'
#'\describe{
#' \item{\code{\link{survey_mean}}}{
#' Calculate the mean of a numeric variable or the proportion falling into \code{groups}
#' for the entire population or by \code{groups}. Based on \code{\link[survey]{svymean}}
#' and \code{\link[survey]{svyciprop}}.}.
#' \item{\code{\link{survey_total}}}{
#' Calculate the survey total of the entire population or by \code{groups}.
#' Based on \code{\link[survey]{svytotal}}.}
#' \item{\code{\link{survey_prop}}}{
#' Calculate the proportion of the entire population or by \code{groups}.
#' Based on \code{\link[survey]{svyciprop}}.}
#' \item{\code{\link{survey_ratio}}}{
#' Calculate the ratio of 2 variables in the entire population or by \code{groups}.
#' Based on \code{\link[survey]{svyratio}}.}
#' \item{\code{\link{survey_quantile}} & \code{\link{survey_median}}}{
#' Calculate quantiles in the entire population or by \code{groups}. Based on
#' \code{\link[survey]{svyquantile}}.}
#' \item{\code{\link{unweighted}}}{
#' Calculate an unweighted estimate as you would on a regular \code{tbl_df}.
#' Based on dplyr's \code{\link[dplyr]{summarise}}.}
#'}
#'
#' You can use expressions both in the \code{...} of \code{summarize} and also
#' in the arguments to the summarizing functions. Though this is valid syntactically
#' it can also allow you to calculate incorrect results (for example if you multiply
#' the mean by 100, the standard error is also multiplied by 100, but the variance
#' is not).
#'
#' @examples
#' data(api, package = "survey")
#'
#' dstrata <- apistrat %>%
#' as_survey_design(strata = stype, weights = pw)
#'
#' dstrata %>%
#' summarise(api99_mn = survey_mean(api99),
#' api00_mn = survey_mean(api00),
#' api_diff = survey_mean(api00 - api99))
#'
#' dstrata_grp <- dstrata %>%
#' group_by(stype)
#'
#' dstrata_grp %>%
#' summarise(api99_mn = survey_mean(api99),
#' api00_mn = survey_mean(api00),
#' api_diff = survey_mean(api00 - api99))
#'
#' # `dplyr::across` can be used to programmatically summarize multiple columns
#' # See https://dplyr.tidyverse.org/articles/colwise.html for details
#' # A basic example of working on 2 columns at once and then calculating the total
#' # the mean
#' total_vars <- c("enroll", "api.stu")
#' dstrata %>%
#' summarize(across(c(all_of(total_vars)), survey_total))
#'
#' # Expressions are allowed in summarize arguments & inside functions
#' # Here we can calculate a binary variable on the fly and also multiply by 100 to
#' # get percentages
#' dstrata %>%
#' summarize(api99_over_700_pct = 100 * survey_mean(api99 > 700))
#'
#' # But be careful, the variance doesn't scale the same way, so this is wrong!
#' dstrata %>%
#' summarize(api99_over_700_pct = 100 * survey_mean(api99 > 700, vartype = "var"))
#' # Wrong variance!
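#'
#' # A safer pattern (editor's sketch, not from the package authors): summarize on
#' # the original scale and rescale only the point estimate afterwards
#' dstrata %>%
#'   summarize(api99_over_700 = survey_mean(api99 > 700, vartype = "var")) %>%
#'   dplyr::mutate(api99_over_700_pct = 100 * api99_over_700)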
#'
#' @name summarise
#' @export
#' @importFrom dplyr summarise
NULL
#' @name summarise_
#' @export
#' @importFrom dplyr summarise_
#' @rdname srvyr-se-deprecated
#' @inheritParams summarise
NULL
#' @name summarize
#' @export
#' @importFrom dplyr summarize
#' @rdname summarise
NULL
#' @name summarize_
#' @export
#' @importFrom dplyr summarize_
#' @rdname srvyr-se-deprecated
#' @inheritParams summarize
NULL
| /R/summarise.r | no_license | bschneidr/srvyr | R | false | false | 7,365 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbetagpdcon.r
\name{betagpdcon}
\alias{betagpdcon}
\alias{dbetagpdcon}
\alias{pbetagpdcon}
\alias{qbetagpdcon}
\alias{rbetagpdcon}
\title{Beta Bulk and GPD Tail Extreme Value Mixture Model with Single Continuity Constraint}
\usage{
dbetagpdcon(x, bshape1 = 1, bshape2 = 1, u = qbeta(0.9, bshape1,
bshape2), xi = 0, phiu = TRUE, log = FALSE)
pbetagpdcon(q, bshape1 = 1, bshape2 = 1, u = qbeta(0.9, bshape1,
bshape2), xi = 0, phiu = TRUE, lower.tail = TRUE)
qbetagpdcon(p, bshape1 = 1, bshape2 = 1, u = qbeta(0.9, bshape1,
bshape2), xi = 0, phiu = TRUE, lower.tail = TRUE)
rbetagpdcon(n = 1, bshape1 = 1, bshape2 = 1, u = qbeta(0.9,
bshape1, bshape2), xi = 0, phiu = TRUE)
}
\arguments{
\item{x}{quantiles}
\item{bshape1}{beta shape 1 (positive)}
\item{bshape2}{beta shape 2 (positive)}
\item{u}{threshold over \eqn{(0, 1)}}
\item{xi}{shape parameter}
\item{phiu}{probability of being above threshold \eqn{[0, 1]} or \code{TRUE}}
\item{log}{logical, if TRUE then log density}
\item{q}{quantiles}
\item{lower.tail}{logical, if FALSE then upper tail probabilities}
\item{p}{cumulative probabilities}
\item{n}{sample size (positive integer)}
}
\value{
\code{\link[evmix:betagpdcon]{dbetagpdcon}} gives the density,
\code{\link[evmix:betagpdcon]{pbetagpdcon}} gives the cumulative distribution function,
\code{\link[evmix:betagpdcon]{qbetagpdcon}} gives the quantile function and
\code{\link[evmix:betagpdcon]{rbetagpdcon}} gives a random sample.
}
\description{
Density, cumulative distribution function, quantile function and
random number generation for the extreme value mixture model with beta for bulk
distribution up to the threshold and conditional GPD above the threshold with continuity at the threshold. The parameters
are the beta shape 1 \code{bshape1} and shape 2 \code{bshape2}, threshold \code{u},
GPD shape \code{xi} and tail fraction \code{phiu}.
}
\details{
Extreme value mixture model combining beta distribution for the bulk
below the threshold and GPD for upper tail with continuity at threshold.
The user can pre-specify \code{phiu}
permitting a parameterised value for the tail fraction \eqn{\phi_u}. Alternatively, when
\code{phiu=TRUE} the tail fraction is estimated as the tail fraction from the
beta bulk model.
The usual beta distribution is defined over \eqn{[0, 1]}, but this mixture is generally
not limited in the upper tail \eqn{[0,\infty]}, except for the usual upper tail
limits for the GPD when \code{xi<0} discussed in \code{\link[evmix:gpd]{gpd}}.
Therefore, the threshold is limited to \eqn{(0, 1)}.
The cumulative distribution function with tail fraction \eqn{\phi_u} defined by the
upper tail fraction of the beta bulk model (\code{phiu=TRUE}), upto the
threshold \eqn{0 \le x \le u < 1}, given by:
\deqn{F(x) = H(x)}
and above the threshold \eqn{x > u}:
\deqn{F(x) = H(u) + [1 - H(u)] G(x)}
where \eqn{H(x)} and \eqn{G(X)} are the beta and conditional GPD
cumulative distribution functions (i.e. \code{pbeta(x, bshape1, bshape2)} and
\code{pgpd(x, u, sigmau, xi)}).
The cumulative distribution function for pre-specified \eqn{\phi_u}, upto the
threshold \eqn{0 \le x \le u < 1}, is given by:
\deqn{F(x) = (1 - \phi_u) H(x)/H(u)}
and above the threshold \eqn{x > u}:
\deqn{F(x) = \phi_u + [1 - \phi_u] G(x)}
Notice that these definitions are equivalent when \eqn{\phi_u = 1 - H(u)}.
The continuity constraint means that \eqn{(1 - \phi_u) h(u)/H(u) = \phi_u g(u)}
where \eqn{h(x)} and \eqn{g(x)} are the beta and conditional GPD
density functions (i.e. \code{dbeta(x, bshape1, bshape2)} and
\code{dgpd(x, u, sigmau, xi)}) respectively. The resulting GPD scale parameter is then:
\deqn{\sigma_u = \phi_u H(u) / [1 - \phi_u] h(u)}.
In the special case of where the tail fraction is defined by the bulk model this reduces to
\deqn{\sigma_u = [1 - H(u)] / h(u)}.
See \code{\link[evmix:gpd]{gpd}} for details of GPD upper tail component and
\code{\link[stats:Beta]{dbeta}} for details of beta bulk component.
}
\note{
All inputs are vectorised except \code{log} and \code{lower.tail}.
The main inputs (\code{x}, \code{p} or \code{q}) and parameters must be either
a scalar or a vector. If vectors are provided they must all be of the same length,
and the function will be evaluated for each element of the vector. In the case of
\code{\link[evmix:betagpdcon]{rbetagpdcon}} any input vector must be of length \code{n}.
Default values are provided for all inputs, except for the fundamentals
\code{x}, \code{q} and \code{p}. The default sample size for
\code{\link[evmix:betagpdcon]{rbetagpdcon}} is 1.
Missing (\code{NA}) and Not-a-Number (\code{NaN}) values in \code{x},
\code{p} and \code{q} are passed through as is and infinite values are set to
\code{NA}. None of these are permitted for the parameters.
Error checking of the inputs (e.g. invalid probabilities) is carried out and
will either stop or give a warning message as appropriate.
}
\examples{
\dontrun{
set.seed(1)
par(mfrow = c(2, 2))
x = rbetagpdcon(1000, bshape1 = 1.5, bshape2 = 2, u = 0.7, phiu = 0.2)
xx = seq(-0.1, 2, 0.01)
hist(x, breaks = 100, freq = FALSE, xlim = c(-0.1, 2))
lines(xx, dbetagpdcon(xx, bshape1 = 1.5, bshape2 = 2, u = 0.7, phiu = 0.2))
# three tail behaviours
plot(xx, pbetagpdcon(xx, bshape1 = 1.5, bshape2 = 2, u = 0.7, phiu = 0.2), type = "l")
lines(xx, pbetagpdcon(xx, bshape1 = 1.5, bshape2 = 2, u = 0.7, phiu = 0.2, xi = 0.3), col = "red")
lines(xx, pbetagpdcon(xx, bshape1 = 1.5, bshape2 = 2, u = 0.7, phiu = 0.2, xi = -0.3), col = "blue")
legend("topleft", paste("xi =",c(0, 0.3, -0.3)),
col=c("black", "red", "blue"), lty = 1)
x = rbetagpdcon(1000, bshape1 = 2, bshape2 = 0.8, u = 0.7, phiu = 0.5)
hist(x, breaks = 100, freq = FALSE, xlim = c(-0.1, 2))
lines(xx, dbetagpdcon(xx, bshape1 = 2, bshape2 = 0.8, u = 0.7, phiu = 0.5))
plot(xx, dbetagpdcon(xx, bshape1 = 2, bshape2 = 0.8, u = 0.7, phiu = 0.5, xi=0), type = "l")
lines(xx, dbetagpdcon(xx, bshape1 = 2, bshape2 = 0.8, u = 0.7, phiu = 0.5, xi=-0.2), col = "red")
lines(xx, dbetagpdcon(xx, bshape1 = 2, bshape2 = 0.8, u = 0.7, phiu = 0.5, xi=0.2), col = "blue")
legend("topright", c("xi = 0", "xi = 0.2", "xi = -0.2"),
col=c("black", "red", "blue"), lty = 1)
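# Editor's sketch: the GPD scale implied by the continuity constraint when the
# tail fraction comes from the bulk model, i.e. sigmau = [1 - H(u)] / h(u)
u = 0.7; bshape1 = 1.5; bshape2 = 2
(1 - pbeta(u, bshape1, bshape2)) / dbeta(u, bshape1, bshape2)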
}
}
\references{
\url{http://en.wikipedia.org/wiki/Beta_distribution}
\url{http://en.wikipedia.org/wiki/Generalized_Pareto_distribution}
Scarrott, C.J. and MacDonald, A. (2012). A review of extreme value
threshold estimation and uncertainty quantification. REVSTAT - Statistical
Journal 10(1), 33-59. Available from \url{http://www.ine.pt/revstat/pdf/rs120102.pdf}
MacDonald, A. (2012). Extreme value mixture modelling with medical and
industrial applications. PhD thesis, University of Canterbury, New Zealand.
\url{http://ir.canterbury.ac.nz/bitstream/10092/6679/1/thesis_fulltext.pdf}
}
\seealso{
\code{\link[evmix:gpd]{gpd}} and \code{\link[stats:Beta]{dbeta}}
Other betagpd: \code{\link{betagpd}},
\code{\link{fbetagpdcon}}, \code{\link{fbetagpd}}
Other betagpdcon: \code{\link{betagpd}},
\code{\link{fbetagpdcon}}, \code{\link{fbetagpd}}
Other fbetagpdcon: \code{\link{fbetagpdcon}}
}
\author{
Yang Hu and Carl Scarrott \email{carl.scarrott@canterbury.ac.nz}
}
\concept{betagpd}
\concept{betagpdcon}
\concept{fbetagpdcon}
| /man/betagpdcon.Rd | no_license | cran/evmix | R | false | true | 7,482 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table1.R
\name{generateDayswithPAD}
\alias{generateDayswithPAD}
\title{Generates Days with PAD}
\usage{
generateDayswithPAD(webApiPrefix, webApiUseSsl, dcoList, cdmDb, scratchDb,
scratchTablePrefix)
}
\arguments{
\item{webApiPrefix}{The URL prefix of the WebAPI instance}
\item{webApiUseSsl}{Does the WebAPI use HTTPS?}
\item{dcoList}{The list of drug-comparator-outcomes}
\item{cdmDb}{The CDM database object constructed with \code{buildDatabase}}
\item{scratchDb}{The scratch database object constructed with \code{buildDatabase}}
\item{scratchTablePrefix}{The prefix of all scratch tables created}
}
\details{
Generates Days with PAD
}
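\examples{
\dontrun{
# Editor's hypothetical sketch; every argument value below is an assumption
# (cdmDb and scratchDb come from buildDatabase(), dcoList is a drug-comparator-outcome list).
generateDayswithPAD(webApiPrefix = "http://localhost:8080/WebAPI",
                    webApiUseSsl = FALSE,
                    dcoList = dcoList,
                    cdmDb = cdmDb,
                    scratchDb = scratchDb,
                    scratchTablePrefix = "pad")
}
}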
\author{
Ajit Londhe
}
| /man/generateDayswithPAD.Rd | no_license | alondhe/MPHThesisPAD | R | false | true | 747 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{AL_Libs}
\alias{AK_Libs}
\alias{AL_Libs}
\alias{AR_Libs}
\alias{AZ_Libs}
\alias{CA_Libs}
\alias{CO_Libs}
\alias{CT_Libs}
\alias{DE_Libs}
\alias{FL_Libs}
\alias{GA_Libs}
\alias{HI_Libs}
\alias{IA_Libs}
\alias{ID_Libs}
\alias{IL_Libs}
\alias{IN_Libs}
\alias{KS_Libs}
\alias{KY_Libs}
\alias{LA_Libs}
\alias{MA_Libs}
\alias{MD_Libs}
\alias{ME_Libs}
\alias{MI_Libs}
\alias{MN_Libs}
\alias{MO_Libs}
\alias{MS_Libs}
\alias{MT_Libs}
\alias{NC_Libs}
\alias{ND_Libs}
\alias{NE_Libs}
\alias{NH_Libs}
\alias{NJ_Libs}
\alias{NM_Libs}
\alias{NV_Libs}
\alias{NY_Libs}
\alias{OH_Libs}
\alias{OK_Libs}
\alias{OR_Libs}
\alias{PA_Libs}
\alias{RI_Libs}
\alias{SC_Libs}
\alias{SD_Libs}
\alias{TN_Libs}
\alias{TX_Libs}
\alias{US_Libs}
\alias{UT_Libs}
\alias{VA_Libs}
\alias{VT_Libs}
\alias{WA_Libs}
\alias{WI_Libs}
\alias{WV_Libs}
\alias{WY_Libs}
\title{U.S. Public Library Data}
\format{A \code{data.frame} with 7 variables:
\tabular{rlll}{
 [, 1] \tab Library Name \tab Name of the public library \tab \bold{Categoric}\cr
 [, 2] \tab Address \tab Street address of the library \tab \bold{Categoric}\cr
 [, 3] \tab City \tab City in which the library is located \tab \bold{Categoric}\cr
 [, 4] \tab State \tab State in which the library is located \tab \bold{Categoric}\cr
 [, 5] \tab ZIP Code \tab ZIP code of the library \tab \bold{Categoric}\cr
 [, 6] \tab Latitude \tab Latitude of the library location \tab \bold{Numeric}\cr
 [, 7] \tab Longitude \tab Longitude of the library location \tab \bold{Numeric}
}}
\source{
http://www.publiclibraries.com
Google Maps Geocode API
}
\usage{
AL_Libs
AK_Libs
AZ_Libs
AR_Libs
CA_Libs
CO_Libs
CT_Libs
DE_Libs
FL_Libs
GA_Libs
HI_Libs
ID_Libs
IL_Libs
IN_Libs
IA_Libs
KS_Libs
KY_Libs
LA_Libs
ME_Libs
MD_Libs
MA_Libs
MI_Libs
MN_Libs
MS_Libs
MO_Libs
MT_Libs
NE_Libs
NV_Libs
NH_Libs
NJ_Libs
NM_Libs
NY_Libs
NC_Libs
ND_Libs
OH_Libs
OK_Libs
OR_Libs
PA_Libs
RI_Libs
SC_Libs
SD_Libs
TN_Libs
TX_Libs
UT_Libs
VT_Libs
VA_Libs
WA_Libs
WV_Libs
WI_Libs
WY_Libs
US_Libs
}
\description{
Information about public libraries in each state
}
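\examples{
# Editor's sketch: a quick look at the bundled data sets documented above
head(US_Libs)
str(AL_Libs)
}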
\keyword{datasets}
| /man/US_Libs.Rd | no_license | Auburngrads/publicLibs | R | false | true | 2,244 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_functions.R
\docType{package}
\name{classroom_googleAuthR}
\alias{classroom_googleAuthR}
\alias{classroom_googleAuthR-package}
\title{Google Classroom API
Manages classes, rosters, and invitations in Google Classroom.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:16:30
filename: /Users/mark/dev/R/autoGoogleAPI/googleclassroomv1.auto/R/classroom_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/classroom.course-work.readonly
\item https://www.googleapis.com/auth/classroom.courses
\item https://www.googleapis.com/auth/classroom.courses.readonly
\item https://www.googleapis.com/auth/classroom.coursework.me
\item https://www.googleapis.com/auth/classroom.coursework.me.readonly
\item https://www.googleapis.com/auth/classroom.coursework.students
\item https://www.googleapis.com/auth/classroom.coursework.students.readonly
\item https://www.googleapis.com/auth/classroom.profile.emails
\item https://www.googleapis.com/auth/classroom.profile.photos
\item https://www.googleapis.com/auth/classroom.rosters
\item https://www.googleapis.com/auth/classroom.rosters.readonly
\item https://www.googleapis.com/auth/classroom.student-submissions.me.readonly
\item https://www.googleapis.com/auth/classroom.student-submissions.students.readonly
}
}
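\examples{
\dontrun{
# A hypothetical sketch, not part of the generated package: it assumes you
# authenticate with googleAuthR and pre-select a subset of the scopes listed above.
options(googleAuthR.scopes.selected =
          c("https://www.googleapis.com/auth/classroom.courses.readonly",
            "https://www.googleapis.com/auth/classroom.rosters.readonly"))
googleAuthR::gar_auth()
}
}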
| /googleclassroomv1.auto/man/classroom_googleAuthR.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classroom_functions.R
\docType{package}
\name{classroom_googleAuthR}
\alias{classroom_googleAuthR}
\alias{classroom_googleAuthR-package}
\title{Google Classroom API
Manages classes, rosters, and invitations in Google Classroom.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 23:16:30
filename: /Users/mark/dev/R/autoGoogleAPI/googleclassroomv1.auto/R/classroom_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/classroom.course-work.readonly
\item https://www.googleapis.com/auth/classroom.courses
\item https://www.googleapis.com/auth/classroom.courses.readonly
\item https://www.googleapis.com/auth/classroom.coursework.me
\item https://www.googleapis.com/auth/classroom.coursework.me.readonly
\item https://www.googleapis.com/auth/classroom.coursework.students
\item https://www.googleapis.com/auth/classroom.coursework.students.readonly
\item https://www.googleapis.com/auth/classroom.profile.emails
\item https://www.googleapis.com/auth/classroom.profile.photos
\item https://www.googleapis.com/auth/classroom.rosters
\item https://www.googleapis.com/auth/classroom.rosters.readonly
\item https://www.googleapis.com/auth/classroom.student-submissions.me.readonly
\item https://www.googleapis.com/auth/classroom.student-submissions.students.readonly
}
}
|
library(vegan)

# Load the environmental (abiotic) and species (biotic) tables; column 1 holds row names
abiotic_cca <- read.csv("abiotic_cca.csv", row.names = 1)
View(abiotic_cca)
bio_cca <- read.csv("bio_cca.csv", row.names = 1)
View(bio_cca)

# Remove rare species: keep only species with more than 10 individuals overall
somaindsp = apply(bio_cca, 2, sum)
semraras = bio_cca[, somaindsp > 10]

# Log-transform abundances to reduce the weight of dominant species
bio_cca_log = decostand(semraras, "log")

# Forward selection of environmental variables, starting from the intercept-only model
fullmodel = cca(bio_cca_log ~ ., abiotic_cca)
smallmodel = cca(bio_cca_log ~ 1, abiotic_cca)
fit_model = ordistep(smallmodel, scope = formula(fullmodel))
fit_model
summary(fit_model)

# Check multicollinearity of the selected variables (variance inflation factors)
vif.cca(fit_model)
fit_model

# Refit the CCA with the selected variables and test it by permutation
fit_model = cca(formula = bio_cca_log ~ C.N + Ba + Mud + P + Sand, data = abiotic_cca)
fit_model
summary(fit_model)
anova.cca(fit_model)
anova.cca(fit_model, by = "terms")
anova.cca(fit_model, by = "axis")

# Triplot: environmental vectors plus species labels (most abundant species first)
par(mar = c(4.5, 4.5, 2, 1))
plot(fit_model, type = "n", las = 1, font = 6, font.axis = 6, font.lab = 6, cex.lab = 1.5)
text(fit_model, dis = "cn", font = 6, cex = 1.5)
stems = colSums(bio_cca_log)
orditorp(fit_model, "sp", priority = stems, pch = "", font = 4, cex = 1)

# Second plot: environmental vectors plus site scores
par(mar = c(4.5, 4.5, 2, 1))
plot(fit_model, type = "n", las = 1, font = 6, font.axis = 6, font.lab = 6, cex.lab = 1.5)
text(fit_model, dis = "cn", font = 6, cex = 1.5)
text(fit_model, dis = "sites", cex = 1, font = 7)
| /biocoenosis_cca.r | no_license | leopregnolato/data_analysis_master | R | false | false | 1,140 | r |
head(temp)
library("ggplot2")
temp$threshold = "non"
temp$threshold[temp$pvalue < 0.05 & temp$log2FoldChange>1 ] = "up"
temp$threshold[temp$pvalue<0.05 & temp$log2FoldChange< -1 ] = "down"
##install.packages("ggthemes")
library(ggthemes)
p<-ggplot(temp,aes(x=temp$log2FoldChange,y=-log10(temp$pvalue),colour=threshold))+xlab("log2 Fold Change")+ylab("-log10P-Value")+
geom_point(size=4,alpha=0.6)+
scale_color_manual(values =c("#0072B5","grey","#BC3C28")) +
geom_hline(aes(yintercept=-log10(0.05)),colour="grey",size=1.2 ,linetype=2) + #????ˮƽ??????
geom_vline(aes(xintercept=1), colour="grey",size=1.2 ,linetype=2)+ #???Ӵ?ֱ??????
geom_vline(aes(xintercept=-1), colour="grey",size=1.2 ,linetype=2)+ #???Ӵ?ֱ??????
theme_few()+theme(legend.title = element_blank()) #ȥ????????ͼע??ǩ
p
| /vocalno plot code.R | no_license | mweixq/OPOR | R | false | false | 840 | r |
head(temp)
library("ggplot2")
temp$threshold = "non"
temp$threshold[temp$pvalue < 0.05 & temp$log2FoldChange>1 ] = "up"
temp$threshold[temp$pvalue<0.05 & temp$log2FoldChange< -1 ] = "down"
##install.packages("ggthemes")
library(ggthemes)
p<-ggplot(temp,aes(x=temp$log2FoldChange,y=-log10(temp$pvalue),colour=threshold))+xlab("log2 Fold Change")+ylab("-log10P-Value")+
geom_point(size=4,alpha=0.6)+
scale_color_manual(values =c("#0072B5","grey","#BC3C28")) +
geom_hline(aes(yintercept=-log10(0.05)),colour="grey",size=1.2 ,linetype=2) + #????ˮƽ??????
geom_vline(aes(xintercept=1), colour="grey",size=1.2 ,linetype=2)+ #???Ӵ?ֱ??????
geom_vline(aes(xintercept=-1), colour="grey",size=1.2 ,linetype=2)+ #???Ӵ?ֱ??????
theme_few()+theme(legend.title = element_blank()) #ȥ????????ͼע??ǩ
p
|
#' Tidy up pathway by combining edges inside of edge_mapping_info
#' @description Combine edges that share nodes and have other commonalities
#' @export
#' @param edges The edge dataframe
#' @param edge_id The numeric value for the edge_id
#' @param data_added A logical indicator; whether experimental data have been mapped onto the edges
#' @param by_significance A logical indicator; when data are added, collapse edges separately within each significance (colour) group
#' @param by_number A logical indicator; darken a collapsed edge's colour in proportion to the number of edges it represents
#' @return A data frame that has had the given edge condensed for viewing
#' @examples \dontrun{
#' if (tidy_edge == TRUE) {
#' edge_IDs <- seq(min(expanded_edges$edgeID), max(expanded_edges$edgeID))
#' for (i in edge_IDs){
#' if(data_added == TRUE){
#' expanded_edges <- tidy_edge(edges = expanded_edges,
#' edge_id = edge_IDs[i],
#' data_added = TRUE,
#' by_significance = TRUE)
#' }
#' if(data_added == FALSE){
#' expanded_edges <- tidy_edge(edges = expanded_edges,
#' edge_id = edge_IDs[i],
#' data_added = FALSE)
#' }
#' }
#'
#'}
#'}
tidy_edge <- function(edges, edge_id, data_added = TRUE,
by_significance = FALSE, by_number = TRUE){
edge <- edges[edges$edgeID == edge_id,]
edges_1 <- edges[edges$edgeID != edge_id,]
if (!data_added){
entry1accessions <- paste(unique(edge$entry1accession), collapse =",")
entry2accessions <- paste(unique(edge$entry2accession), collapse =",")
entry1symbols <- paste(unique(edge$entry1symbol), collapse = ",")
entry2symbols <- paste(unique(edge$entry2symbol), collapse = ",")
tooltip <- paste(entry1symbols, edge$value[1], entry2symbols)
edge$entry1symbol[1] <- entry1symbols
edge$entry2symbol[1] <- entry2symbols
edge$entry1accession[1] <- entry1accessions
edge$entry2accession[1] <- entry2accessions
edge$tooltip[1] <- tooltip
reduced_edge <- edge[1,]
}
if (data_added){
if (sum(edge$has_data) == 0){
entry1accessions <- paste(unique(edge$entry1accession), collapse =",")
entry2accessions <- paste(unique(edge$entry2accession), collapse =",")
entry1symbols <- paste(unique(edge$entry1symbol), collapse = ",")
entry2symbols <- paste(unique(edge$entry2symbol), collapse = ",")
tooltip <- paste(entry1symbols, edge$value[1], entry2symbols)
edge$entry1symbol[1] <- entry1symbols
edge$entry2symbol[1] <- entry2symbols
edge$entry1accession[1] <- entry1accessions
edge$entry2accession[1] <- entry2accessions
edge$tooltip[1] <- tooltip
reduced_edge <- edge[1,]
}
if (sum(edge$has_data) != 0 ){
reduced_edge <- edge[edge$has_data != 0,]
if(by_significance == TRUE){
for (i in 1:length(unique(reduced_edge$color))){
rp <- reduced_edge[reduced_edge$color == unique(reduced_edge$color)[i],]
reduced_edge <- reduced_edge[reduced_edge$color != unique(reduced_edge$color)[i],]
entry1accessions <- paste(unique(rp$entry1accession), collapse =",")
entry2accessions <- paste(unique(rp$entry2accession), collapse =",")
entry1symbols <- paste(unique(rp$entry1symbol), collapse = ",")
entry2symbols <- paste(unique(rp$entry2symbol), collapse = ",")
tooltip <- paste(unique(rp$tooltip), collapse = " , ")
average_summary_score <- mean(rp$summary_score)
rp$entry1symbol[1] <- entry1symbols
rp$entry2symbol[1] <- entry2symbols
rp$entry1accession[1] <- entry1accessions
rp$entry2accession[1] <- entry2accessions
rp$tooltip[1] <- tooltip
rp$summary_score[1] <- average_summary_score
if(by_number == TRUE) {
col <- rp$color[1]
thresh <- min(5, (nrow(rp)-1))
value <- 1-(0.1*thresh)
col1 <- round(value*(col2rgb(col)[1]))
col2 <- round(value*(col2rgb(col)[2]))
col3 <- round(value*(col2rgb(col)[3]))
rp$color[1] <- rgb(col1, col2, col3, maxColorValue = 255)
}
reduced_edge <- rbind(rp[1,], reduced_edge)
}
}
}
}
edges <- rbind (edges_1, reduced_edge)
return(edges)
} | /R/tidy_edge.R | no_license | NicholasClark/KEGGlincs | R | false | false | 4,262 | r |
#' Take a screenshot of a Shiny app
#'
#' \code{appshot} performs a \code{\link{webshot}} using two different methods
#' depending upon the object provided. If a 'character' is provided (pointing to
#' an app.R file or app directory) an isolated background R process is launched
#' to run the Shiny application. The current R process then captures the
#' \code{\link{webshot}}. When a Shiny application object is supplied to
#' \code{appshot}, it is reversed: the Shiny application runs in the current R
#' process and an isolated background R process is launched to capture a
#' \code{\link{webshot}}. The reason it is reversed in the second case has to do
#' with scoping: although it would be preferable to run the Shiny application in
#' a background process and call \code{webshot} from the current process, with
#' Shiny application objects, there are potential scoping errors when run this
#' way.
#'
#' @inheritParams webshot
#' @param app A Shiny app object, or a string naming an app directory.
#' @param port Port that Shiny will listen on.
#' @param envvars A named character vector or named list of environment
#' variables and values to set for the Shiny app's R process. These will be
#' unset after the process exits. This can be used to pass configuration
#' information to a Shiny app.
#' @param webshot_timeout The maximum number of seconds the background
#'   screenshot process is allowed to run before the process is killed. If a delay
#'   argument is supplied (in \code{...}), the delay value is added to the timeout value.
#'
#' @param ... Other arguments to pass on to \code{\link{webshot}}.
#'
#' @rdname appshot
#' @examples
#' if (interactive()) {
#' appdir <- system.file("examples", "01_hello", package="shiny")
#'
#' # With a Shiny directory
#' appshot(appdir, "01_hello.png")
#'
#' # With a Shiny App object
#' shinyapp <- shiny::shinyAppDir(appdir)
#' appshot(shinyapp, "01_hello_app.png")
#' }
#'
#' @export
appshot <- function(app, file = "webshot.png", ...,
port = getOption("shiny.port"), envvars = NULL) {
UseMethod("appshot")
}
#' @rdname appshot
#' @export
appshot.character <- function(
app,
file = "webshot.png", ...,
port = getOption("shiny.port"),
envvars = NULL
) {
port <- available_port(port)
url <- shiny_url(port)
# Run app in background with envvars
p <- r_background_process(
function(...) {
shiny::runApp(...)
},
args = list(
appDir = app,
port = port,
display.mode = "normal",
launch.browser = FALSE
),
envvars = envvars
)
on.exit({
if (p$is_alive()) {
p$interrupt()
p$wait(2)
if (p$is_alive()) {
p$kill()
}
}
})
# Wait for app to start
wait_until_server_exists(url, p)
# Get screenshot
fileout <- webshot(url, file = file, ...)
invisible(fileout)
}
#' @rdname appshot
#' @export
appshot.shiny.appobj <- function(
app,
file = "webshot.png", ...,
port = getOption("shiny.port"),
envvars = NULL,
webshot_timeout = 60
) {
port <- available_port(port)
url <- shiny_url(port)
args <- list(
url = url,
file = file,
...,
timeout = webshot_app_timeout()
)
p <- r_background_process(
function(url, file, ..., timeout) {
# Wait for app to start
# Avoid ::: for internal function.
wait <- utils::getFromNamespace("wait_until_server_exists", "webshot2")
wait(url, timeout = timeout)
webshot2::webshot(url = url, file = file, ...)
},
args,
envvars = envvars
)
on.exit({
p$kill()
})
# add a delay to the webshot_timeout if it exists
if(!is.null(args$delay)) {
webshot_timeout <- webshot_timeout + args$delay
}
start_time <- as.numeric(Sys.time())
# Add a shiny app observer which checks every 200ms to see if the background r session is alive
shiny::observe({
# check the r session rather than the file to avoid race cases or random issues
if (p$is_alive()) {
if ((as.numeric(Sys.time()) - start_time) <= webshot_timeout) {
# try again later
shiny::invalidateLater(200)
} else {
        # timeout has occurred; close the app and the background R session
message("webshot timed out")
p$kill()
shiny::stopApp()
}
} else {
# r_bg session has stopped, close the app
shiny::stopApp()
}
return()
})
# run the app
shiny::runApp(app, port = port, display.mode = "normal", launch.browser = FALSE)
# return webshot2::webshot file value
invisible(p$get_result()) # safe to call as the r_bg must have ended
}
| /R/appshot.R | no_license | AnaRDias/AnaRDias | R | false | false | 4,596 | r |
library(data.table)
library(dplyr)
library(ggplot2)   # required for the ggplot() call at the end of the script
#load data
NEI <- readRDS("./summarySCC_PM25.rds")
SCC <- readRDS("./Source_Classification_Code.rds")
# select the sources related to motor vehicles
SCC_vehicles <- SCC[grep("Mobile.*Vehicles", SCC$EI.Sector),]
SCC_vehicles_ <- unique(SCC_vehicles$SCC)
# select the sources from NEI dataset
NEI_vehicles <- subset(NEI, SCC %in% SCC_vehicles_)
NEI_vehicles <- subset(NEI_vehicles, fips == "24510" | fips == "06037")
NEI_vehicles$fips[NEI_vehicles$fips=="24510"] <- "Baltimore"
NEI_vehicles$fips[NEI_vehicles$fips=="06037"] <- "Los Angeles"
# join the descriptions and codes
NEI_vehicles <- merge(x = NEI_vehicles,
y = SCC_vehicles[,c("SCC", "SCC.Level.Two","SCC.Level.Three")],
by = "SCC")
# group by type and year
NEI_vehicles <- group_by(NEI_vehicles, year,SCC.Level.Two, fips)
# sum the total emissions by type and year
NEI_vehicles <- summarize(NEI_vehicles, Total_year = sum(Emissions))
# group by year
NEI_vehicles_tot <- group_by(NEI_vehicles,year, fips)
# sum emissions by year and add column level
NEI_vehicles_tot <- summarize(NEI_vehicles_tot, Total_year = sum(Total_year)) %>%
  mutate(SCC.Level.Two = "TOTAL")
NEI_vehicles <- bind_rows(NEI_vehicles, NEI_vehicles_tot)
#plot 6
png('plot6.png', width = 480, height = 480)
ggplot(NEI_vehicles, aes(x = as.factor(year), y =Total_year, fill = SCC.Level.Two))+
geom_bar(stat = "identity")+
ylab(expression("Total PM"[2.5]*" emissions"))+
xlab("Year")+
guides(fill = FALSE)+
scale_fill_manual(values =c("#8dd3c7", "#fdb462", "#bebada", "#fb8072"))+
ggtitle("Total emissions by year and motor vehicle sources \nin Baltimore and Los Angeles")+
facet_grid(fips~SCC.Level.Two)+
theme_light()
dev.off()
| /Plot6.R | no_license | Soco-Roman/Exploratoy_data_analysis_project_2 | R | false | false | 1,848 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drug_transporter_node_parser.R
\name{parse_drug_transporters_links}
\alias{parse_drug_transporters_links}
\title{Extracts the drug transporters links element and return data as data frame.}
\usage{
parse_drug_transporters_links(save_table = FALSE, save_csv = FALSE,
csv_path = ".", override_csv = FALSE)
}
\arguments{
\item{save_table}{boolean, save table in database if true.}
\item{save_csv}{boolean, save csv version of parsed dataframe if true}
\item{csv_path}{location to save csv files into it, default is current location, save_csv must be true}
\item{override_csv}{override existing csv, if any, in case it is true in the new parse operation}
}
\value{
drug transporters links node attributes date frame
}
\description{
\code{parse_drug_transporters_links} returns a data frame of drug transporters links elements.
}
\details{
This function extracts the transporters links element of drug node in drug bank
xml database with the option to save it in a predefined database via
\code{\link{open_db}} method. It takes optional arguments to save the returned
dataframe in the database and/or as a CSV file.
It must be called after \code{\link{get_xml_db_rows}} function like
any other parser function.
If \code{\link{get_xml_db_rows}} is called before for any reason, so
no need to call it again before calling this function.
}
\examples{
\donttest{
# return only the parsed dataframe
parse_drug_transporters_links()
# save in database and return parsed dataframe
parse_drug_transporters_links(save_table = TRUE)
# save parsed dataframe as csv if it does not exist in current
# location and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_transporters_links(save_csv = TRUE)
# save in database, save parsed dataframe as csv if it does not exist
# in current location and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_transporters_links(save_table = TRUE, save_csv = TRUE)
# save parsed dataframe as csv if it does not exist in given
# location and return parsed dataframe.
# If the csv exist before read it and return its data.
parse_drug_transporters_links(save_csv = TRUE, csv_path = "/path/to/csv/dir")
# save parsed dataframe as csv if it does not exist in current
# location and return parsed dataframe.
# If the csv exist override it and return it.
parse_drug_transporters_links(save_csv = TRUE, csv_path = "/path/to/csv/dir", override_csv = TRUE)
}
}
| /man/parse_drug_transporters_links.Rd | no_license | Sparklingredstar/dbparser | R | false | true | 2,495 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_probabilities.R
\name{plot_probabilities}
\alias{plot_probabilities}
\title{Plot Probabilities}
\usage{
plot_probabilities(input_dataframe, year_int_col_index, prob_col_index,
track_name_col_index, true_song_year_index)
}
\arguments{
\item{input_dataframe}{the input dataframe has the following columns: "trackname", "year_int", "prob", "true_song_year";
this dataframe is generated from the function get_probability_of_billboard of the musictasteR package}
\item{year_int_col_index}{index for year_int col in the dataframe}
\item{prob_col_index}{index for prob col in the dataframe}
\item{track_name_col_index}{index for track_name col in the dataframe}
\item{true_song_year_index}{index for true song year boolean column}
}
\value{
return a ggplot
}
\description{
\code{plot_probabilities} plots the probability of a song being in the top charts by year.
}
\examples{
\dontrun{
plot_probabilities(logit_input, 3, 2, 4, 5)
}
}
| /musictasteR/man/plot_probabilities.Rd | permissive | meakulpa/rubasic | R | false | true | 1,013 | rd |
#' @title Run statistical tests on voxel-based heatmaps
#' @description Generate statistical outputs from voxel-based heatmaps created
#' by [voxelize()]. Statistical output resembles input heatmaps, with green/red
#' colors specifying locations where voxels show statistically significant
#' differences between groups.
#' @param input (required) Specify path to input .RData file containing the
#' group matrix.
#' @param group_matrices (required) Specify object name for matrix of groups.
#' @param groups (required, default = c()) Vector of 2 groups to be compared.
#' @param ML_bounds (optional, default = c(-5, 5)) Bounds for ML axis.
#' @param DV_bounds (optional, default = c(-8, 1)) Bounds for DV axis.
#' @param p_value (optional, default = 0.05) P-value threshold for significance.
#' @param output (required) Specify path to output folder to save heatmap and .RData output.
#' @return Returns *stat_matrix* an object containing a matrix with statistical test
#' output (-1 for significantly lower, 0 for no significance, 1 for significantly higher).
#' @export
#' @md
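#' @examples
#' \dontrun{
#' # Hypothetical sketch only: the file name, group labels, and output folder
#' # below are placeholders rather than values shipped with the package.
#' stats <- voxel_stats(input = "group_matrices.RData",
#'                      group_matrices = group_matrices,
#'                      groups = c("Control", "Treatment"),
#'                      p_value = 0.05,
#'                      output = "~/voxel_output")
#' }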
voxel_stats <- function(input, group_matrices, groups = c(), ML_bounds = c(-5, 5), DV_bounds = c(-8, 1), p_value = 0.05, output){
load(input)
m <- which(names(group_matrices) %in% groups[1])
n <- which(names(group_matrices) %in% groups[2])
stat_matrix <- group_matrices[[1]][[1]]
for(i in 1:length(stat_matrix)){
stat_matrix[i] <- 0
}
for(y in 1:nrow(stat_matrix)){
for(x in 1:ncol(stat_matrix)){
group_1_values <- c()
group_2_values <- c()
for(z in 1:(length(group_matrices[[m]]) - 2)){
group_1_values <- c(group_1_values, group_matrices[[m]][[z]][y, x])
}
for(z in 1:(length(group_matrices[[n]]) - 2)){
group_2_values <- c(group_2_values, group_matrices[[n]][[z]][y, x])
}
stat_output <- t.test(group_1_values, group_2_values, conf.level = 0.95)
heat_value <- 0
if(!is.na(stat_output$p.value)){
if(stat_output$p.value < p_value){
if(stat_output$statistic[[1]] > 0){ # if first group is higher
heat_value <- 1
}
if(stat_output$statistic[[1]] < 0){ # if second group is higher
heat_value <- -1
}
}
}
stat_matrix[y,x] <- heat_value
}
}
xlabels <- round(c(seq(ML_bounds[1], ML_bounds[2], by = 1)), digits = 1)
ylabels <- round(c(seq(DV_bounds[1], DV_bounds[2], by = 1)), digits = 1)
colorBreaks <- seq(-1, 1, length.out = 6)
heatmap_colorkey <- list(at = colorBreaks, labels = list(at = colorBreaks, labels = round(colorBreaks, 1)))
for(z in 1:length(heatmap_colorkey$labels$labels)){
if((z != 1) & (z != length(heatmap_colorkey$labels$labels))){
heatmap_colorkey$labels$labels[z] <- ""
}
}
heatmap_plot <- lattice::levelplot(t(apply(stat_matrix, 2, rev)),
col.regions = colorRampPalette(c("green", "white", "red"), space = "rgb"),
scales = list(
y = list(
at = seq(0, nrow(stat_matrix) - nrow(stat_matrix)/(DV_bounds[2] - DV_bounds[1]),
nrow(stat_matrix)/(DV_bounds[2] - DV_bounds[1])), labels = ylabels),
x = list(
at = seq(0, ncol(stat_matrix) - ncol(stat_matrix)/(ML_bounds[2] - ML_bounds[1]),
ncol(stat_matrix)/(ML_bounds[2] - ML_bounds[1])), labels = xlabels),
tck = c(1,0)),
main = list(paste0(groups[1], " vs ", groups[2], ", threshold ", p_value)),
xlab = "Medial-Lateral (mm)",
ylab = "Dorsal-Ventral (mm)",
pretty = FALSE,
at = colorBreaks,
colorkey = heatmap_colorkey)
  quartz() # open the plot in its own window (quartz() is macOS-only; use x11() or windows() on other platforms)
print(heatmap_plot) #print the plot in the window
# this code is to properly label the legend
lattice::trellis.focus("legend", side="right", clipp.off=TRUE, highlight=FALSE) #legend parameters
grid.text(expression( ), 0.25, 0, hjust = 0.5, vjust = 1.5) #legend parameters and name
lattice::trellis.unfocus()
# save the plot
savepath <- paste0(output, "/", groups[1], " vs ", groups[2], ", threshold ", p_value, ".png")
curwin <- dev.cur()
savePlot(filename = savepath, type = "png", device = curwin)
dev.off()
save(stat_matrix, file = paste0(output, "/stat_matrix_", groups[1], " vs ", groups[2], ", threshold ", p_value, ".RData"))
return(stat_matrix)
}
| /R/voxel_stats.R | no_license | sgoldenlab/SMART2 | R | false | false | 4,814 | r |
\name{checkTriple}
\alias{checkTriple}
\title{Check Consistency of Conditional Independence for a Triple of Nodes}
\description{
For each subset of \code{nbrsA} and \code{nbrsC} where \code{a} and
\code{c} are conditionally independent, it is checked if \code{b} is in the
conditioning set.
}
\usage{
checkTriple(a, b, c, nbrsA, nbrsC,
sepsetA, sepsetC,
suffStat, indepTest, alpha, version.unf = c(NA, NA),
maj.rule = FALSE, verbose = FALSE)
}
\arguments{
\item{a, b, c}{(integer) positions in adjacency matrix for nodes
\eqn{a}, \eqn{b}, and \eqn{c}, respectively.}
\item{nbrsA, nbrsC}{neighbors of \eqn{a} and \eqn{c}, respectively.}
\item{sepsetA}{vector containing \eqn{Sepset(a,c)}.}
\item{sepsetC}{vector containing \eqn{Sepset(c,a)}.}
\item{suffStat}{a \code{\link{list}} of sufficient statistics for
independent tests; see, e.g., \code{\link{pc}}.}
\item{indepTest}{a \code{\link{function}} for the independence test,
see, e.g., \code{\link{pc}}.}
\item{alpha}{significance level of test.}
\item{version.unf}{(integer) vector of length two: \describe{
\item{\code{version.unf[1]}:}{1 - check for all separating subsets of
\code{nbrsA} and \code{nbrsC} if \code{b} is in that set,\cr
2 - it also checks if there at all exists any sepset which is a
subset of the neighbours (there might be none, although \code{b}
is in the sepset, which indicates an ambiguous situation);}
\item{\code{version.unf[2]}:}{1 - do not consider the initial sepsets
\code{sepsetA} and \code{sepsetC} (same as Tetrad),\cr
2 - consider if \code{b} is in \code{sepsetA} or
\code{sepsetC}.}
}
}
\item{maj.rule}{logical indicating that the following majority rule
is applied: if \code{b} is in less than
50\% of the checked sepsets, we say that \code{b} is in \bold{no} sepset. If
\code{b} is in more than 50\% of the checked sepsets, we say that %ESS bug
\code{b} is in \bold{all} sepsets. If \code{b} is in exactly 50\% of the
checked sepsets, the triple is considered \sQuote{ambiguous}.}
\item{verbose}{Logical asking for detailed output of intermediate steps.}
}
\details{
This function is used in the conservative versions of structure
learning algorithms.
}
\value{
\item{decision}{Decision on possibly ambiguous triple, an integer code,
\describe{
\item{1}{\code{b} is in NO sepset (make v-structure);}
\item{2}{\code{b} is in ALL sepsets (make no v-structure);}
\item{3}{\code{b} is in SOME but not all sepsets (ambiguous triple)}
}}
\item{vers}{Version (1 or 2) of the ambiguous triple
(1=normal ambiguous triple that is \code{b} is in some sepsets;
2=triple coming from \code{version.unf[1]==2}, that is, \code{a} and
\code{c} are indep given the initial sepset but there doesn't exist a
subset of the neighbours that d-separates them.)}
\item{sepsetA}{Updated version of \code{sepsetA}}
\item{sepsetC}{Updated version of \code{sepsetC}}
}
\references{
D. Colombo and M.H. Maathuis (2013).
Order-independent constraint-based causal structure learning,
(arXiv:1211.3295v2).
}
\author{
Markus Kalisch (\email{kalisch@stat.math.ethz.ch}) and Diego Colombo.
}
\examples{
##################################################
## Using Gaussian Data
##################################################
## Load predefined data
data(gmG)
n <- nrow (gmG8$x)
V <- colnames(gmG8$x)
## define independence test (partial correlations), and test level
indepTest <- gaussCItest
alpha <- 0.01
## define sufficient statistics
suffStat <- list(C = cor(gmG8$x), n = n)
## estimate CPDAG
pc.fit <- pc(suffStat, indepTest, alpha=alpha, labels = V, verbose = TRUE)
if (require(Rgraphviz)) {
## show estimated CPDAG
par(mfrow=c(1,2))
plot(pc.fit, main = "Estimated CPDAG")
plot(gmG8$g, main = "True DAG")
}
a <- 6
b <- 1
c <- 8
checkTriple(a, b, c,
nbrsA = c(1,5,7),
nbrsC = c(1,5),
sepsetA = pc.fit@sepset[[a]][[c]],
sepsetC = pc.fit@sepset[[c]][[a]],
suffStat=suffStat, indepTest=indepTest, alpha=alpha,
version.unf = c(2,2),
verbose = TRUE) -> ct
str(ct)
## List of 4
## $ decision: int 2
## $ version : int 1
## $ SepsetA : int [1:2] 1 5
## $ SepsetC : int 1
\dontshow{ stopifnot(identical( ct,
list(decision = 2L, version = 1L, SepsetA = c(1L, 5L), SepsetC = 1L))) }
checkTriple(a, b, c,
nbrsA = c(1,5,7),
nbrsC = c(1,5),
sepsetA = pc.fit@sepset[[a]][[c]],
sepsetC = pc.fit@sepset[[c]][[a]],
version.unf = c(1,1),
suffStat=suffStat, indepTest=indepTest, alpha=alpha) -> c2
stopifnot(identical(ct, c2)) ## in this case, 'version.unf' had no effect
}
\keyword{manip}
| /man/checkTriple.Rd | no_license | igraph/pcalg | R | false | false | 4,793 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jst_reversed.R
\name{jst_reversed}
\alias{jst_reversed}
\title{Run a reversed Joint Sentiment Topic model}
\usage{
jst_reversed(
dfm,
sentiLexInput = NULL,
numSentiLabs = 3,
numTopics = 10,
numIters = 3,
updateParaStep = -1,
alpha = -1,
beta = -1,
gamma = -1,
excludeNeutral = FALSE
)
}
\arguments{
\item{dfm}{A quanteda dfm object}
\item{sentiLexInput}{Optional: A quanteda dictionary object for semi-supervised learning. If
a dictionary is used, \code{numSentiLabs} will be overridden by the number of categories in the
dictionary object. An extra category will by default be added for neutral words. This can be
turned off by setting \code{excludeNeutral = TRUE}.}
\item{numSentiLabs}{Integer, the number of sentiment labels (defaults to 3)}
\item{numTopics}{Integer, the number of topics (defaults to 10)}
\item{numIters}{Integer, the number of iterations (defaults to 3 for test runs, optimize by hand)}
\item{updateParaStep}{Integer. The number of iterations between optimizations
of hyperparameter alpha}
\item{alpha}{Double, hyperparameter for the document-topic distribution (defaults to .05*(average docsize/number of topics))}
\item{beta}{Double, hyperparameter for the word distributions (defaults to .01, with multiplier .9/.1 for sentiment dictionary presence)}
\item{gamma}{Double, hyperparameter for the sentiment distribution (defaults to .05 * (average docsize/number of sentitopics))}
\item{excludeNeutral}{Boolean. If a dictionary is used, an extra category is added for neutral
words. Words in the dictionary receive a low probability of being allocated there. If this is set
to \code{TRUE}, the neutral sentiment category will be omitted. The variable is irrelevant if no
dictionary is used. Defaults to \code{FALSE}.}
}
\value{
A JST_reversed.result object containing a data.frame for each estimated
parameter
}
\description{
Estimates a reversed joint sentiment topic model using a Gibbs sampler, see Details for model description.
}
\details{
Lin, C., He, Y., Everson, R. and Ruger, S., 2012. Weakly supervised joint sentiment-topic
detection from text. IEEE Transactions on Knowledge and Data engineering, 24(6), pp.1134-1145.
}
\examples{
model <- jst_reversed(quanteda::dfm(quanteda::data_corpus_irishbudget2010),
                      paradigm(),
                      numTopics = 5,
                      numIters = 150)
}
| /man/jst_reversed.Rd | no_license | maxboiten/rJST | R | false | true | 2,342 | rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{receptive_model}
\alias{receptive_model}
\title{Pre-Trained Receptiveness Model}
\format{A fitted glmnet model}
\source{
Yeomans, M., Minson, J., Collins, H., Chen, F. & Gino, F. (working paper).
"Conversational Receptiveness: Improving Engagement with Opposing Views"
Study 1. \url{https://osf.io/2n59b/}
}
\usage{
receptive_model
}
\description{
A pre-trained model for detecting conversational receptiveness.
Estimated with glmnet using annotated data from a previous paper.
Primarily for use within the receptiveness() function.
}
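\examples{
\dontrun{
# Illustrative sketch only; it assumes the receptiveness() wrapper mentioned
# above is available in the package.
data(receptive_model)
class(receptive_model)
receptiveness("I see your point, and that seems like a fair concern.")
}
}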
\keyword{datasets}
| /man/receptive_model.Rd | no_license | MarkProjectRepo/politeness | R | false | true | 662 | rd |
## makeCacheMatrix creates a cache with four functions: set the value of
## a matrix with set(y), and retrieve it with get(); set the value of the
## inverse with setinverse(inverse), and retrieve it with getinverse()
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
##Set the matrix, and make the value of its inverse NULL. Since the matrix
##is changing, any previously cached inverse value is no longer valid.
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## If the inverse of the matrix in x is already in the cache, simply return
## it. If not, add it to the cache and return it.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)) { ##see if inverse is already in cache
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...) ##pass any extra arguments of cacheSolve() on to solve()
x$setinverse(inv) ##add inverse to cache
inv
}
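## Usage sketch (hypothetical 2x2 matrix; any invertible matrix works):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
## cacheSolve(m)  # computes the inverse and stores it in the cache
## cacheSolve(m)  # second call prints "getting cached data" and reuses the cached inverse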
| /cachematrix.R | no_license | kparkhe/ProgrammingAssignment2 | R | false | false | 1,104 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cts.R
\name{cts_compinfo}
\alias{cts_compinfo}
\title{Get record details from Chemical Translation Service (CTS)}
\usage{
cts_compinfo(inchikey, verbose = TRUE)
}
\arguments{
\item{inchikey}{character; InChIKey.}
\item{verbose}{logical; should a verbose output be printed on the console?}
}
\value{
a list of lists (one per supplied inchikey), each
a list of 7: inchikey, inchicode, molweight, exactmass, formula, synonyms and externalIds
}
\description{
Get record details from CTS, see \url{http://cts.fiehnlab.ucdavis.edu/}
}
\examples{
\donttest{
# might fail if API is not available
out <- cts_compinfo("XEFQLINVKFYRCS-UHFFFAOYSA-N")
# = Triclosan
str(out)
out[[1]][1:5]
### multiple inputs
inchikeys <- c("XEFQLINVKFYRCS-UHFFFAOYSA-N","BSYNRYMUTXBXSQ-UHFFFAOYSA-N" )
out2 <- cts_compinfo(inchikeys)
str(out2)
# a list of two
# extract molecular weight
sapply(out2, function(y) y$molweight)
}
}
\references{
Wohlgemuth, G., P. K. Haldiya, E. Willighagen, T. Kind, and O. Fiehn, 2010. The Chemical Translation Service
-- a Web-Based Tool to Improve Standardization of Metabolomic Reports. Bioinformatics 26(20): 2647–2648.
}
\author{
Eduard Szöcs, \email{eduardszoecs@gmail.com}
}
| /man/cts_compinfo.Rd | permissive | jmorim/webchem | R | false | true | 1,261 | rd |
#2017
votes2017 <- read.csv("Data/2017.csv", stringsAsFactors=FALSE)
info <- votes2017[,1:6]
votes2017 <- votes2017[,-(1:6)]
rownames(votes2017) <- as.character(info$Constituency)
votes2017[is.na(votes2017)] <- 0
votes2017[,"FPTP"] <- colnames(votes2017)[unlist(apply(votes2017, 1, which.max))]
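# Each year block below repeats the same steps as the 2017 block above; a helper like this
# could replace them (sketch only, not called in this script; 'drop_last_row' is a hypothetical
# argument needed only for the 2010 file, which carries an extra totals row):
read_votes_year <- function(file, drop_last_row = FALSE, ...) {
  votes <- read.csv(file, stringsAsFactors = FALSE, ...)
  if (drop_last_row) votes <- votes[-nrow(votes), ]
  info <- votes[, 1:6]                       # constituency metadata columns
  votes <- votes[, -(1:6)]                   # keep only the party vote columns
  rownames(votes) <- as.character(info$Constituency)
  votes[is.na(votes)] <- 0
  votes[, "FPTP"] <- colnames(votes)[unlist(apply(votes, 1, which.max))]
  votes
}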
#2015
votes2015 <- read.csv("Data/2015.csv", stringsAsFactors=FALSE)
info <- votes2015[,1:6]
votes2015 <- votes2015[,-(1:6)]
rownames(votes2015) <- as.character(info$Constituency)
votes2015[is.na(votes2015)] <- 0
votes2015[,"FPTP"] <- colnames(votes2015)[unlist(apply(votes2015, 1, which.max))]
#2010
votes2010 <- read.csv("Data/2010.csv")
votes2010 <- votes2010[-nrow(votes2010),]
info <- votes2010[,1:6]
votes2010 <- votes2010[,-(1:6)]
rownames(votes2010) <- as.character(info$Constituency)
votes2010[is.na(votes2010)] <- 0
votes2010[,"FPTP"] <- colnames(votes2010)[unlist(apply(votes2010, 1, which.max))]
#2005
votes2005 <- read.csv("Data/2005.csv", stringsAsFactors=FALSE, fileEncoding="UTF-8-BOM")
info <- votes2005[,1:6]
votes2005 <- votes2005[,-(1:6)]
rownames(votes2005) <- as.character(info$Constituency)
votes2005[is.na(votes2005)] <- 0
votes2005[,"FPTP"] <- colnames(votes2005)[unlist(apply(votes2005, 1, which.max))]
#2001
votes2001 <- read.csv("Data/2001.csv", stringsAsFactors=FALSE)
info <- votes2001[,1:6]
rownames(votes2001) <- as.character(info$Constituency)
votes2001 <- votes2001[,-(1:6)]
rownames(votes2001) <- as.character(info$Constituency)
votes2001[is.na(votes2001)] <- 0
votes2001[,"FPTP"] <- colnames(votes2001)[unlist(apply(votes2001, 1, which.max))]
#1997
votes1997 <- read.csv("Data/1997.csv", stringsAsFactors=FALSE)
info <- votes1997[,1:6]
rownames(votes1997) <- as.character(info$Constituency)
votes1997 <- votes1997[,-(1:6)]
votes1997[is.na(votes1997)] <- 0
votes1997[,"FPTP"] <- colnames(votes1997)[unlist(apply(votes1997, 1, which.max))]
#1992
votes1992 <- read.csv("Data/1992.csv", stringsAsFactors=FALSE)
info <- votes1992[,1:6]
rownames(votes1992) <- as.character(info$Constituency)
votes1992 <- votes1992[,-(1:6)]
votes1992[is.na(votes1992)] <- 0
votes1992[,"FPTP"] <- colnames(votes1992)[unlist(apply(votes1992, 1, which.max))]
#1987
votes1987 <- read.csv("Data/1987.csv", stringsAsFactors=FALSE)
info <- votes1987[,1:6]
rownames(votes1987) <- as.character(info$Constituency)
votes1987 <- votes1987[,-(1:6)]
votes1987[is.na(votes1987)] <- 0
votes1987[,"FPTP"] <- colnames(votes1987)[unlist(apply(votes1987, 1, which.max))]
#1983
votes1983 <- read.csv("Data/1983.csv", stringsAsFactors=FALSE)
votes1983$Conservative=as.numeric(votes1983$Conservative)
votes1983$Ind=as.numeric(votes1983$Ind)
info <- votes1983[,1:6]
rownames(votes1983) <- as.character(info$Constituency)
votes1983 <- votes1983[,-(1:6)]
votes1983[is.na(votes1983)] <- 0
votes1983[,"FPTP"] <- colnames(votes1983)[unlist(apply(votes1983, 1, which.max))]
#1979
votes1979 <- read.csv("Data/1979.csv", stringsAsFactors=FALSE, fileEncoding="UTF-8-BOM")
info <- votes1979[,1:6]
rownames(votes1979) <- as.character(info$Constituency)
votes1979 <- votes1979[,-(1:6)]
votes1979[is.na(votes1979)] <- 0
votes1979[,"FPTP"] <- colnames(votes1979)[unlist(apply(votes1979, 1, which.max))]
| /Other/Create Visualisations/readVotes.R | no_license | macarda/Concentrated-Vote | R | false | false | 3,172 | r |
library(ggplot2)
library(scales)
thunderbirdFile <- "thunderbird_issues.csv"
thunderbirdData <- read.csv(thunderbirdFile, header=TRUE, sep=",")
thunderbirdData$countCat <- cut(thunderbirdData$count, breaks=c(1,5,10,15,20,25,30,35,40,45,50,100,500))
thunderbirdData <- thunderbirdData[!is.na(thunderbirdData$countCat), ]
# Plot raw data analysis statistics
ggplot(thunderbirdData, aes(x = countCat)) +
geom_bar(aes(y = (..count..))) +
labs(x="Comments Count Category",y="Number of Issues") +
ggtitle("Mozilla Thunderbird Issue Trackers Comments Distribution") +
theme(plot.title = element_text(hjust = 0.5), axis.text.x = element_text(color = "black", size = 11), axis.text.y = element_text(color = "black", size = 11)) +
geom_text(aes( label = (..count..), y= ..count.. ), stat="count", vjust = -.5)
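# To persist the chart, the last plot could be written to disk (hypothetical file name):
# ggsave("thunderbird_comments_distribution.png", width = 8, height = 5, dpi = 300)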
| /thesis-data-analysis/scripts/raw-dataset-analysis/thunderbird_stats.R | no_license | ansin218/master-thesis | R | false | false | 838 | r |
# Libraries -------------------------------------------------------------
# biblios <- c('tidyverse','dplyr', 'ggplot2', 'lubridate', 'stringr',
# 'inspectdf', 'skimr', 'naniar', 'visdat', 'tidymodels',
# 'klaR', 'corrplot', 'NetCluster', 'factoextra', 'maptree', 'treemap', 'DT','patchwork')
biblios <- c('tidyverse', 'stringr', 'janitor', 'inspectdf', 'dplyr', 'skimr',
'plotly', 'RcppRoll', 'lubridate', 'factoextra', 'forcats', 'tidymodels')
library(tidymodels)
for (i in biblios){
if(!require(i, character.only = TRUE)){install.packages(paste0(i)); library(i, character.only = TRUE)}
}
# Importing the .csv data with read.csv --------------------------
#path <- "data\\"
path <- "data/"
file_aisles <- "aisles.csv"
base_aisles <- read.csv(paste(path,file_aisles,sep = ""))
file_dept <- "departments.csv"
base_dept <- read.csv(paste(path,file_dept,sep = ""))
file_ord_prior <- "order_products__prior.csv"
base_ord_prior <- read.csv(paste(path,file_ord_prior,sep = "")) %>% glimpse()
file_ord_train <- "order_products__train.csv"
base_ord_train <- read.csv(paste(path,file_ord_train,sep = "")) %>% glimpse()
file_orders <- "orders.csv"
base_orders <- read.csv(paste(path,file_orders,sep = "")) %>% glimpse()
file_products <- "products.csv"
base_products <- read.csv(paste(path,file_products,sep = "")) %>% glimpse()
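# The six read.csv calls above could also be driven from a named vector
# (sketch; assumes all files live under 'path' and fit in memory):
# files <- c(aisles = "aisles.csv", departments = "departments.csv",
#            order_products_prior = "order_products__prior.csv",
#            order_products_train = "order_products__train.csv",
#            orders = "orders.csv", products = "products.csv")
# bases <- purrr::map(files, ~ read.csv(file.path(path, .x)))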
# x <- tibble(carac = stringr::str_length(base_aisles$aisle))
#
# x %>% ggplot(aes(carac)) +
# geom_histogram(bins = 20)
# Starting the preliminary analyses --------------------------------------------------
# sum(!(base_orders$order_id %in% base_ord_prior$order_id))
#
# base_orders_rec <- base_orders %>% filter(!is.na(days_since_prior_order))
#
#
# # Adding the product information to the prior-orders table
# base_ord_prior %>% left_join(base_products)
# base_ord_prior_prod <- base_ord_prior %>% left_join(base_products)
# # base_ord_prior_prod <- base_ord_prior_prod[,1:5]
#
# rm(base_ord_)
#
# base_orders_rec_count <- base_orders_rec %>% group_by(user_id) %>% count() %>% transmute(compras = n)
#
# base_orders_rec_count_10 <- base_orders_rec_count %>% filter(compras <= 10)
# base_orders_rec_count_10 %>% nrow()
#
# base_orders_rec_count_10_complete <- base_orders_rec_count_10 %>% left_join(base_orders_rec)
#
# base_orders_rec_count_10_complete_prod <- base_orders_rec_count_10_complete %>% left_join(base_ord_prior_prod)
#
# base_orders_rec_count_10_complete_prod_dept <- base_orders_rec_count_10_complete_prod %>% left_join(base_dept)
#
# base_graf1 <- base_orders_rec_count_10_complete_prod_dept %>% group_by(department) %>% count() %>% transmute(quantidade = n) %>% arrange(desc(quantidade))
#
# base_graf1[1:10,] %>% ggplot(aes(x = reorder(department, -quantidade), y = quantidade)) +
# geom_col(na.rm = TRUE)
#
#
# x <- base_orders_rec_count %>% filter(compras > 10)
#
# mean(x$compras)
#
# base_orders_rec_count_10_complete$compras %>% mean()
#
#
# base_orders %>% filter(is.na(days_since_prior_order)) %>% count()
#
#
# # Analysis 2020-08-10 ------------------------------------------------------
#
#
#
# ## Initial_checks ---------------------------------------------------
#
#
# # Checking whether base_train contains elements that are already in the other tables.
# base_orders %>% skim()
#
# base_orders %>% group_by(user_id) %>% summarise(n_pedidos = max(order_number))
#
# n_vezes_mais30 <- base_orders %>% dplyr::filter(days_since_prior_order == 30) %>% group_by(user_id) %>% summarise(n_30 = n())
#
# max(n_vezes_mais30$n_30)
#
# base_orders$days_since_prior_order[base_orders$days_since_prior_order == -1] <- -1
#
# base_orders %>% summary()
#
# base_ord_train %>% skim()
#
# nrow(base_orders)
# nrow(base_ord_prior)
# nrow(base_ord_train)
#
# sum(base_ord_prior$order_id %in% base_ord_train$order_id)
# # There is no intersection between the order_id values of these tables
#
# sum(base_ord_prior$order_id %in% base_orders$order_id)
# # Every 'order_id' of the 'order_prior' table is also found in the 'orders' table
#
# sum(base_ord_train$order_id %in% base_orders$order_id)
# # Every 'order_id' of the 'order_train' table is also found in the 'orders' table
#
# orders_in_ord_train <- sum(base_orders$order_id %in% base_ord_train$order_id)
#
# orders_in_ord_prior <- sum(base_orders$order_id %in% base_ord_prior$order_id)
#
# # Looking up the order_id values of the 'orders' table in 'order_prior' finds 3214874 of them (93.4%).
# # Looking them up in 'order_train' finds 131209 (3.84%).
# # In other words, 75000 orders (2.19%) of the 'orders' table appear in neither table ('order_prior' nor 'order_train'); these missing
# # 'order_id' values belong to the test set, which is not available.
#
# orders_in_ord_prior/nrow(base_orders)
# orders_in_ord_train/nrow(base_orders)
#
# 1 - ((orders_in_ord_prior+orders_in_ord_train)/nrow(base_orders))
#
# nrow(base_orders)-(orders_in_ord_prior+orders_in_ord_train)
# Start of the main code -----------------------------------------------------------
# CONCLUSIONS FROM THE PRELIMINARY ANALYSIS:
# The order_train and order_prior tables are mutually exclusive, i.e. their order_id values do not intersect.
# Every order_id of 'order_prior' is found in 'orders', and the same holds for 'order_train'.
# There are 75k 'order_id' that belong to the test set. Since that test set is not available, these 75k records can be removed.
# ACTIONS
# 1 - Remove the 75k records from the 'orders' table.
# 2 - The order_train and order_prior tables can be merged, since the data will not be used for prediction.
# Removing the records of the `orders` table flagged as 'test', since those 'order_id' have no matching rows in the product_order tables
base_orders_cl <- base_orders %>% filter(eval_set != 'test')
# Merging the 'order_prior' and 'order_train' tables
base_ord_geral <- dplyr::union(base_ord_prior,base_ord_train)
# Left join of the products table with base_aisles and base_dept to bring in the aisle and department names
base_products_names <- base_products %>% left_join(base_aisles) %>% left_join(base_dept)
base_products_names <- base_products_names[,c(1:2,5:6)]
# Left join of the merged orders table with base_products_names to bring in product data (product name, aisle and department)
base_ord_geral_prod <- base_ord_geral %>% left_join(base_products_names)
# Vivi's moving-average filter -------------------------------------------------
base_orders_cl_mm <- base_orders_cl %>%
filter(order_number != 1) %>%
arrange(user_id, order_number) %>%
mutate(order_hour_of_day = as.numeric(order_hour_of_day)) %>%
group_by(user_id) %>%
mutate(days_ma = roll_mean(days_since_prior_order, 5, fill = NA, na.rm = T)) %>%
ungroup() %>%
glimpse
# Code from 2020-08-15
# keeping only the customers below the median
base_orders_cl_mm <- base_orders_cl_mm %>% arrange(user_id,-order_number)
users_last_day_ma <- base_orders_cl_mm %>% dplyr::group_by(user_id) %>% summarise(ult_ordem = first(order_number), days_ma = nth(days_ma,3), media_days = mean(days_since_prior_order)) %>% filter(days_ma == 30 | (is.na(days_ma) & media_days >= 15)) %>% glimpse()
base_orders_cl_rec <- base_orders_cl_mm %>% filter(days_ma <8)
base_orders_cl_not_rec <- base_orders_cl_mm %>% filter(days_ma >=8)
base_ord_geral_prod_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_rec$order_id)
base_ord_geral_prod_not_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_not_rec$order_id)
# Bringing in the user_id column
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec %>% left_join(base_orders_cl_rec)
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec2[,c(1:8,10,14)]
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec %>% left_join(base_orders_cl_not_rec)
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec2[,c(1:8,10,14)]
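# Note: the [, c(1:8, 10, 14)] positional selection above depends on the column order produced
# by left_join(); selecting columns by name with dplyr::select() would be a more robust alternative.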
# Moving-average plot
# base_orders_cl_not_rec2 %>%
# na.omit() %>%
# ggplot(aes(x = days_ma)) +
# geom_bar(fill = 'darkgreen') +
# geom_vline(xintercept = 8, color = 'orange',
# linetype = 'dashed') +
# theme_minimal()
# HYPOTHESIS
# Recurrent purchases are probably made by repeating a previous basket.
# Purchases with lower recurrence show more variation in the shopping basket
# Run the model for the 100 most recurrent and the 100 least recurrent customers
# Histogram of products purchased per order ------------------------------
order_n_total <- base_ord_geral_prod_not_rec %>% group_by(order_id) %>% summarise(quant_prod = n(), unid_recompra = sum(reordered))
bin <- order_n_total$quant_prod %>% max()
order_n_total %>% ggplot(aes(x = quant_prod)) +
geom_histogram(bins = bin/10) +
scale_y_sqrt()
x4 <- function(x) x^4
x_4<- function(x) sqrt(sqrt(x))
x2 <- function(x) x^2
x_2<- function(x) sqrt(x)
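# The pairs above provide the transform/inverse functions for scales::trans_new():
# x_4/x4 give the fourth-root ("sqrt_sqrt") y-axis and x2/x_2 the squaring ("quad") scale
# used in the commented-out plot variants below.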
# order_n_total %>% ggplot() +
# geom_histogram(aes(x = quant_prod), bins = bin/10,) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
#
# order_n_total %>% ggplot(aes(x = quant_prod)) +
# geom_freqpoly(bins = bin/10) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
# Most recurrent products ----------------------------------------------------
# at which cart positions the MOST reordered products tend to sit
rec_ord_cart <- base_ord_geral_prod_rec2 %>% group_by(add_to_cart_order) %>%
summarise(recorrencias = sum(reordered),
total = n()) %>%
mutate(rec_perc = recorrencias/total) %>%
arrange(add_to_cart_order)
rec_ord_cart %>% ggplot(aes(add_to_cart_order, rec_perc)) +
geom_col() +
labs(title = "Gráfico de ordem_carrinho x percentual de produtos recorrentes")
# Least recurrent products ----------------------------------------------------
# at which cart positions the non-reordered products tend to sit
nao_rec_ord_cart <- base_ord_geral_prod_not_rec2 %>% group_by(add_to_cart_order) %>%
summarise(total = n(),
nao_recorrencia = total - sum(reordered)) %>%
mutate(nao_rec_perc = nao_recorrencia/total) %>%
arrange(add_to_cart_order)
nao_rec_ord_cart %>% ggplot(aes(add_to_cart_order, nao_rec_perc)) +
geom_col() +
labs(title = "Gráfico de ordem_carrinho x percentual de produtos nao_recorrentes")
# Heatmaps for the intermediate integrative project --------------------------------------
# analysis of the products that enter the basket first (by product), for purchases made by low-recurrence customers.
# HYPOTHESIS:
# There is a relationship between a product's position in the cart and how often it is reordered
# ANALYSIS PREMISE:
# Split the customers into 2 categories: highly recurrent customers and low-recurrence customers.
# This initial split is based on the 'days_since_prior_order' variable.
# First, the mean of this variable is computed per user_id.
# Once the per-customer means are defined, the quartiles are computed.
# Customers are classified as low-recurrence or recurrent depending on which side of the median they fall
# (in the code above the cut is a 5-order moving average of days_since_prior_order, with 8 days as the threshold).
# With the groups defined, each one is analysed separately in order to look for discrepancies.
# PLOTS:
# Two heatmaps were created, one per customer group, showing the reorder percentages of the 100 most
# reordered products at the different positions of the shopping cart.
# INSIGHT:
# Comparing the two plots, no relevant differences appear, either in the products shown or in each product's share across cart positions.
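# A minimal sketch of the premise above (assumes base_orders_cl_mm from the moving-average step
# is in memory; 'user_recurrence', 'mean_days' and 'rec_group' are illustrative names):
user_recurrence <- base_orders_cl_mm %>%
  group_by(user_id) %>%
  summarise(mean_days = mean(days_since_prior_order, na.rm = TRUE), .groups = "drop")
median_days <- median(user_recurrence$mean_days, na.rm = TRUE)
user_recurrence <- user_recurrence %>%
  mutate(rec_group = if_else(mean_days < median_days, "recurrent", "low_recurrence"))
# shorter average gaps between orders put a customer on the "recurrent" side of the median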
prod_ord_cart <- base_ord_geral_prod_not_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Computing the average number of reordered products per order
a <- base_ord_geral_prod_not_rec2$order_id %>% n_distinct() # number of orders
b <- base_ord_geral_prod_not_rec2$reordered %>% sum() # number of reordered products
n_prod1 = b/a
(texto1 <- paste("Média Produtos/Ordem = ", round(n_prod1,2), sep = ""))
prod_ord_cart2 <- prod_ord_cart %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart2_list <- prod_ord_cart2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart2_list <- prod_ord_cart2_list[1:100,1]
prod_100_n_rec <- prod_ord_cart2 %>% right_join(prod_ord_cart2_list)
prod_100_n_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
scale_fill_gradient2(low = "white", high = "darkgreen", limits = c(0,40)) + #,trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Não-Recorrentes") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod1, color = "orange") +
scale_y_continuous(limits = c(0,20),expand = c(0,0)) +
geom_text(aes(x = 5, y = n_prod1+0.1, label = texto1 ), size = 3, color = 'orange', hjust = 0, vjust = 0)
# hm1 <- prod_100_n_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2() +
# ylim(0,150) +
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Não-Recorrentes")
# ggplotly(hm1, tooltip = "perc")
# analysis of the products that enter the basket first (by product), for purchases made by the MOST recurrent customers.
prod_ord_cart_rec <- base_ord_geral_prod_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Computing the average number of reordered products per order
a <- base_ord_geral_prod_rec2$order_id %>% n_distinct() # number of orders
b <- base_ord_geral_prod_rec2$reordered %>% sum() # number of reordered products
n_prod2 = b/a
(texto2 <- paste("Média Produtos/Ordem = ", round(n_prod2,2), sep = ""))
prod_ord_cart_rec2 <- prod_ord_cart_rec %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart_rec2_list <- prod_ord_cart_rec2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart_rec2_list <- prod_ord_cart_rec2_list[1:100,1]
prod_100_rec <- prod_ord_cart_rec2 %>% right_join(prod_ord_cart_rec2_list)
prod_100_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2(aes(fill = "darkgreen"))+
scale_fill_gradient2(low = "white", high = "darkgreen", limits = c(0,40))+#, trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod2, color = "orange") +
geom_text(aes(x = 5, y = n_prod2+0.1, label = texto2 ), size = 3, color = 'orange', hjust = 0, vjust = 0) +
scale_y_continuous(limits = c(0,20),expand = c(0,0))
# hm1 <- prod_100_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2()+
# ylim(0,150)+
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes")
# ggplotly(hm1, tooltip = "perc")
# Hierarchical clustering analysis -------------------------------------------------------
# New HClust analysis where the percentage is now computed differently: the percentages are tallied per
# cart-insertion position, i.e. each cart position sums to 100% across products and each product gets its
# share of that position.
# Selecting the 100 main products of the full base
base_ord_geral_prod_total <- base_ord_geral_prod %>% left_join(base_orders_cl_mm)
# base_ord_geral_prod_total2 <- base_ord_geral_prod_total[,c(1:8,10,14)]
base_ord_geral_prod_total2 <- base_ord_geral_prod_total
prod_ord_cart_geral <- base_ord_geral_prod_total2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
prod_ord_cart_geral2 <- prod_ord_cart_geral %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart_geral2_list <- prod_ord_cart_geral2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart_geral2_list <- prod_ord_cart_geral2_list[1:50,1]
prod_100_geral <- prod_ord_cart_geral2 %>% right_join(prod_ord_cart_geral2_list)
ord_cart_prod2 <- prod_ord_cart %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
mat_similarity_ord <- ord_cart_prod2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# mat_similarity_ord <- mat_similarity_ord[1:100,]/
# Removing NAs
mat_similarity_ord <- mat_similarity_ord %>% replace(is.na(.),0)
# Normalizing the data
receita <- mat_similarity_ord %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity_ord_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are entirely NA or NaN
x <- inspect_na(mat_similarity_ord_norm)
col_remove <- x$col_name[x$pcnt == 100]
# Removing the all-NA/NaN columns
mat_similarity_ord_norm <- mat_similarity_ord_norm %>% select(-c(col_remove))
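# pcnt == 100 marks columns where every value is NA/NaN (typically cart positions with zero
# variance after normalization); only those columns are dropped here.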
class(mat_similarity_ord_norm$.)
# dist_mat <- get_dist(mat_similarity_ord_norm, upper = TRUE, diag = TRUE)
#
# n <- 5
# vet_clust <- c(2:((nrow(mat_similarity_ord_norm)-1)/n))
# vet_clust <- vet_clust * n
# vet_clust2 <- c(c(2:9),vet_clust)
# silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
#
# for (i in vet_clust2){
# cutted <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# print(i)
# }
#
# best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
#
# p1 <- silho %>% ggplot(aes(x = k)) +
# geom_line(aes(y = silho_avg), color = "blue") +
# # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# geom_line(aes(y = singulares/40), color = "red") +
# scale_y_continuous(
# name = "Avg_Silh",
# sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# ) +
# geom_vline(xintercept = 6)
# p1
k_select <- 4
cutted_ord_not_rec <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
cutted_ord_not_rec$labels <- as.character(mat_similarity_ord_norm$.)
fviz_dend(cutted_ord_not_rec, k = k_select,
cex = 0.6,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Não Recorrentes",
ylim = c(-30,60),
)
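# To inspect which products fall in each branch, the cluster assignments could be pulled back
# into a tibble (sketch, assuming the hcut object above):
# tibble(product = as.character(mat_similarity_ord_norm$.),
#        cluster = cutted_ord_not_rec$cluster) %>% arrange(cluster)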
# Hierarchical clustering for the recurrent customers
ord_cart_prod_rec2 <- prod_ord_cart_rec %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
mat_similarity_ord_rec <- ord_cart_prod_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# mat_similarity_ord_rec <- mat_similarity_ord_rec[1:100,]
# Removing NAs
mat_similarity_ord_rec <- mat_similarity_ord_rec %>% replace(is.na(.),0)
# Normalizing the data
receita <- mat_similarity_ord_rec %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity_ord_rec_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are entirely NA or NaN
x <- inspect_na(mat_similarity_ord_rec_norm)
col_remove <- x$col_name[x$pcnt == 100]
# Removing the all-NA/NaN columns
mat_similarity_ord_rec_norm <- mat_similarity_ord_rec_norm %>% select(-c(col_remove))
# dist_mat <- get_dist(mat_similarity_ord_rec_norm, upper = TRUE, diag = TRUE)
# n <- 5
# vet_clust <- c(2:((nrow(mat_similarity_ord_rec_norm)-1)/n))
# vet_clust <- vet_clust * n
# vet_clust2 <- c(c(2:9),vet_clust)
# silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
#
# for (i in vet_clust2){
# cutted <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# print(i)
# }
#
# best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
#
# p1 <- silho %>% ggplot(aes(x = k)) +
# geom_line(aes(y = silho_avg), color = "blue") +
# # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# geom_line(aes(y = singulares/40), color = "red") +
# scale_y_continuous(
# name = "Avg_Silh",
# sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# ) +
# geom_vline(xintercept = 4)
# p1
k_select <- 4
cutted_ord_rec <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
cutted_ord_rec$labels <- as.character(mat_similarity_ord_rec_norm$.)
fviz_dend(cutted_ord_rec, k = k_select,
cex = 0.6,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Recorrentes",
ylim = c(-30,60),
)
cutted_ord_rec$cluster
library(dendextend)
dend_not_rec <- as.dendrogram(cutted_ord_not_rec)
dend_rec <- as.dendrogram(cutted_ord_rec)
tang <- dendlist(dend_not_rec, dend_rec)
tang %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram(labels_cex = 0.6,
margin_inner = 15,
k_labels = 4,
k_branches = 4,
axes = FALSE,
lwd = 2,
main_left = "Produtos - Clientes Pouco Recorrentes",
cex_main_left = 1,
main_right = "Produtos - Clientes Recorrentes",
cex_main_right = 1,
dLeaf = 0.1
)
dendlist(dend_not_rec, dend_rec) %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram(labels_cex = 0.6,
margin_inner = 15,
k_labels = 4,
k_branches = 4,
axes = FALSE,
lwd = 2,
main_left = "Produtos - Clientes Pouco Recorrentes",
cex_main_left = 1,
main_right = "Produtos - Clientes Recorrentes",
cex_main_right = 1,
dLeaf = 0.1
)
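# entanglement() gives a 0-1 score for how well the two trees are aligned (closer to 0 is better);
# an optional check after untangling (sketch):
# tang %>% untangle(method = "step1side") %>% entanglement()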
# Building an hclust of product by cart order for products of low-recurrence customers
mat_similarity <- prod_ord_cart2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
mat_similarity2 <- mat_similarity[1:100,]
# Building an hclust of product by cart order for products of the MOST recurrent customers
mat_similarity_rec <- prod_ord_cart_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
mat_similarity_rec2 <- mat_similarity_rec[1:100,]
# vet_clust <- c(1:((nrow(mat_similarity2)-2)/20))
n <- 5
vet_clust <- c(1:((nrow(mat_similarity2)-1)/n))
vet_clust <- vet_clust * n
vet_clust2 <- c(c(2:10),vet_clust)
silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
for (i in vet_clust2){
cutted <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=i, graph = TRUE)
negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
print(i)
}
best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
p1 <- silho %>% ggplot(aes(x = k)) +
geom_line(aes(y = silho_avg), color = "blue") +
# geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
geom_line(aes(y = singulares/40), color = "red") +
scale_y_continuous(
name = "Avg_Silh",
sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
)
p1
cutted_not_rec <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
# Removing NAs
mat_similarity2 <- mat_similarity2 %>% replace(is.na(.),0)
# Normalizing the data
receita <- mat_similarity2 %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity2_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are entirely NA or NaN
x <- inspect_na(mat_similarity2_norm)
col_remove <- x$col_name[x$cnt == 100]
# Removing the all-NA/NaN columns
mat_similarity2_norm_sem_na <- mat_similarity2_norm %>% select(-c(col_remove))
dist_mat <- get_dist(mat_similarity2_norm_sem_na, upper = TRUE, diag = TRUE)
print(dist_mat)
cutted_not_rec$labels <- mat_similarity2$product_name
cutted_rec <- hcut(mat_similarity_rec2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
cutted_rec$labels <- mat_similarity_rec2$product_name
cutted_not_rec$size
fviz_dend(cutted_not_rec, k = 20,
cex = 0.7,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Não Recorrentes")
fviz_dend(cutted_rec, k = 20,
cex = 0.7,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Recorrentes")
cutted_not_rec$size
cutted_rec$size
# Recurrence heatmap: aisles x cart order -----------------------------------------
# analysis of the products that enter the basket first (by aisle)
ais_ord_cart <- base_ord_geral_prod_not_rec %>% group_by(aisle, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
hm1 <- ais_ord_cart %>% ggplot() +
geom_tile(aes(aisle,add_to_cart_order, fill = rec_perc*100)) +
scale_fill_gradient2()+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1))
ggplotly(hm1, tooltip = "rec_perc")
# Selecting the top 100 products
lista_produtos <- prod_ord_cart %>% dplyr::group_by(product_name) %>% summarise(recorrencia_media = mean(recorrencias)) %>% dplyr::arrange(-recorrencia_media)
lista_produtos[1:100,1]
hm_prod <- prod_ord_cart %>% dplyr::filter(product_name %in% lista_produtos[1:100,1]) %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = rec_perc*100)) +
scale_fill_gradient2()+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1))
ggplotly(hm_prod, tooltip = "rec_perc")
# Products that enter the basket first -----------------------------------
# analysis of the products that enter the basket first (by product name)
prod_ord_cart <- base_ord_geral_prod %>% group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# analysis of the products that enter the basket first (by aisle)
ais_ord_cart <- base_ord_geral_prod %>% group_by(aisle, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# analysis of the products that enter the basket first (by department)
dept_ord_cart <- base_ord_geral_prod %>% group_by(department, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Maximum number of products in a single purchase
max(prod_ord_cart$add_to_cart_order)
max(base_ord_geral_prod$reordered)
# analyse the products that enter the basket last
# split by the purchases with the largest number of products
# Split by the number of products bought in each purchase
# Relate the quantity bought to the order of insertion in the basket
# Comments 2020-08-11
# There is in fact a large number of purchases with 30 days since the previous one, because the site stops counting beyond 30 days
# In the 'orders' table, 'days_since_prior_order' = NA always corresponds to a customer's first purchase. This is a problem when taking the maximum value, since NA compares as larger than any number.
# Note that the data does not cover only the last 30 days; all purchases are included, the gaps are simply capped at 30 days.
# Check the sum of the differences in 'days_since_prior_order' between one purchase and the previous one. If that sum is
# Criterion for a low-recurrence user: moving average (5 purchases) of days_since_prior_order > 8 (median), not counting the first purchase because of the NA. (reuse Vivi's code)
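# A possible way to make the 30-day cap explicit before averaging (sketch; treats the capped
# values as right-censored rather than exact waiting times, column name is illustrative):
# base_orders_cl %>% mutate(days_capped = days_since_prior_order >= 30)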
| /1a parte/análises/SergioG_Script.R | no_license | sanchezvivi/instacart | R | false | false | 32,075 | r |
# Bibliotecas -------------------------------------------------------------
# biblios <- c('tidyverse','dplyr', 'ggplot2', 'lubridate', 'stringr',
# 'inspectdf', 'skimr', 'naniar', 'visdat', 'tidymodels',
# 'klaR', 'corrplot', 'NetCluster', 'factoextra', 'maptree', 'treemap', 'DT','patchwork')
biblios <- c('tidyverse', 'stringr', 'janitor', 'inspectdf', 'dplyr', 'skimr',
'plotly', 'RcppRoll', 'lubridate', 'factoextra', 'forecats', 'tidymodels')
library(tidymodels)
for (i in biblios){
if(!require(i, character.only = TRUE)){install.packages(paste0(i)); library(i, character.only = TRUE)}
}
# Importando os dados em .csv, usando o read.csv --------------------------
#path <- "data\\"
path <- "data/"
file_aisles <- "aisles.csv"
base_aisles <- read.csv(paste(path,file_aisles,sep = ""))
file_dept <- "departments.csv"
base_dept <- read.csv(paste(path,file_dept,sep = ""))
file_ord_prior <- "order_products__prior.csv"
base_ord_prior <- read.csv(paste(path,file_ord_prior,sep = "")) %>% glimpse()
file_ord_train <- "order_products__train.csv"
base_ord_train <- read.csv(paste(path,file_ord_train,sep = "")) %>% glimpse()
file_orders <- "orders.csv"
base_orders <- read.csv(paste(path,file_orders,sep = "")) %>% glimpse()
file_products <- "products.csv"
base_products <- read.csv(paste(path,file_products,sep = "")) %>% glimpse()
# x <- tibble(carac = stringr::str_length(base_aisles$aisle))
#
# x %>% ggplot(aes(carac)) +
# geom_histogram(bins = 20)
# Iniciando Pré-Análises --------------------------------------------------
# sum(!(base_orders$order_id %in% base_ord_prior$order_id))
#
# base_orders_rec <- base_orders %>% filter(!is.na(days_since_prior_order))
#
#
# # Incluindo o produto na base de pedidos anteriores
# base_ord_prior %>% left_join(base_products)
# base_ord_prior_prod <- base_ord_prior %>% left_join(base_products)
# # base_ord_prior_prod <- base_ord_prior_prod[,1:5]
#
# rm(base_ord_)
#
# base_orders_rec_count <- base_orders_rec %>% group_by(user_id) %>% count() %>% transmute(compras = n)
#
# base_orders_rec_count_10 <- base_orders_rec_count %>% filter(compras <= 10)
# base_orders_rec_count_10 %>% nrow()
#
# base_orders_rec_count_10_complete <- base_orders_rec_count_10 %>% left_join(base_orders_rec)
#
# base_orders_rec_count_10_complete_prod <- base_orders_rec_count_10_complete %>% left_join(base_ord_prior_prod)
#
# base_orders_rec_count_10_complete_prod_dept <- base_orders_rec_count_10_complete_prod %>% left_join(base_dept)
#
# base_graf1 <- base_orders_rec_count_10_complete_prod_dept %>% group_by(department) %>% count() %>% transmute(quantidade = n) %>% arrange(desc(quantidade))
#
# base_graf1[1:10,] %>% ggplot(aes(x = reorder(department, -quantidade), y = quantidade)) +
# geom_col(na.rm = TRUE)
#
#
# x <- base_orders_rec_count %>% filter(compras > 10)
#
# mean(x$compras)
#
# base_orders_rec_count_10_complete$compras %>% mean()
#
#
# base_orders %>% filter(is.na(days_since_prior_order)) %>% count()
#
#
# # Analise 2020-08-10 ------------------------------------------------------
#
#
#
# ## Verificações_Iniciais ---------------------------------------------------
#
#
# # Verificando se na base_train existem elementos que já estão nas outras bases.
# base_orders %>% skim()
#
# base_orders %>% group_by(user_id) %>% summarise(n_pedidos = max(order_number))
#
# n_vezes_mais30 <- base_orders %>% dplyr::filter(days_since_prior_order == 30) %>% group_by(user_id) %>% summarise(n_30 = n())
#
# max(n_vezes_mais30$n_30)
#
# base_orders$days_since_prior_order[base_orders$days_since_prior_order == -1] <- -1
#
# base_orders %>% summary()
#
# base_ord_train %>% skim()
#
# nrow(base_orders)
# nrow(base_ord_prior)
# nrow(base_ord_train)
#
# sum(base_ord_prior$order_id %in% base_ord_train$order_id)
# # Náo há interseção entre os order_id dessas bases
#
# sum(base_ord_prior$order_id %in% base_orders$order_id)
# # Há interseção total entre os 'order_id' da tabela 'order_pior' na tabela 'orders'
#
# sum(base_ord_train$order_id %in% base_orders$order_id)
# # há interseção total entre os 'order_id' da tabela 'order_train' na tabela 'orders'
#
# orders_in_ord_train <- sum(base_orders$order_id %in% base_ord_train$order_id)
#
# orders_in_ord_prior <- sum(base_orders$order_id %in% base_ord_prior$order_id)
#
# # Ao se buscar os order_id da tabela 'orders' na tabela 'order_prior', encontram-se 3214874 (93,4%).
# # Ao se buscar os order_id da tabela 'orders' na tabela 'order_train', encontram-se 131209 (3,84%).
# # ou seja, existem 75000 (2,19%) de pedidos da tabela 'orders' que não estão em nenhuma das bases ('order_prior' nem 'order_train'). Esse 'order_id' faltantes,
# # são pertencentes a base 'order_train', que não está disponível.
#
# orders_in_ord_prior/nrow(base_orders)
# orders_in_ord_train/nrow(base_orders)
#
# 1 - ((orders_in_ord_prior+orders_in_ord_train)/nrow(base_orders))
#
# nrow(base_orders)-(orders_in_ord_prior+orders_in_ord_train)
# Inicio Código -----------------------------------------------------------
# CONCLUSÔES DA ANÁLISE PRÉVIA:
# As bases order_train e order_prior são excludentes, ou seja, os order_id não possuem interseção.
# A base 'order_prior' tem todos os seus order_id encontrados na base 'orders', bem como a base 'order_train'.
# Existem 75k 'order_id', que pertencem a base de testes. Contudo, como essa base de teste não está disponível, podemos remover esses 75k registros.
# AÇÕES
# 1 - Remover 75k registros da base 'orders'.
# 2 - As bases order_train e order_prior, poderão ser mescladas, uma vez que não iremos usar a base para predição.
# Removendo os registros da tabela `orders` que estão categorizados como 'test', uma vez que essas 'order_id' não possuem dados correspondentes nas bases de product_order
base_orders_cl <- base_orders %>% filter(eval_set != 'test')
# Mesclando as bases 'order_prior' e 'order_train'
base_ord_geral <- dplyr::union(base_ord_prior,base_ord_train)
# Fazendo um left join da base de 'base_prod' com a base de base_aisles e base_dept, para trazer os nomes dos corredores e departamentos
base_products_names <- base_products %>% left_join(base_aisles) %>% left_join(base_dept)
base_products_names <- base_products_names[,c(1:2,5:6)]
# Fazendo um left join da base de order_geral com a base_products_names, para trazer dados dos produtos comprados (nome_produto, corredor e departamento)
base_ord_geral_prod <- base_ord_geral %>% left_join(base_products_names)
# Filtro Média Móvel Vivi -------------------------------------------------
base_orders_cl_mm <- base_orders_cl %>%
filter(order_number != 1) %>%
arrange(user_id, order_number) %>%
mutate(order_hour_of_day = as.numeric(order_hour_of_day)) %>%
group_by(user_id) %>%
mutate(days_ma = roll_mean(days_since_prior_order, 5, fill = NA, na.rm = T)) %>%
ungroup() %>%
glimpse
# Código de 2020-08-15
# filtrando somente os clientes que estão abaixo da mediana
base_orders_cl_mm <- base_orders_cl_mm %>% arrange(user_id,-order_number)
users_last_day_ma <- base_orders_cl_mm %>% dplyr::group_by(user_id) %>% summarise(ult_ordem = first(order_number), days_ma = nth(days_ma,3), media_days = mean(days_since_prior_order)) %>% filter(days_ma == 30 | (is.na(days_ma) & media_days >= 15)) %>% glimpse()
base_orders_cl_rec <- base_orders_cl_mm %>% filter(days_ma <8)
base_orders_cl_not_rec <- base_orders_cl_mm %>% filter(days_ma >=8)
base_ord_geral_prod_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_rec$order_id)
base_ord_geral_prod_not_rec <- base_ord_geral_prod %>% dplyr::filter(order_id %in% base_orders_cl_not_rec$order_id)
# Trazendo a coluna user_id
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec %>% left_join(base_orders_cl_rec)
base_ord_geral_prod_rec2 <- base_ord_geral_prod_rec2[,c(1:8,10,14)]
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec %>% left_join(base_orders_cl_not_rec)
base_ord_geral_prod_not_rec2 <- base_ord_geral_prod_not_rec2[,c(1:8,10,14)]
# Gráfico da média móvel
# base_orders_cl_not_rec2 %>%
# na.omit() %>%
# ggplot(aes(x = days_ma)) +
# geom_bar(fill = 'darkgreen') +
# geom_vline(xintercept = 8, color = 'orange',
# linetype = 'dashed') +
# theme_minimal()
# HIPOTESE
# Compras que tem recorrência, provavelmente é feita, repetindo uma cesta anterior.
# Compras com menor recorrência tem maior variaçao na cesta de compras
# Rodando o modelo para os cem principais recorrentes e os 100 piores recorrentes
# Histograma de Produtos comprados por Ordem ------------------------------
order_n_total <- base_ord_geral_prod_not_rec %>% group_by(order_id) %>% summarise(quant_prod = n(), unid_recompra = sum(reordered))
bin <- order_n_total$quant_prod %>% max()
order_n_total %>% ggplot(aes(x = quant_prod)) +
geom_histogram(bins = bin/10) +
scale_y_sqrt()
x4 <- function(x) x^4
x_4<- function(x) sqrt(sqrt(x))
x2 <- function(x) x^2
x_2<- function(x) sqrt(x)
# order_n_total %>% ggplot() +
# geom_histogram(aes(x = quant_prod), bins = bin/10,) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
#
# order_n_total %>% ggplot(aes(x = quant_prod)) +
# geom_freqpoly(bins = bin/10) +
# scale_y_continuous(trans = scales::trans_new(name = "sqrt_sqrt",transform = x_4, inverse = x4)) +
# labs(title = "Histograma de No Produtos comprados")
# Produtos Mais Recorrentes ----------------------------------------------------
# em qual posição do carrinho, se localizam o produtos MAIS recorrentes
rec_ord_cart <- base_ord_geral_prod_rec2 %>% group_by(add_to_cart_order) %>%
summarise(recorrencias = sum(reordered),
total = n()) %>%
mutate(rec_perc = recorrencias/total) %>%
arrange(add_to_cart_order)
rec_ord_cart %>% ggplot(aes(add_to_cart_order, rec_perc)) +
geom_col() +
labs(title = "Gráfico de ordem_carrinho x percentual de produtos recorrentes")
# Produtos Menos Recorrentes ----------------------------------------------------
# em qual posição do carrinho, se localizam o produtos não-recorrentes
nao_rec_ord_cart <- base_ord_geral_prod_not_rec2 %>% group_by(add_to_cart_order) %>%
summarise(total = n(),
nao_recorrencia = total - sum(reordered)) %>%
mutate(nao_rec_perc = nao_recorrencia/total) %>%
arrange(add_to_cart_order)
nao_rec_ord_cart %>% ggplot(aes(add_to_cart_order, nao_rec_perc)) +
geom_col() +
labs(title = "Gráfico de ordem_carrinho x percentual de produtos nao_recorrentes")
# Heatmaps Integradora Intermediária --------------------------------------
# fazer uma análise pelos produtos que entram primeiro na cesta (por produto), nas compras feitas por clientes pouco recorrentes.
# HIPÓTESE:
# Existe uma relação entre a posição do produto no carrinho e a recorrência de compra
# PREMISSA DA ANÁLISE:
# Separar os clientes em 2 catergorias: Clientes muito recorrentes e clientes pouco recorrentes.
# Essa definição inicial é feita com base na variável 'days_since_pior_order'.
# Primeiro se calcula o valor médio dessa variável, por user_id.
# Uma vez definidas os valores médios por cliente, calculam-se os quartis.
# São definidos como clientes pouco recorrentes, o que estão abaixo da madiana e Clientes recorrentes, aqueles que estão acima da mediana.
# Definido isso, será feita uma análise para cada um dos grupos de modo a buscar as discrepâncias.
# GRÁFICOS:
# Foram criados 2 Heatmaps, um para cada grupo de clientes, onde são apresentados os percentuais de recorrencias de produtos (100 produtos de maior
# recorrência de compra), nas diferentes posições do carrinho de compras.
# INSIGHT:
# Comparando ambos os gráficos, não se percebem relevantes variações nem nos produtos apresentados, nem tampouco na proporção do produto nas diversas posições.
prod_ord_cart <- base_ord_geral_prod_not_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Definindo a média do número de produtos recorrentes
a <- base_ord_geral_prod_not_rec2$order_id %>% n_distinct() #número de pedidos
b <- base_ord_geral_prod_not_rec2$reordered %>% sum() #numero de produtos
n_prod1 = b/a
(texto1 <- paste("Média Produtos/Ordem = ", round(n_prod1,2), sep = ""))
prod_ord_cart2 <- prod_ord_cart %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart2_list <- prod_ord_cart2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart2_list <- prod_ord_cart2_list[1:100,1]
prod_100_n_rec <- prod_ord_cart2 %>% right_join(prod_ord_cart2_list)
prod_100_n_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
scale_fill_gradient2(low = "white", high = "darkgreen", limits = c(0,40)) + #,trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Não-Recorrentes") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod1, color = "orange") +
scale_y_continuous(limits = c(0,20),expand = c(0,0)) +
geom_text(aes(x = 5, y = n_prod1+0.1, label = texto1 ), size = 3, color = 'orange', hjust = 0, vjust = 0)
# hm1 <- prod_100_n_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2() +
# ylim(0,150) +
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Não-Recorrentes")
# ggplotly(hm1, tooltip = "perc")
# fazer uma análise pelos produtos que entram primeiro na cesta (por produto), nas compras feitas por clientes MAIS recorrentes.
prod_ord_cart_rec <- base_ord_geral_prod_rec2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Computing the average number of recurrent products per order
a <- base_ord_geral_prod_rec2$order_id %>% n_distinct() # number of orders
b <- base_ord_geral_prod_rec2$reordered %>% sum() # number of reordered products
n_prod2 = b/a
(texto2 <- paste("Média Produtos/Ordem = ", round(n_prod2,2), sep = ""))
prod_ord_cart_rec2 <- prod_ord_cart_rec %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart_rec2_list <- prod_ord_cart_rec2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart_rec2_list <- prod_ord_cart_rec2_list[1:100,1]
prod_100_rec <- prod_ord_cart_rec2 %>% right_join(prod_ord_cart_rec2_list)
prod_100_rec %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2(aes(fill = "darkgreen"))+
scale_fill_gradient2(low = "white", high = "darkgreen", limits = c(0,40))+#, trans = scales::trans_new(name = "quad",transform = x2, inverse = x_2))+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes") +
theme(axis.text.x = element_text(hjust = 1.0, vjust = 0.3)) +
geom_hline(yintercept = n_prod2, color = "orange") +
geom_text(aes(x = 5, y = n_prod2+0.1, label = texto2 ), size = 3, color = 'orange', hjust = 0, vjust = 0) +
scale_y_continuous(limits = c(0,20),expand = c(0,0))
# hm1 <- prod_100_rec %>% ggplot() +
# geom_tile(aes(product_name,add_to_cart_order, fill = perc*100)) +
# scale_fill_gradient2()+
# ylim(0,150)+
# theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1)) +
# labs(title = "Heatmap de Produtos x Cart_Order para clientes Recorrentes")
# ggplotly(hm1, tooltip = "perc")
# HClust Analysis -----------------------------------------------------------
# New HClust analysis, in which the percentage is now computed differently: percentages are tallied
# by order of inclusion in the cart. That is, each cart position has a total count of products that sums to 100%,
# and each product gets its percentage share at that cart position.
# Selecting the top products (by total reorders) from the overall base
base_ord_geral_prod_total <- base_ord_geral_prod %>% left_join(base_orders_cl_mm)
# base_ord_geral_prod_total2 <- base_ord_geral_prod_total[,c(1:8,10,14)]
base_ord_geral_prod_total2 <- base_ord_geral_prod_total
prod_ord_cart_geral <- base_ord_geral_prod_total2 %>% dplyr::group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
prod_ord_cart_geral2 <- prod_ord_cart_geral %>% dplyr::group_by(product_name) %>% mutate(perc = recorrencias/sum(recorrencias))
prod_ord_cart_geral2_list <- prod_ord_cart_geral2 %>% group_by(product_name) %>% summarise(recorrencias_total = sum(recorrencias)) %>% arrange(-recorrencias_total)
prod_ord_cart_geral2_list <- prod_ord_cart_geral2_list[1:50,1]
prod_100_geral <- prod_ord_cart_geral2 %>% right_join(prod_ord_cart_geral2_list)
ord_cart_prod2 <- prod_ord_cart %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
mat_similarity_ord <- ord_cart_prod2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# mat_similarity_ord <- mat_similarity_ord[1:100,]/
# Removing NAs
mat_similarity_ord <- mat_similarity_ord %>% replace(is.na(.),0)
# Normalising the data
receita <- mat_similarity_ord %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity_ord_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are 100% NA or NaN
x <- inspect_na(mat_similarity_ord_norm)
col_remove <- x$col_name[x$pcnt == 100]
# Removing the columns with NA or NaN
mat_similarity_ord_norm <- mat_similarity_ord_norm %>% select(-c(col_remove))
class(mat_similarity_ord_norm$.)
# dist_mat <- get_dist(mat_similarity_ord_norm, upper = TRUE, diag = TRUE)
#
# n <- 5
# vet_clust <- c(2:((nrow(mat_similarity_ord_norm)-1)/n))
# vet_clust <- vet_clust * n
# vet_clust2 <- c(c(2:9),vet_clust)
# silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
#
# for (i in vet_clust2){
# cutted <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# print(i)
# }
#
# best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
#
# p1 <- silho %>% ggplot(aes(x = k)) +
# geom_line(aes(y = silho_avg), color = "blue") +
# # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# geom_line(aes(y = singulares/40), color = "red") +
# scale_y_continuous(
# name = "Avg_Silh",
# sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# ) +
# geom_vline(xintercept = 6)
# p1
k_select <- 4
cutted_ord_not_rec <- hcut(mat_similarity_ord_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
cutted_ord_not_rec$labels <- as.character(mat_similarity_ord_norm$.)
fviz_dend(cutted_ord_not_rec, k = k_select,
cex = 0.6,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Não Recorrentes",
ylim = c(-30,60),
)
# HClust with recurrent customers
ord_cart_prod_rec2 <- prod_ord_cart_rec %>% right_join(prod_ord_cart_geral2_list) %>% dplyr::group_by(add_to_cart_order) %>% mutate(perc = recorrencias/sum(recorrencias))
mat_similarity_ord_rec <- ord_cart_prod_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
# mat_similarity_ord_rec <- mat_similarity_ord_rec[1:100,]
# Removing NAs
mat_similarity_ord_rec <- mat_similarity_ord_rec %>% replace(is.na(.),0)
# Normalising the data
receita <- mat_similarity_ord_rec %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity_ord_rec_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are 100% NA or NaN
x <- inspect_na(mat_similarity_ord_rec_norm)
col_remove <- x$col_name[x$pcnt == 100]
# Removing the columns with NA or NaN
mat_similarity_ord_rec_norm <- mat_similarity_ord_rec_norm %>% select(-c(col_remove))
# dist_mat <- get_dist(mat_similarity_ord_rec_norm, upper = TRUE, diag = TRUE)
# n <- 5
# vet_clust <- c(2:((nrow(mat_similarity_ord_rec_norm)-1)/n))
# vet_clust <- vet_clust * n
# vet_clust2 <- c(c(2:9),vet_clust)
# silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
#
# for (i in vet_clust2){
# cutted <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=i, graph = TRUE)
# negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
# sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
# silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
# print(i)
# }
#
# best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
# best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
#
# p1 <- silho %>% ggplot(aes(x = k)) +
# geom_line(aes(y = silho_avg), color = "blue") +
# # geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# # geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
# geom_line(aes(y = singulares/40), color = "red") +
# scale_y_continuous(
# name = "Avg_Silh",
# sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
# ) +
# geom_vline(xintercept = 4)
# p1
k_select <- 4
cutted_ord_rec <- hcut(mat_similarity_ord_rec_norm, hc_func = "hclust", hc_method = "ward.D2", k=k_select, graph = TRUE)
cutted_ord_rec$labels <- as.character(mat_similarity_ord_rec_norm$.)
fviz_dend(cutted_ord_rec, k = k_select,
cex = 0.6,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Recorrentes",
ylim = c(-30,60),
)
cutted_ord_rec$cluster
library(dendextend)
dend_not_rec <- as.dendrogram(cutted_ord_not_rec)
dend_rec <- as.dendrogram(cutted_ord_rec)
tang <- dendlist(dend_not_rec, dend_rec)
tang %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram(labels_cex = 0.6,
margin_inner = 15,
k_labels = 4,
k_branches = 4,
axes = FALSE,
lwd = 2,
main_left = "Produtos - Clientes Pouco Recorrentes",
cex_main_left = 1,
main_right = "Produtos - Clientes Recorrentes",
cex_main_right = 1,
dLeaf = 0.1
)
dendlist(dend_not_rec, dend_rec) %>%
untangle(method = "step1side") %>% # Find the best alignment layout
tanglegram(labels_cex = 0.6,
margin_inner = 15,
k_labels = 4,
k_branches = 4,
axes = FALSE,
lwd = 2,
main_left = "Produtos - Clientes Pouco Recorrentes",
cex_main_left = 1,
main_right = "Produtos - Clientes Recorrentes",
cex_main_right = 1,
dLeaf = 0.1
)
# Building an hclust of product by cart order for products bought by low-recurrence customers
mat_similarity <- prod_ord_cart2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
mat_similarity2 <- mat_similarity[1:100,]
# Building an hclust of product by cart order for products bought by the MOST recurrent customers
mat_similarity_rec <- prod_ord_cart_rec2 %>% dplyr::select(product_name,add_to_cart_order, perc) %>% pivot_wider(names_from = add_to_cart_order, values_from = perc)
mat_similarity_rec2 <- mat_similarity_rec[1:100,]
# vet_clust <- c(1:((nrow(mat_similarity2)-2)/20))
n <- 5
vet_clust <- c(1:((nrow(mat_similarity2)-1)/n))
vet_clust <- vet_clust * n
vet_clust2 <- c(c(2:10),vet_clust)
silho <- tibble(k = numeric(), silho_avg = numeric(), negatives = numeric(), singulares = numeric())
for (i in vet_clust2){
cutted <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=i, graph = TRUE)
negativos <- sum(cutted$silinfo$widths$sil_width < 0) / length(cutted$silinfo$widths$sil_width)
sing <- nrow(as_tibble(cutted$cluster) %>% group_by(value) %>% count() %>% filter(n == 1))
silho <- silho %>% bind_rows(c(k = i, silho_avg = cutted$silinfo$avg.width, negatives = negativos, singulares = sing))
print(i)
}
best_k <- silho$k[silho$silho_avg == max(silho$silho_avg)]
best_k_neg <- silho$k[silho$negatives == min(silho$negatives)]
p1 <- silho %>% ggplot(aes(x = k)) +
geom_line(aes(y = silho_avg), color = "blue") +
# geom_rect(aes(xmin = 35, xmax = 53, ymin = 0.33, ymax = 0.35), alpha = 1/500, color = "red", fill = "green") +
# geom_vline(xintercept = c(35, 53), show.legend = TRUE) +
geom_line(aes(y = singulares/40), color = "red") +
scale_y_continuous(
name = "Avg_Silh",
sec.axis = sec_axis(trans =~.*40, name = "n_Sing_Clust")
)
p1
cutted_not_rec <- hcut(mat_similarity2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
# Removing NAs
mat_similarity2 <- mat_similarity2 %>% replace(is.na(.),0)
# Normalising the data
receita <- mat_similarity2 %>% recipe(product_name ~ .) %>%
step_normalize(all_numeric(), -all_outcomes())
prep_receita <- prep(receita)
mat_similarity2_norm <- juice(prep_receita)[[ncol(juice(prep_receita))]] %>% cbind(juice(prep_receita)[,-ncol(juice(prep_receita))])
# Collecting the columns that are 100% NA or NaN
x <- inspect_na(mat_similarity2_norm)
col_remove <- x$col_name[x$pcnt == 100]
# Removing the columns with NA or NaN
mat_similarity2_norm_sem_na <- mat_similarity2_norm %>% select(-c(col_remove))
dist_mat <- get_dist(mat_similarity2_norm_sem_na, upper = TRUE, diag = TRUE)
print(dist_mat)
cutted_not_rec$labels <- mat_similarity2$product_name
cutted_rec <- hcut(mat_similarity_rec2, hc_func = "hclust", hc_method = "complete", k=20, graph = TRUE)
cutted_rec$labels <- mat_similarity_rec2$product_name
cutted_not_rec$size
fviz_dend(cutted_not_rec, k = 20,
cex = 0.7,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Não Recorrentes")
fviz_dend(cutted_rec, k = 20,
cex = 0.7,
type = "rectangle",
k_colors = c("darkgreen","orange"),
labels_track_height = 0.8,
# k_colors = c(1:4,6),
ggtheme = theme_light(),
main = "Dendrograma de Produtos - Clientes Recorrentes")
cutted_not_rec$size
cutted_rec$size
# Recurrence heatmap: Aisles x Cart Order -------------------------------------
# Analyse the products that enter the basket first (by aisle)
ais_ord_cart <- base_ord_geral_prod_not_rec %>% group_by(aisle, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
hm1 <- ais_ord_cart %>% ggplot() +
geom_tile(aes(aisle,add_to_cart_order, fill = rec_perc*100)) +
scale_fill_gradient2()+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1))
ggplotly(hm1, tooltip = "rec_perc")
# Selecting the top 100 products
lista_produtos <- prod_ord_cart %>% dplyr::group_by(product_name) %>% summarise(recorrencia_media = mean(recorrencias)) %>% dplyr::arrange(-recorrencia_media)
lista_produtos[1:100,1]
hm_prod <- prod_ord_cart %>% dplyr::filter(product_name %in% lista_produtos$product_name[1:100]) %>% ggplot() +
geom_tile(aes(product_name,add_to_cart_order, fill = rec_perc*100)) +
scale_fill_gradient2()+
theme(axis.text.x = element_text(angle = 90, size = 8, hjust = 1))
ggplotly(hm_prod, tooltip = "rec_perc")
# Products that enter the basket first ----------------------------------------
# Analyse the products that enter the basket first (by product name)
prod_ord_cart <- base_ord_geral_prod %>% group_by(product_name, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Analyse the products that enter the basket first (by aisle)
ais_ord_cart <- base_ord_geral_prod %>% group_by(aisle, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Analyse the products that enter the basket first (by department)
dept_ord_cart <- base_ord_geral_prod %>% group_by(department, add_to_cart_order) %>%
summarise(quantidade = n(),
recorrencias = sum(reordered)) %>%
mutate(rec_perc = recorrencias/quantidade) %>%
arrange(-quantidade)
# Maximum number of products in a single purchase
max(prod_ord_cart$add_to_cart_order)
max(base_ord_geral_prod$reordered)
# Analyse the products that enter the basket last
# Split by purchases with the largest number of products
# Split by the quantity of products bought in each purchase
# Relate the quantity bought to the order of insertion in the basket
# Comments 2020-08-11
# In fact, there is a high number of purchases with 30 days since the last purchase, because the site stops recording anything beyond 30 days.
# In the 'orders' table, note that 'days_since_prior_order' = NA always occurs for the first purchase. This is a problem when looking for the maximum value, since NA compares greater than any number.
# Keep in mind that the base does not cover only the last 30 days; it contains all purchases, it simply does not record intervals longer than 30 days.
# Check the sum of the differences in 'days_since_prior_order' between one purchase and the previous one. If this sum is
# Criterion for a less recurrent user: rolling mean (5 purchases) of days_since_prior_order > 8 (the median), not counting the first purchase because it is NA. (get Vivi's code)
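# A rough sketch of the recurrence criterion noted above: a 5-purchase rolling mean of
# days_since_prior_order compared against 8 (the median), skipping the first purchase because it is NA.
# zoo::rollmean is used here as one option; the original implementation ("Vivi's code") may differ.
library(zoo)
days_toy <- c(NA, 7, 9, 12, 6, 10, 11, 5, 8, 30)  # toy history for one user; first purchase is NA
roll5 <- rollmean(days_toy[-1], k = 5, fill = NA, align = "right")  # drop the first (NA) purchase
less_recurrent_flag <- roll5 > 8
less_recurrent_flag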
# ==== Build the view that displays the infection status on a standard map of Japan ====
# Returns:
#   data.table: the dataset
cumSumConfirmedByDateAndRegion <- reactive({
dt <- mapData[date >= input$mapDateRange[1] & date <= input$mapDateRange[2]]
dt
})
output$comfirmedMapWrapper <- renderUI({
if (input$switchMapVersion == T) {
echarts4rOutput("echartsSimpleMap", height = "550px")
} else {
echarts4rOutput("echartsMap", height = "550px")
}
})
output$echartsMapPlaySetting <- renderUI({
if(input$switchMapVersion == F) {
tags$span(
dropdownButton(
tags$h4(icon("eye"), i18n$t("表示設定")),
tags$hr(),
materialSwitch(
inputId = "showPopupOnMap",
label = i18n$t("日次増加数のポップアップ"),
status = "danger",
value = T
),
materialSwitch(
inputId = "replyMapLoop",
label = i18n$t("ループ再生"),
status = "danger",
value = T
),
dateRangeInput(
inputId = "mapDateRange",
label = i18n$t("表示日付"),
start = byDate$date[nrow(byDate) - 15],
end = byDate$date[nrow(byDate)],
min = byDate$date[1],
max = byDate$date[nrow(byDate)],
separator = " ~ ",
language = "ja"
),
sliderInput(
inputId = "mapFrameSpeed",
label = i18n$t("再生速度(秒/日)"),
min = 0.5,
max = 3,
step = 0.1,
value = 0.8
),
circle = F,
inline = T,
status = "danger",
icon = icon("gear"),
size = "sm",
width = "300px",
right = T,
tooltip = tooltipOptions(title = i18n$t("表示設定"), placement = "top")
),
style = "float:right;"
)
}
})
output$selectMapBottomButton <- renderUI({
if (input$switchMapVersion == T) {
radioGroupButtons(
inputId = "selectMapBottomButton",
label = NULL,
justified = T,
choiceNames = c(
paste(icon("creative-commons-sampling-plus"), i18n$t("現在")),
paste(icon("syringe"), i18n$t("接種")),
paste(icon("ambulance"), i18n$t("重症"))
),
choiceValues = c("active", "vaccine_ratio", "severe"),
status = "danger"
)
} else {
tags$p()
}
})
simpleMapDataset <- reactive({
dt <- merge(
x = mapData[, .SD[.N], by = ja],
y = mapData[, .SD[.N - 1], by = ja],
by = c("ja", "full_ja", "en", "lat", "lng", "regions"), no.dups = T, sort = F
)
dt[mhlwSummary[日付 == max(日付)], `:=`(
total = count.x,
severe = i.重症者,
active = i.陽性者 - i.退院者 - ifelse(is.na(i.死亡者), 0, i.死亡者),
diff = (count.x - count.y)
), on = c(ja = "都道府県名")]
# join vaccine by prefecture dataset
elderly <- vaccine_by_region[category == "elderly"][date == max(date)]
medical <- vaccine_by_region[category == "medical"][date == max(date)]
vaccine <- medical[elderly, .(
prefecture = prefecture,
first_medical = first,
second_medical = second,
first_elderly = i.first,
second_elderly = i.second,
date_medical = date,
date_elderly = i.date
), on = c(code = "code")]
dt <- dt[vaccine, on = c(full_ja = "prefecture")]
# join population
dt <- dt[prefecture_master[, .(都道府県, population = 人口)],
on = c(full_ja = "都道府県")
]
# vaccine ratio
dt[, vaccine_ratio := round((second_medical + second_elderly) / population * 100, 2)]
# set NA to 0
setnafill(dt, fill = 0, cols = "severe")
dt
})
output$echartsSimpleMap <- renderEcharts4r({
dt <- simpleMapDataset()
# Today's increase
todayTotalIncreaseNumber <- sum(dt$diff, na.rm = T)
subText <- i18n$t("各都道府県からの新規報告なし")
if (todayTotalIncreaseNumber > 0) {
subText <- paste0(
sprintf(
i18n$t("発表がある%s都道府県合計新規%s人, 合計%s人\n\n"),
sum(dt$diff > 0),
prettyNum(todayTotalIncreaseNumber, big.mark = ","),
prettyNum(sum(dt$count.x, na.rm = T), big.mark = ",")
),
i18n$t("※こちらの合計値には空港検疫、チャーター便、\n クルーズ関連の事例などは含まれていない。")
)
}
dt[, translatedRegionName := convertRegionName(full_ja, languageSetting)]
if (input$selectMapBottomButton %in% c("vaccine_ratio")) {
subText <- sprintf(
i18n$t("データ更新日:\n\n医療従事者等(%s)\n一般接種(高齢者含む)(%s)"),
as.Date(as.character(unique(dt$date_medical)), format = "%Y%m%d"),
as.Date(as.character(unique(dt$date_elderly)), format = "%Y%m%d")
)
color_in_range <- c("#DADADA", "#3fcc8d", middleGreen, darkGreen, superDarkGreen, superDarkGreen2)
split_list <- list(
list(min = 75.0, label = "> 75.0 %"),
list(min = 70.0, max = 75.0, label = "70.0 % ~ 75.0 %"),
list(min = 65.0, max = 70.0, label = "65.0 % ~ 70.0 %"),
list(min = 60.0, max = 65.0, label = "60.0 % ~ 65.0 %"),
list(min = 0.0, max = 60.0, label = "0.0 % ~ 60.0 %"),
list(value = 0)
)
formatter <- htmlwidgets::JS(paste0(
"function(params) {
if(params.value) {
return(`<b>${params.name}</b><br>", i18n$t("2回目接種済率:"), "${params.value} %`)
} else {
return('');
}
}
"))
title_text <- i18n$t("2回目接種済マップ")
} else {
color_in_range <- c("#DADADA", "#FFCEAB", "#FF9D57", "#FF781E", "#EA5432", "#C02B11", "#8C0B00", "#000000")
split_list <- list(
list(min = 3000, label = "> 3,000"),
list(min = 1000, max = 3000, label = "1,000 - 3,000"),
list(min = 500, max = 1000, label = "500 - 1,000"),
list(min = 100, max = 500),
list(min = 50, max = 100),
list(min = 10, max = 50),
list(min = 1, max = 10),
list(value = 0)
)
formatter <- htmlwidgets::JS(paste0(
"function(params) {
if(params.value) {
return(`${params.name}<br>",
switch(input$selectMapBottomButton,
active = i18n$t("現在感染者数:"),
total = i18n$t("累積感染者数:"),
severe = i18n$t("現在重症者数:")
), "${params.value}`)
} else {
return('');
}
}
"
))
title_text <- i18n$t("リアルタイム感染者数マップ")
}
map <- dt %>%
e_charts(translatedRegionName) %>%
e_map_register("japan", japanMap) %>%
e_map_(input$selectMapBottomButton,
map = "japan",
name = "感染確認数",
nameMap = useMapNameMap(languageSetting),
layoutSize = "50%",
center = c(137.1374062, 36.8951298),
zoom = 1.5,
itemStyle = list(
borderWidth = 0.2,
borderColor = "white"
),
emphasis = list(
label = list(
fontSize = 8
)
),
roam = "move"
) %>%
e_visual_map_(
input$selectMapBottomButton,
top = "25%",
left = "0%",
inRange = list(color = color_in_range),
type = "piecewise",
splitList = split_list
) %>%
e_color(background = "#FFFFFF") %>%
e_mark_point(serie = dt[diff > 0]$en) %>%
e_tooltip(formatter = formatter) %>%
e_title(
text = title_text,
subtext = subText
)
# Plot today's increase
if (input$selectMapBottomButton %in% c("active")) {
newToday <- dt[diff > 0]
for (i in 1:nrow(newToday)) {
map <- map %>%
e_mark_point(
data = list(
name = newToday[i]$ja,
coord = c(newToday[i]$lng, newToday[i]$lat),
symbolSize = c(7, newToday[i]$diff / 2)
),
symbol = "triangle",
symbolOffset = c(0, "-50%"),
itemStyle = list(
color = "#520e05",
shadowColor = "white",
shadowBlur = 0,
opacity = 0.75
)
)
}
}
map
})
output$echartsMap <- renderEcharts4r({
mapDt <- cumSumConfirmedByDateAndRegion()
# mapDt <- mapData # TEST
newByDate <- rowSums(byDate[date >= input$mapDateRange[1] & date <= input$mapDateRange[2], 2:48])
provinceCountByDate <- rowSums(
byDate[date >= input$mapDateRange[1] & date <= input$mapDateRange[2], 2:48] != 0
)
dateSeq <- seq.Date(input$mapDateRange[1], input$mapDateRange[2], by = "day")
# Daily totals
sumByDay <- cumsum(rowSums(byDate[, 2:ncol(byDate)]))
sumByDay <- data.table(byDate[, 1], sumByDay)
timeSeriesTitle <- lapply(seq_along(dateSeq), function(i) {
subText <- i18n$t("各都道府県からの新規報告なし")
if (provinceCountByDate[i] > 0) {
subText <- sprintf(i18n$t("発表がある%s都道府県合計新規%s人, 合計%s人\n\n"),
provinceCountByDate[i], newByDate[i], sumByDay[date == dateSeq[i]]$sumByDay
)
}
return(
list(
text = dateSeq[i],
subtext = subText
)
)
})
timeSeriesTitleSub <- lapply(seq_along(dateSeq), function(i) {
columnName <- colnames(byDate)[49:ncol(byDate)]
item <- ""
for (name in columnName) {
diff <- byDate[date == dateSeq[i], name, with = F][[1]]
if (diff > 0) {
# New cases
item <- paste(item, paste0(name, i18n$t("新規"), diff), " ")
}
}
return(
list(
subtext = item,
right = "5%",
bottom = "10%"
)
)
})
timeSeriesTitleSource <- lapply(seq_along(dateSeq), function(i) {
return(
list(
subtext = i18n$t("マップのソースについて"),
sublink = "https://github.com/dataofjapan/land",
subtextStyle = list(
color = "#3c8dbc",
fontSize = 10
),
left = "0%",
top = "8%"
)
)
})
mapDt[, translatedRegionName := convertRegionName(full_ja, languageSetting)]
# provinceCode <- fread(paste0(DATA_PATH, 'prefectures.csv')) # TEST
if (input$showPopupOnMap) {
provinceColnames <- colnames(byDate)[2:ncol(byDate)]
provinceDiffPopup <- lapply(dateSeq, function(dateItem) {
row <- as.matrix(byDate[date == dateItem])[1, 2:48]
value <- row[row != "0"]
name <- names(value)
dateData <- list()
for (i in seq_along(value)) {
province <- provinceCode[`name-ja` == name[i]]
dateData[[i]] <- list(
coord = list(province$lng, province$lat),
value = paste0(name[i], "\n", value[i])
)
}
list(
data = dateData,
itemStyle = list(color = darkYellow),
label = list(fontSize = 8),
symbol = "pin",
symbolSize = 40
)
})
}
map <- mapDt %>%
group_by(date) %>%
e_charts(translatedRegionName, timeline = T) %>%
e_map_register("japan", japanMap) %>%
e_map(count,
map = "japan",
name = "感染確認数",
nameMap = useMapNameMap(languageSetting),
layoutSize = "50%",
center = c(137.1374062, 36.8951298),
zoom = 1.5,
itemStyle = list(
borderWidth = 0.2,
borderColor = "white"
),
emphasis = list(
label = list(
fontSize = 8
)
),
roam = "move"
) %>%
e_visual_map(
count,
top = "20%",
left = "0%",
inRange = list(color = c("#DADADA", "#FFCEAB", "#FF9D57", "#FF781E", "#EA5432", "#C02B11", "#8C0B00")),
type = "piecewise",
splitList = list(
list(min = 1000),
list(min = 500, max = 1000),
list(min = 100, max = 500),
list(min = 50, max = 100),
list(min = 10, max = 50),
list(min = 1, max = 10),
list(value = 0)
)
) %>%
e_color(background = "#FFFFFF") %>%
e_timeline_opts(
left = "0%", right = "0%", symbol = "diamond",
playInterval = input$mapFrameSpeed * 1000,
loop = input$replyMapLoop,
currentIndex = length(dateSeq) - 1
) %>%
e_tooltip(formatter = htmlwidgets::JS('
function(params) {
if(params.value) {
return(params.name + ":" + params.value)
} else {
return("");
}
}
')) %>%
e_timeline_serie(
title = timeSeriesTitle
) %>%
e_timeline_serie(
title = timeSeriesTitleSub,
index = 2
) %>%
e_timeline_serie(
title = timeSeriesTitleSource,
index = 3
)
if (input$showPopupOnMap) {
map %>%
e_timeline_on_serie(
markPoint = provinceDiffPopup,
serie_index = 1
)
} else {
map
}
})
# ==== Case map ==== # TODO: this is not home-page content, move it somewhere else
output$caseMap <- renderLeaflet({
defaultRadius <- 8
genderColor <- c("女" = "red", "男" = "blue", "不明" = "grey")
statusColor <- c("入院" = "red", "退院" = "green", "不明" = "grey")
map <- leaflet() %>% addTiles()
for (i in 1:length(activity)) {
xOffset <- 0
yOffset <- 0
lat <- 0
lng <- 0
id <- as.numeric(names(activity[i]))
label <- paste(
"<b>患者番号:", id,
'<span class="label label-info" style="float:right;">',
activity[[i]]$status[2],
"</span><br/>居住地:", detail[id, ]$residence,
" 性別:", detail[id, ]$gender,
"</b>"
)
popup <- paste0(label, "<hr/>")
for (j in 1:length(activity[[i]]$process)) {
popup <- paste(
popup,
paste(
'<li><span class="label label-primary">',
as.Date(names(activity[[i]]$process[j]), format = "%Y%m%d"),
"</span>",
activity[[i]]$process[[j]], "</li>"
)
)
}
popup <- paste(popup, "<hr/><b>", lang[[langCode]][68], ":", detail[id, ]$link, "</b>")
for (j in 1:length(activity[[i]]$process)) {
currentLat <- position[pos == activity[[i]]$activity[[j]]$pos]$lat
currentLng <- position[pos == activity[[i]]$activity[[j]]$pos]$lng
if (lat != currentLat && lng != currentLng) {
if (lat != 0 && lng != 0) {
map <- addFlows(map,
color = genderColor[detail[id, ]$gender][[1]],
lat0 = lat + xOffset, lat1 = currentLat + xOffset,
lng0 = lng + yOffset, lng1 = currentLng + yOffset,
opacity = 0.8,
flow = 1,
maxThickness = 1,
time = as.Date(names(activity[[i]]$activity[j]), format = "%Y%m%d")
)
}
lat <- currentLat
lng <- currentLng
radius <- defaultRadius
if (!is.na(position[pos == activity[[i]]$activity[[j]]$pos]$radius)) {
radius <- position[pos == activity[[i]]$activity[[j]]$pos]$radius
}
map <- addCircleMarkers(map,
lat = currentLat + xOffset,
lng = currentLng + yOffset,
radius = radius,
color = genderColor[detail[id, ]$gender][[1]],
fillColor = statusColor[activity[[i]]$status][[1]],
weight = 1, opacity = 1,
popup = HTML(popup),
label = HTML(label)
)
}
}
}
map
})
| /03_Components/Main/ConfirmedMap.server.R | permissive | swsoyee/2019-ncov-japan | R | false | false | 15,173 | r |
path <- "/home/manish/Desktop/Data2017/June/Mercedes/"
setwd(path)
# With removing outliers --------------------------------------------------
library(data.table)
library(e1071)
train <- fread("train.csv")
test <- fread("test.csv")
skewness(train$y)
train[,summary(y)]
train <- train[y < 260]
skewness(train$y)
train_id <- train$ID
train_labels <- train$y
test_id <- test$ID
train[,c('ID','y') := NULL]
test[,ID := NULL]
df_all <- rbind(train,test)
categorical_vars <- paste0('X',c(0:6,8))
#categorical_df <- df_all[,categorical_vars,with=F]
binary_df <- df_all[,-categorical_vars, with=F]
# Columns with zero standard deviation carry no information, so we drop them.
cols_to_drop <- c('X11', 'X93', 'X107', 'X233', 'X235', 'X268', 'X289', 'X290', 'X293', 'X297', 'X330','X347')
# Sanity check: these columns indeed have (near) zero standard deviation.
sapply(binary_df[,cols_to_drop,with=F], function(x) sd(x))
binary_df <- binary_df[, -cols_to_drop, with=F]
head(binary_df)
train_binary <- binary_df[1:nrow(train)]
test_binary <- binary_df[(nrow(train)+1):nrow(binary_df)]
# PCA
tpca <- prcomp(train_binary, scale. = T)
var <- (tpca$sdev)^2
prop_var <- var/sum(var)
sum(prop_var[1:30])
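# (The 30-component cut-off is taken as given here; one common way to choose it is to keep the
#  smallest k whose cumulative explained variance passes a target, e.g. which(cumsum(prop_var) >= 0.9)[1].)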
pca_train <- tpca$x[,1:30]
pca_train <- as.data.table(pca_train)
pctest <- predict(tpca, test_binary)
pctest <- as.data.table(pctest[,1:30])
fwrite(pca_train, "features/pca_binary_train.csv")
fwrite(pctest, "features/pca_binary_test.csv")
| /scripts/pca_binary.R | no_license | saraswatmks/Mercedes_Kaggle | R | false | false | 1,417 | r |
# EXERCISE 1 #
# Author: Andreas Kracht Frandsen
# Date: 2020-01-16
# For documentation please read the Computational Part of the main document.
# Load required packages.
library(quantmod)
library(Rsolnp)
# Obtains the relevant data from the VIX Index. We are only gonna use VIX.Adjusted.
getSymbols(Symbols = "^VIX",
from = '2010-01-01',
to = '2019-01-01',
src = 'yahoo')
# First 5 observations of VIX.
head(VIX)
# Plot of VIX.Adjusted
plot(y = VIX$VIX.Adjusted,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'VIX',
main = 'Volatility Index')
## GAS-GAMMA ##
# The following function is the filter for the GAS-GAMMA model.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: Output (List), the means (Vector) and log likelihood (Double).
GASGAMMA_Filter <- function(Par, Y) {
iT = length(Y)
Mu = numeric(iT)
Omega = Par[1]
Alpha = Par[2]
Beta = Par[3]
a = Par[4]
# The first mu is initialized as the unconditional expectation of mu.
Mu[1] = Omega/(1 - Beta)
# In case mu gets negative or hits zero we adjust it.
if (Mu[1] < 1e-10) {Mu[1] = 1e-10}
# For the rest (T-1) mu's we use the updating equation.
for (t in 2:iT) {
Mu[t] = Omega + Alpha * ((sqrt(a)*(Y[t-1] - Mu[t-1]))/Mu[t-1]) + Beta * Mu[t-1]
# In case mu gets negative or hits zero we adjust it.
if (Mu[t] < 1e-10) {Mu[t] = 1e-10}
}
# The log likelihood is computed as shown in the theoretical part.
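  # (Assumed parameterization: Y_t | mu_t ~ Gamma(shape = a, scale = mu_t / a), so that E[Y_t] = mu_t and
  #  the per-observation log density is a*log(a) - lgamma(a) + (a - 1)*log(y) - a*(y/mu + log(mu));
  #  summing over t gives the expression below.)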
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization proces.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood <- function(Par, Y) {
Filter = GASGAMMA_Filter(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the GAS-GAMMA model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of GASGAMMA_Filter.
Estimate_GASGAMMA <- function(Y) {
# Use the gosolnp from the Rsolnp package to optimize the negative log likelihood. With random initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood,
Y = Y,
n.sim = 20,
LB = c(-0.5, 0.001, 0.01, 0.1),
UB = c(0.5, 1.5, 0.999, 300)
)
Par = optimiser$pars
LLK = -tail(optimiser$value, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = GASGAMMA_Filter(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate four parameters.
BIC = (log(iT) * 4 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
## GAS-GAMMA-C ##
GASGAMMA_Filter_c <- function(Par, Y) {
iT = length(Y)
Omega = Par[1]
Beta = Par[2]
a = Par[3]
  # In this constant specification mu does not vary over time; it is fixed at its unconditional value omega / (1 - beta).
Mu = Omega / (1 - Beta)
# In case mu gets negative or hits zero we adjust it.
if (Mu < 1e-10) {Mu = 1e-10}
# The log likelihood is computed as shown in the theoretical part.
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization proces.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood_c <- function(Par, Y) {
Filter = GASGAMMA_Filter_c(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the GAS-GAMMA-C model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of GASGAMMA_Filter_c.
Estimate_GASGAMMA_c <- function(Y) {
# Use the gosolnp from the Rsolnp package to optimize the negative log likelihood. With random initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood_c,
Y = Y,
n.sim = 20,
LB = c(-0.5, 0.01, 0.1),
UB = c(0.5, 0.999, 300)
)
Par = optimiser$pars
LLK = -tail(optimiser$value, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = GASGAMMA_Filter_c(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate three parameters.
BIC = (log(iT) * 3 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
## MEM-GAMMA ##
# The following function is the filter for the MEM-GAMMA model.
# Input: Par (Vector), the parameters in the model, which are kappa, eta, phi and a.
# Y (Vector), the returns of the relevant series.
# Output: Output (List), the means (Vector) and log likelihood (Double).
MEMGAMMA_Filter <- function(Par, Y) {
iT = length(Y)
Mu = numeric(iT)
Kappa = Par[1]
Eta = Par[2]
Phi = Par[3]
a = Par[4]
# The first mu is initialized as the unconditional expectation of mu.
Mu[1] = Kappa/(1 - Eta - Phi)
# In case mu gets negative or hits zero we adjust it.
if (Mu[1] < 1e-10) {Mu[1] = 1e-10}
# For the rest (T-1) mu's we use the updating equation.
for (t in 2:iT) {
Mu[t] = Kappa + Eta * Y[t-1] + Phi * Mu[t-1]
# In case mu gets negative or hits zero we adjust it.
if (Mu[t] < 1e-10) {Mu[t] = 1e-10}
}
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization proces.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood_MEM <- function(Par, Y) {
Filter = MEMGAMMA_Filter(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the MEM-GAMMA model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of GASGAMMA_Filter.
Estimate_MEMGAMMA <- function(Y) {
# Use the gosolnp from the Rsolnp package to optimize the negative log likelihood. With random initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood_MEM,
Y = Y,
n.sim = 20,
LB = c(0.1, 0.01, 0.01, 0.1),
UB = c(10, 0.99, 0.99, 300)
)
Par = optimiser$pars
LLK = -tail(optimiser$value, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = MEMGAMMA_Filter(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate four parameters.
BIC = (log(iT) * 4 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
# Fit all the models using VIX data.
Fit_GAS_GAMMA = Estimate_GASGAMMA(VIX$VIX.Adjusted)
Fit_GAS_GAMMA_c = Estimate_GASGAMMA_c(VIX$VIX.Adjusted)
Fit_MEM_GAMMA = Estimate_MEMGAMMA(VIX$VIX.Adjusted)
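# The three specifications can be compared via BIC (lower is better); a small sketch assuming the
# three fits above completed successfully:
BIC_comparison <- c(GAS_GAMMA   = Fit_GAS_GAMMA$BIC,
                    GAS_GAMMA_c = Fit_GAS_GAMMA_c$BIC,
                    MEM_GAMMA   = Fit_MEM_GAMMA$BIC)
print(BIC_comparison)
names(which.min(BIC_comparison))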
# PLOT OF MEM.
# Control graphics device, to obtain 3 X 1 grid.
par(mfrow = c(3,1))
plot(y = VIX$VIX.Adjusted,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'VIX',
main = 'i) VIX Index')
plot(y = Fit_MEM_GAMMA$FilteredValues$Mu,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = expression(mu),
main = 'ii) Mean')
plot(y = Fit_MEM_GAMMA$FilteredValues$Mu^2/Fit_MEM_GAMMA$Par[4],
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = expression(sigma^2),
main = 'iii) Variance')
# EXERCISE 2 #
# Author: Andreas Kracht Frandsen
# Date: 2020-01-16
# For documentation please read the Computational Part of the main document.
# Load required packages.
library(Rsolnp)
library(mvtnorm)
library(quantmod)
# Obtains the relevant updated dataset given in the assignment.
GSPC_DJI <- read.csv2('data/data.csv', sep = ';', dec = ',')
# Just to gather dates related to the dataset. Used for plotting.
getSymbols(Symbols = "^GSPC",
from = '2007-01-03',
to = '2019-01-01',
src = 'yahoo')
# First 5 observations of GSPC_DJI.
head(GSPC_DJI)
# Plot of GSPC
plot(y = GSPC_DJI$GSPC,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Return',
main = 'SP500 Return Data')
# Plot of DJI
plot(y = GSPC_DJI$DJI,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Return',
main = 'DOW Return Data')
## GARCH(1,1) ##
# The following function is the filter for the GARCH(1,1) model.
# Input: Omega, Alpha and Beta, the parameters in the model (Double).
# Y (Vector), the returns of the relevant series.
# Output: Output (List), log likelihood (Double) and the variances (Vector).
GARCHFilter <- function(Y, Omega, Alpha, Beta) {
iT = length(Y)
Sigma2 = numeric(iT)
# The first variance is set to the empirical variance of the first 10 % of the observations.
Sigma2[1] = var(Y[1:round(iT * 0.1)])
# Compute the likelihood of the first observation.
LLK = dnorm(Y[1], 0, sqrt(Sigma2[1]), log = TRUE)
# For the rest (T-1) observations we use the updating equation.
for (t in 2:iT) {
Sigma2[t] = Omega + Alpha * Y[t-1]^2 + Beta * Sigma2[t - 1]
LLK = LLK + dnorm(Y[t], 0, sqrt(Sigma2[t]), log = TRUE)
}
Output = list()
Output[["LLK"]] = LLK
Output[["Sigma2"]] = Sigma2
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization proces.
# Input: Par (Vector), the parameters in the model, which are omega, alpha and beta.
# Y (Vector), the returns of the relevant series.
# Output: -LLK (Double), the negative log likelihood.
ObjectiveFunction <- function(Par, Y) {
Omega = Par[1]
Alpha = Par[2]
Beta = Par[3]
LLK = GARCHFilter(Y, Omega, Alpha, Beta)$LLK
return(-LLK)
}
# The following function serves as a basis to evaluate the inner part of the inequality constraints that need to be satisfied to impose weak stationarity.
# Input: Par (Vector), the parameters in the inner part of the inequality constraints, which are alpha and beta.
# Output: Alpha+Beta (Double), the inner part of the inequality constraints.
ineqfun_GARCH_WS <- function(Par, ...) {
Alpha = Par[2]
Beta = Par[3]
return(Alpha + Beta)
}
# The following function estimates the GARCH(1,1) model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double), the variances (Vector), the log likelihood (Double)
# and the standardized residuals.
EstimateGARCH <- function(Y, ineqfun_GARCH = ineqfun_GARCH_WS, ineqLB = 0.00, ineqUB = 0.9999) {
  # We set starting values for Alpha and Beta and set Omega to target the unconditional variance of the GARCH(1,1) model.
Alpha = 0.125
Beta = 0.85
Omega = var(Y) * (1.0 - Alpha - Beta)
Par = c(Omega, Alpha, Beta)
# Use the solnp from the Rsolnp package to optimize the negative log likelihood.
# By default we specity ineqLB = 0.00 and ineqUB = 0.9999 in order to match 0 < alpha + beta < 0.9999.
optimizer = solnp(Par,
fun = ObjectiveFunction,
Y = Y,
ineqfun = ineqfun_GARCH,
ineqLB = ineqLB,
ineqUB = ineqUB,
LB = c(0.00001, 0.0001, 0.0001),
UB = c(10.0, 0.999, 0.999)
)
Par = optimizer$pars
LLK = -tail(optimizer$values, 1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of the variance.
Sigma2 = GARCHFilter(Y, Par[1], Par[2], Par[3])$Sigma2
# Computation of Bayesian Information Criterion.
iT = length(Y)
BIC = (-2 * LLK + log(iT) * length(Par))
# Compute standardized residuals.
st_res <- Y/sqrt(Sigma2)
Output = list()
Output[["Par"]] = Par
Output[["LLK"]] = LLK
Output[["BIC"]] = BIC
Output[["Sigma2"]] = Sigma2
Output[["st_res"]] = st_res
return(Output)
}
# Fit GARCH(1,1) for GSPC.
Fit_GSPC = EstimateGARCH(GSPC_DJI$GSPC)
# Double check alpha+beta.
sum(Fit_GSPC$Par[-1])
# Fit GARCH(1,1) for DJI.
Fit_DJI = EstimateGARCH(GSPC_DJI$DJI)
# Double check alpha+beta.
sum(Fit_DJI$Par[-1])
# Make one list including both fits.
fit <- list(Fit_GSPC, Fit_DJI)
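# Quick visual check of the two fitted conditional volatilities (a sketch assuming the two fits above;
# the x-axis is the observation index, since GSPC_DJI carries returns without dates).
plot(sqrt(Fit_GSPC$Sigma2), type = 'l',
     xlab = 'Observation', ylab = 'Volatility', main = 'Fitted GARCH(1,1) Volatilities')
lines(sqrt(Fit_DJI$Sigma2), col = 'red', lty = 'dashed')
legend('topright', legend = c('S&P500', 'DOW'), col = c('black', 'red'), lty = c('solid', 'dashed'))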
## DCC AND CCC ##
# The following function is the filter for the DCC (CCC) model.
# Input: A and B (Double), the parameters in the model, which are a and b.
# Eta (Matrix), the standardized residuals from GARCH(1,1).
# Q (Matrix), the unconditional correlation.
# Output: Output (List), the log likelihood (Double) and the correlation matrix R.
DCCFilter <- function(Eta, A, B, Q) {
iN = ncol(Eta)
iT = nrow(Eta)
Cor = array(0, dim = c(iN, iN, iT))
aQ = array(0, dim = c(iN, iN, iT))
## Initialize to the unconditional correlation.
Cor[ , , 1] = Q
aQ[ , , 1] = Q
# Compute the contribution to the likelihood of the first observation.
LLK = Eta[1, , drop = FALSE] %*% solve(Cor[,, 1]) %*% t(Eta[1, , drop = FALSE]) -
Eta[1, , drop = FALSE]%*% t(Eta[1, , drop = FALSE]) + log(det(Cor[,, 1]))
# For the rest (T-1) observations.
for (t in 2:iT) {
# Update the Q matrix.
aQ[,, t] = Q * (1 - A - B) + A * t(Eta[t - 1, , drop = FALSE]) %*% Eta[t - 1, , drop = FALSE] +
B * aQ[,,t - 1]
## Compute the correlation matrix R.
Cor[,, t] = diag(sqrt(1/diag(aQ[,, t]))) %*% aQ[,, t] %*% diag(sqrt(1/diag(aQ[,, t])))
LLK = LLK + Eta[t, , drop = FALSE] %*% solve(Cor[,, t]) %*% t(Eta[t, , drop = FALSE]) -
Eta[t, , drop = FALSE] %*% t(Eta[t, , drop = FALSE]) + log(det(Cor[,, t]))
}
Output = list()
Output[["LLK"]] = -0.5 * LLK
Output[["Cor"]] = Cor
return(Output)
}
# The following function estimates the DCC (CCC) model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Matrix), the returns of the relevant series.
# fit (List), the fit of the Garch(1,1) models combined.
#        CCC (Boolean), whether the CCC model should be estimated instead of the DCC.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double), the total log likelihood (Double),
# the correlation matrix (Matrix), the standard deviations (Matrix), the parameters of the Garch(1,1) (Vector)
# and the standardized residuals (Vector).
Estimate_DCC <- function(Y, fit, CCC = FALSE) {
Eta <- cbind(unlist(fit[[1]]["st_res"]), unlist(fit[[2]]["st_res"]))
# Compute unconditional correlation.
Q = cor(Eta)
if(CCC == FALSE){
# Initial parameters of a and b.
Par = c(0.04, 0.9)
# Use the solnp from the Rsolnp package to optimize the negative log likelihood.
optimizer = solnp(Par, fun = function(Par, Eta, Q) {
Filter = DCCFilter(Eta, Par[1], Par[2], Q)
NLLK = -as.numeric(Filter$LLK)
return(NLLK)
}, ineqfun = function(Par, ...) {
sum(Par)
}, ineqLB = 1e-4, ineqUB = 0.999,
LB = c(1e-4, 1e-4), UB = c(0.999, 0.999),
Eta = Eta, Q = Q)
Par = optimizer$pars
# Likelihood contribution of correlation.
LLK_C = -tail(optimizer$values, 1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of the correlation matrix.
Filter = DCCFilter(Eta, Par[1], Par[2], Q)
}
else{
Filter = DCCFilter(Eta, 0, 0, Q)
LLK_C = Filter[["LLK"]]
}
Sigma = sqrt(cbind(unlist(fit[[1]]["Sigma2"]), unlist(fit[[2]]["Sigma2"])))
Coef = cbind(unlist(fit[[1]]["Par"]), unlist(fit[[2]]["Par"]))
# Likelihood contribution of volatility from GARCH(1,1)'s.
LLK_V = sum(unlist(fit[[1]]["LLK"]), unlist(fit[[2]]["LLK"]))
# Total likelihood.
LLK = LLK_V + LLK_C
Cor = Filter[["Cor"]]
iT = nrow(Y)
# Computation of Bayesian Information Criterion.
BIC = log(iT) * 8 - 2 * LLK
Output = list()
Output[["LLK"]] = LLK
Output[["Coef"]] = Coef
if(CCC == FALSE){
Output[["Par"]] = Par
}
Output[["Sigma"]] = Sigma
Output[["Cor"]] = Cor
Output[["Eta"]] = Eta
Output[["BIC"]] = BIC
return(Output)
}
# Fit DCC and CCC for our returns.
Fit_DCC = Estimate_DCC(GSPC_DJI, fit)
Fit_CCC = Estimate_DCC(GSPC_DJI, fit, CCC = TRUE)
## MINIMUM VARIANCE PORTFOLIO ##
# The following function computes the Minimum Variance Portfolio.
# Input: fit (List), the fit of the DCC or CCC model.
# Output: weight (Array), the optimal portfolio weights for the Minimum Variance Portfolio.
MVP <- function(fit){
iT = length(fit$Sigma[ , 1])
  iN = ncol(fit$Sigma)
D = array(0, dim = c(iN, iN, iT))
SIGMA_INV = array(0, dim = c(iN, iN, iT))
TOP = array(0, dim = c(1, iN, iT))
ell = array(1, dim = c(iN, 1))
BOTTOM = array(0, dim = c(1,1,iT))
weight = array(0, dim = c(1, iN, iT))
for (t in 1:iT) {
D[ , , t] = diag(fit$Sigma[t, ])
SIGMA_INV[ , , t] = solve(D[ , , t]) %*% solve(fit$Cor[ , , t]) %*% solve(D[ , , t])
TOP[ , , t] = SIGMA_INV[ , , t] %*% ell
BOTTOM[ , , t] = t(ell) %*% SIGMA_INV[ , , t] %*% ell
weight[ , , t] = TOP[ , , t] / BOTTOM[ , , t]
}
return(weight)
}
# Compute the weights for both DCC and CCC models.
weight_DCC <- MVP(Fit_DCC)
weight_CCC <- MVP(Fit_CCC)
# Plot MVP for DCC
plot(y = weight_DCC[1,1,],
x = index(head(GSPC$GSPC.Adjusted,-1)),
type ='l',
ylim =c(-5,5),
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Weight',
main = 'Portfolio Weights for S&P500 and DOW using DCC'
)
lines(y = weight_DCC[1,2,],
x = index(head(GSPC$GSPC.Adjusted,-1)),
type ='l',
col = 'red',
lty = 'dashed')
# Plot MVP for CCC
plot(y = weight_CCC[1,1,],
x = index(head(GSPC$GSPC.Adjusted,-1)),
type ='l',
ylim =c(-5,5),
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Weight',
main = 'Portfolio Weights for S&P500 and DOW using CCC'
)
lines(y = weight_CCC[1,2,],
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
col = 'red',
lty = 'dashed')
## CoVaR ##
# The following function computes the difference between the Multivariate Gaussian CDF and the squared significance level.
# Input: CoVaR (Double), the CoVaR.
# VaR (Double), the VaR.
# sigma (Matrix), the standard deviation matrix.
# alpha (Double), the significance level.
# Output: target (Double), the value to optimize over.
bi_pnorm_t <- function(CoVaR, VaR, sigma, alpha){
func <- pmvnorm(upper = c(CoVaR, VaR), sigma = sigma)
  target <- func - alpha^2
  return(target)
}
# The following function computes the CoVaR.
# Input: fit (List), the fit of either DCC or CCC.
# alpha (Double), the significance level.
# Output: CoVaR (Vector), the CoVaR over time.
covar <- function(fit, alpha){
iT <- length(fit$Sigma[,1])
D <- array(0, dim = c(2,2,iT))
CoVaR <- c()
for (t in 1:iT) {
D[,,t] = diag(fit$Sigma[t,])
SIGMA = D[,,t] %*% fit$Cor[,,t] %*% D[,,t]
sdY_2 <- sqrt(SIGMA[1, 2])
VaR <- qnorm(alpha, 0, sdY_2)
CoVaR[t] <- uniroot(bi_pnorm_t, interval = c(-10^4, 10), VaR = VaR, sigma = SIGMA, alpha=alpha)[[1]]
}
return(CoVaR)
}
# Compute the CoVaR at 0.01 and 0.05 significance level for both models.
DCC_CoVaR_1 <- covar(Fit_DCC, 0.01)
DCC_CoVaR_5 <- covar(Fit_DCC, 0.05)
CCC_CoVaR_1 <- covar(Fit_CCC, 0.01)
CCC_CoVaR_5 <- covar(Fit_CCC, 0.05)
# Plot of CoVaR for DCC
plot(y = DCC_CoVaR_1,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'CoVaR',
main = 'CoVaR of DCC Model')
lines(y = DCC_CoVaR_5,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
col = 'red',
lty = 'dashed')
# Plot of CoVaR for CCC
plot(y= CCC_CoVaR_1,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'CoVaR',
main = 'CoVaR of CCC Model')
lines(y = CCC_CoVaR_5,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
col = 'red',
lty = 'dashed')
| /Exam/_book/code/Code_Flow_38.R | no_license | afrandsen/financial-econometrics | R | false | false | 22,738 | r | # EXERCISE 1 #
# Author: Andreas Kracht Frandsen
# Date: 2020-01-16
# For documentation please read the Computational Part of the main document.
# Load required packages.
library(quantmod)
library(Rsolnp)
# Obtains the relevant data from the VIX Index. We are only gonna use VIX.Adjusted.
getSymbols(Symbols = "^VIX",
from = '2010-01-01',
to = '2019-01-01',
src = 'yahoo')
# First 5 observation of VIX.
head(VIX)
# Plot of VIX.Adjusted
plot(y = VIX$VIX.Adjusted,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'VIX',
main = 'Volatility Index')
## GAS-GAMMA ##
# The following function is the filter for the GAS-GAMMA model.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: Output (List), the means (Vector) and log likelihood (Double).
GASGAMMA_Filter <- function(Par, Y) {
iT = length(Y)
Mu = numeric(iT)
Omega = Par[1]
Alpha = Par[2]
Beta = Par[3]
a = Par[4]
# The first mu is initialized as the unconditional expectation of mu.
Mu[1] = Omega/(1 - Beta)
# In case mu gets negative or hits zero we adjust it.
if (Mu[1] < 1e-10) {Mu[1] = 1e-10}
# For the rest (T-1) mu's we use the updating equation.
for (t in 2:iT) {
Mu[t] = Omega + Alpha * ((sqrt(a)*(Y[t-1] - Mu[t-1]))/Mu[t-1]) + Beta * Mu[t-1]
# In case mu gets negative or hits zero we adjust it.
if (Mu[t] < 1e-10) {Mu[t] = 1e-10}
}
# The log likelihood is computed as shown in the theoretical part.
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization process.
# Input: Par (Vector), the parameters in the model, which are omega, alpha, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood <- function(Par, Y) {
Filter = GASGAMMA_Filter(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the GAS-GAMMA model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of GASGAMMA_Filter.
Estimate_GASGAMMA <- function(Y) {
  # Use the gosolnp from the Rsolnp package to optimize the negative log likelihood, with randomly initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood,
Y = Y,
n.sim = 20,
LB = c(-0.5, 0.001, 0.01, 0.1),
UB = c(0.5, 1.5, 0.999, 300)
)
Par = optimiser$pars
  LLK = -tail(optimiser$values, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = GASGAMMA_Filter(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate four parameters.
BIC = (log(iT) * 4 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
## GAS-GAMMA-C ##
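# The following function is the filter for the GAS-GAMMA-C model, the constant (static) version of the GAS-GAMMA filter above.
# Input: Par (Vector), the parameters in the model, which are omega, beta and a.
#        Y (Vector), the returns of the relevant series.
# Output: Output (List), the constant mean (Double) and the log likelihood (Double).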
GASGAMMA_Filter_c <- function(Par, Y) {
iT = length(Y)
Omega = Par[1]
Beta = Par[2]
a = Par[3]
# The first mu is initialized as the unconditional expectation of mu.
Mu = Omega / (1 - Beta)
# In case mu gets negative or hits zero we adjust it.
if (Mu < 1e-10) {Mu = 1e-10}
# The log likelihood is computed as shown in the theoretical part.
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization process.
# Input: Par (Vector), the parameters in the model, which are omega, beta and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood_c <- function(Par, Y) {
Filter = GASGAMMA_Filter_c(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the GAS-GAMMA-C model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of GASGAMMA_Filter_c.
Estimate_GASGAMMA_c <- function(Y) {
  # Use the gosolnp from the Rsolnp package to optimize the negative log likelihood, with randomly initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood_c,
Y = Y,
n.sim = 20,
LB = c(-0.5, 0.01, 0.1),
UB = c(0.5, 0.999, 300)
)
Par = optimiser$pars
  LLK = -tail(optimiser$values, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = GASGAMMA_Filter_c(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate three parameters.
BIC = (log(iT) * 3 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
## MEM-GAMMA ##
# The following function is the filter for the MEM-GAMMA model.
# Input: Par (Vector), the parameters in the model, which are kappa, eta, phi and a.
# Y (Vector), the returns of the relevant series.
# Output: Output (List), the means (Vector) and log likelihood (Double).
MEMGAMMA_Filter <- function(Par, Y) {
iT = length(Y)
Mu = numeric(iT)
Kappa = Par[1]
Eta = Par[2]
Phi = Par[3]
a = Par[4]
# The first mu is initialized as the unconditional expectation of mu.
Mu[1] = Kappa/(1 - Eta - Phi)
# In case mu gets negative or hits zero we adjust it.
if (Mu[1] < 1e-10) {Mu[1] = 1e-10}
# For the rest (T-1) mu's we use the updating equation.
for (t in 2:iT) {
Mu[t] = Kappa + Eta * Y[t-1] + Phi * Mu[t-1]
# In case mu gets negative or hits zero we adjust it.
if (Mu[t] < 1e-10) {Mu[t] = 1e-10}
}
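  # The log likelihood is computed as shown in the theoretical part.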
LLK = iT * (a * log(a) - lgamma(a)) + (a - 1) * sum(log(Y)) - a * sum(Y / Mu + log(Mu))
Output = list()
Output[["Mu"]] = Mu
Output[["LLK"]] = LLK
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization process.
# Input: Par (Vector), the parameters in the model, which are kappa, eta, phi and a.
# Y (Vector), the returns of the relevant series.
# Output: NLL (Double), the negative log likelihood.
NegLogLikelihood_MEM <- function(Par, Y) {
Filter = MEMGAMMA_Filter(Par, Y)
NLL = -Filter[["LLK"]]
# In case the negative log likelihood isn't finite we adjust it to a large value.
if (!is.finite(NLL)) {
NLL = 1e5
}
return(NLL)
}
# The following function estimates the MEM-GAMMA model by first finding maximum likelihood estimates of our parameters.
# Input: Y (Vector), the returns of the relevant series.
# Output: Output (List), the optimized parameters (Vector), the BIC (Double) and the filtered values of MEMGAMMA_Filter.
Estimate_MEMGAMMA <- function(Y) {
  # Use the gosolnp from the Rsolnp package to optimize the negative log likelihood, with randomly initialized starting values.
optimiser = gosolnp(fun = NegLogLikelihood_MEM,
Y = Y,
n.sim = 20,
LB = c(0.1, 0.01, 0.01, 0.1),
UB = c(10, 0.99, 0.99, 300)
)
Par = optimiser$pars
  LLK = -tail(optimiser$values, n=1)
# Here we run the filter using the optimal parameter values, to obtain the final estimates of mu.
FilteredValues = MEMGAMMA_Filter(Par, Y)
iT = length(Y)
# Computation of Bayesian Information Criterion, using the fact that we estimate four parameters.
BIC = (log(iT) * 4 - 2 * LLK)
Output = list()
Output[["Par"]] = Par
Output[["BIC"]] = BIC
Output[["FilteredValues"]] = FilteredValues
return(Output)
}
# Fit all the models using VIX data.
Fit_GAS_GAMMA = Estimate_GASGAMMA(VIX$VIX.Adjusted)
Fit_GAS_GAMMA_c = Estimate_GASGAMMA_c(VIX$VIX.Adjusted)
Fit_MEM_GAMMA = Estimate_MEMGAMMA(VIX$VIX.Adjusted)
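# Optional quick comparison of the three fits by BIC (lower is better); uses only objects created above.
# c(GAS = Fit_GAS_GAMMA$BIC, GAS_c = Fit_GAS_GAMMA_c$BIC, MEM = Fit_MEM_GAMMA$BIC)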
# PLOT OF MEM.
# Control graphics device, to obtain 3 X 1 grid.
par(mfrow = c(3,1))
plot(y = VIX$VIX.Adjusted,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'VIX',
main = 'i) VIX Index')
plot(y = Fit_MEM_GAMMA$FilteredValues$Mu,
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = expression(mu),
main = 'ii) Mean')
plot(y = Fit_MEM_GAMMA$FilteredValues$Mu^2/Fit_MEM_GAMMA$Par[4],
x = index(VIX),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = expression(sigma^2),
main = 'iii) Variance')
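# Reset the graphics layout so later plots are not drawn in the 3 x 1 grid set above.
par(mfrow = c(1, 1))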
# EXERCISE 2 #
# Author: Andreas Kracht Frandsen
# Date: 2020-01-16
# For documentation please read the Computational Part of the main document.
# Load required packages.
library(Rsolnp)
library(mvtnorm)
library(quantmod)
# Obtains the relevant updated dataset given in the assignment.
GSPC_DJI <- read.csv2('data/data.csv', sep = ';', dec = ',')
# Just to gather dates related to the dataset. Used for plotting.
getSymbols(Symbols = "^GSPC",
from = '2007-01-03',
to = '2019-01-01',
src = 'yahoo')
# First 5 observations of GSP_DJI.
head(GSPC_DJI)
# Plot of GSPC
plot(y = GSPC_DJI$GSPC,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Return',
main = 'SP500 Return Data')
# Plot of DJI
plot(y = GSPC_DJI$DJI,
x = index(head(GSPC$GSPC.Adjusted,-1)),
type = 'l',
xaxs = "i",
yaxs = "i",
xlab = 'Date',
ylab = 'Return',
main = 'DOW Return Data')
## GARCH(1,1) ##
# The following function is the filter for the GARCH(1,1) model.
# Input: Omega, Alpha and Beta, the parameters in the model (Double).
# Y (Vector), the returns of the relevant series.
# Output: Output (List), log likelihood (Double) and the variances (Vector).
GARCHFilter <- function(Y, Omega, Alpha, Beta) {
iT = length(Y)
Sigma2 = numeric(iT)
# The first variance is set to the empirical variance of the first 10 % of the observations.
Sigma2[1] = var(Y[1:round(iT * 0.1)])
# Compute the likelihood of the first observation.
LLK = dnorm(Y[1], 0, sqrt(Sigma2[1]), log = TRUE)
# For the rest (T-1) observations we use the updating equation.
for (t in 2:iT) {
Sigma2[t] = Omega + Alpha * Y[t-1]^2 + Beta * Sigma2[t - 1]
LLK = LLK + dnorm(Y[t], 0, sqrt(Sigma2[t]), log = TRUE)
}
Output = list()
Output[["LLK"]] = LLK
Output[["Sigma2"]] = Sigma2
return(Output)
}
# The following function evaluates the negative log likelihood for further use in the optimization proces.
# Input: Par (Vector), the parameters in the model, which are omega, alpha and beta.
# Y (Vector), the returns of the relevant series.
# Output: -LLK (Double), the negative log likelihood.
ObjectiveFunction <- function(Par, Y) {
Omega = Par[1]
Alpha = Par[2]
Beta = Par[3]
LLK = GARCHFilter(Y, Omega, Alpha, Beta)$LLK
return(-LLK)
}
# File: splitFiles.R
# Description: Splits up log files into separate datasets
# Author(s): Doekemeijer, R. A.
# Date created: 2019-10-28
# Last modified: 2019-11-05 (fixed path param)
if (!require("data.table")){install.packages("data.table")}
if (!require("dplyr")) {install.packages("dplyr")}; library(dplyr)
if (!require("Xmisc")) {install.packages("Xmisc")}; library(Xmisc)
library(data.table)
split_file <- function(path, filename, split_indicator, sep = "\t") {
# Splits a file for every header into a folder of the same name as the file
# Input: path = working directory (string); filename = name of file (string);
# split_indicator = element on which to split the files (string); sep = sep in files.
# Output: N/A.
# Saves: folder containing sections of the original file.
# SET-UP: Open connection to source and output file
if (filename == "NA") {return()}
wd <- getwd()
#setwd (path);
s = 1 # split number
sfilename = strsplit(filename, "[.]")[[1]][1]
extention = paste0(".", strsplit(filename, "[.]")[[1]][2])
sourcefile = file(paste0(path, "/", filename), "r")
cat(">> Splitting file:", filename, "\n")
if (!is.dir(sfilename)) {dir.create(sfilename)}
splitname = paste0(sfilename, "/", sfilename, "_s",
as.character(s), extention)
if (is.file(splitname)) {file.remove(splitname)}
file.create(splitname)
outputfile = file(splitname, open = "a")
# RUN: Split file based on lines containing split_indicator
s = 1; cat(">> Using output file (", s, ")\n")
while (TRUE) {
line = readLines(sourcefile, n = 1)
if (length(line) == 0) {cat(">>> DONE\n"); break}
# Write line to an outputfile
line_elements = strsplit(line, sep)[[1]]
if (split_indicator %in% line_elements) {
s = s + 1; cat(">> Using output file (", s, ")\n")
close(outputfile)
splitname = paste0(sfilename, "/", sfilename, "_s",
as.character(s), extention)
if (is.file(splitname)) {file.remove(splitname)}
file.create(splitname)
outputfile = file(splitname, open = "a")
}
writeLines(line, outputfile)
}
close(sourcefile)
close(outputfile)
setwd(wd)
}
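# Example usage (hypothetical path, file name and split indicator; adjust to your own log files):
# split_file(path = getwd(), filename = "measurements.tsv", split_indicator = "Header", sep = "\t")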
| /splitFiles.R | no_license | roosadoekemeijer/Rscripts | R | false | false | 2,231 | r | # File: splitFiles.R
# Description: Splits up log files into separate datasets
# Author(s): Doekemeijer, R. A.
# Date created: 2019-10-28
# Last modified: 2019-11-05 (fixed path param)
if (!require("data.table")){install.packages("data.table")}
if (!require("dplyr")) {install.packages("dplyr")}; library(dplyr)
if (!require("Xmisc")) {install.packages("Xmisc")}; library(Xmisc)
library(data.table)
split_file <- function(path, filename, split_indicator, sep = "\t") {
# Splits a file for every header into a folder of the same name as the file
# Input: path = working directory (string); filename = name of file (string);
# split_indicator = element on which to split the files (string); sep = sep in files.
# Output: N/A.
# Saves: folder containing sections of the original file.
# SET-UP: Open connection to source and output file
if (filename == "NA") {return()}
wd <- getwd()
#setwd (path);
s = 1 # split number
sfilename = strsplit(filename, "[.]")[[1]][1]
extention = paste0(".", strsplit(filename, "[.]")[[1]][2])
sourcefile = file(paste0(path, "/", filename), "r")
cat(">> Splitting file:", filename, "\n")
if (!is.dir(sfilename)) {dir.create(sfilename)}
splitname = paste0(sfilename, "/", sfilename, "_s",
as.character(s), extention)
if (is.file(splitname)) {file.remove(splitname)}
file.create(splitname)
outputfile = file(splitname, open = "a")
# RUN: Split file based on lines containing split_indicator
s = 1; cat(">> Using output file (", s, ")\n")
while (TRUE) {
line = readLines(sourcefile, n = 1)
if (length(line) == 0) {cat(">>> DONE\n"); break}
# Write line to an outputfile
line_elements = strsplit(line, sep)[[1]]
if (split_indicator %in% line_elements) {
s = s + 1; cat(">> Using output file (", s, ")\n")
close(outputfile)
splitname = paste0(sfilename, "/", sfilename, "_s",
as.character(s), extention)
if (is.file(splitname)) {file.remove(splitname)}
file.create(splitname)
outputfile = file(splitname, open = "a")
}
writeLines(line, outputfile)
}
close(sourcefile)
close(outputfile)
setwd(wd)
}
|
####### MAIN
#' extracts features from tremor task handToNose accelerometer and gyroscope JSON data file
#'
#'
#' @param tremorJsonFileLoc path to tremor accelerometer json file
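#' @param windowLen length (in samples) of the analysis windows and FIR band-pass filter (default 256)
#' @param freqRange two-element vector giving the band-pass range in Hz (default c(1, 25))
#' @param ovlp fractional overlap between consecutive windows (default 0.5)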
#' @return data frame of tremor features
#' @export
#' @examples
#' @author Thanneer Malai Perumal, Meghasyam Tummalacherla
getKineticTremorFeatures <- function(tremorJsonFileLoc, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5) {
# If no json file exists
ftrs = data.frame(Window = NA, error = NA)
if(all(is.na(tremorJsonFileLoc))){ ftrs$error = 'No JSON file'; return(ftrs) }
# Read contents of JSON file
dat = tryCatch({ jsonlite::fromJSON(as.character(tremorJsonFileLoc)) },
error = function(e){ NA })
if(all(is.na(dat))){ ftrs$error = 'JSON file read error'; return(ftrs) }
# Get sampling rate
samplingRate = length(dat$timestamp)/(dat$timestamp[length(dat$timestamp)] - dat$timestamp[1])
# Get accelerometer features
ftrs.acc = getKineticTremorFeatures.userAccel(dat, windowLen = windowLen, freqRange = freqRange, ovlp = ovlp)
  # Get gyroscope features
ftrs.gyro = getKineticTremorFeatures.rotRate(dat, windowLen = windowLen, freqRange = freqRange, ovlp = ovlp)
# Tag outliers windows based on phone rotation
gr.error = lapply(dat$gravity, function(x) {
accel = mpowertools:::windowSignal(x) %>%
as.data.frame() %>%
tidyr::gather(Window, value) %>%
dplyr::group_by(Window) %>%
dplyr::summarise(mx = max(value, na.rm = T),
mn = min(value, na.rm = T))
}) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'axis') %>%
dplyr::mutate(error = sign(mx) != sign(mn)) %>%
dplyr::group_by(Window) %>%
dplyr::summarise(error = any(error, na.rm = T))
gr.error$error[gr.error$error == TRUE] = 'Phone rotated within window'
gr.error$error[gr.error$error == FALSE] = 'None'
# Combine all features
ftrs = list(accelerometer = ftrs.acc, gyroscope = ftrs.gyro) %>%
data.table::rbindlist(use.names = TRUE, fill = T, idcol = 'sensor') %>%
tidyr::separate(Window, c('IMF','Window'), sep = '\\.') %>%
dplyr::left_join(gr.error, by = 'Window')
return(ftrs)
}
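# Example usage (hypothetical file path):
# ftrs <- getKineticTremorFeatures("handToNose_right_accel_gyro.json", windowLen = 256, freqRange = c(1, 25), ovlp = 0.5)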
# Function to extract kinetic tremor features from user acceleration from accelerometer
getKineticTremorFeatures.userAccel <- function(dat, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5){
  ftrs = data.frame(error = NA)
  # samplingRate is used below but is not defined in this function's scope; recompute it
  # here from the timestamps (same definition as in getKineticTremorFeatures).
  samplingRate = length(dat$timestamp)/(dat$timestamp[length(dat$timestamp)] - dat$timestamp[1])
# Rotate acceleration data to earth co-ordinates
userAccel = tryCatch({
userAccel = cbind(timestamp = dat$timestamp-dat$timestamp[1],
mpowertools:::get_quaternary_rotated_userAccel(dat)) %>%
as.data.frame()
ind = order(userAccel$timestamp)
userAccel = userAccel[ind, ] %>%
tidyr::gather(axis, accel, -timestamp)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'userAccel rotation error'; return(ftrs) }
# Detrend data
userAccel = tryCatch({
userAccel %>%
plyr::ddply(.(axis), .fun = function(x){
        x$accel = loess(x$accel~x$timestamp)$residuals
return(x)
})
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Detrend error'; return(ftrs) }
# Band pass filter signal between freqRange
userAccel = tryCatch({
userAccel %>%
plyr::ddply(.(axis), .fun = function(x, windowLen, sl, freqRange){
bandPassFilt = signal::fir1(windowLen-1, c(freqRange[1] * 2/sl, freqRange[2] * 2/sl),
type="pass",
window = seewave::hamming.w(windowLen))
x$accel = signal::filtfilt(bandPassFilt, x$accel)
return(x)
}, windowLen, samplingRate, freqRange)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Band pass filter error'; return(ftrs) }
# Filter signal between 1 and 9 sec
userAccel = tryCatch({
userAccel %>%
dplyr::filter(timestamp >= 1, timestamp <= 9)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Not enough time samples'; return(ftrs) }
# Split user acceleration into EMDs and window
userAccel = userAccel %>%
plyr::dlply(.(axis), .fun = function(accel, windowLen, ovlp){
imf = EMD::emd(accel$accel, accel$timestamp, max.imf = 4)$imf %>%
as.data.frame()
colnames(imf) = paste0('IMF',1:dim(imf)[2])
a = lapply(imf, mpowertools:::windowSignal, windowLen = windowLen, ovlp = ovlp)
a = mapply(function(x,y){
colnames(x) = paste(y,colnames(x),sep = '.'); return(x)
}, a, names(a), SIMPLIFY = F) %>%
do.call(cbind,.)
}, windowLen, ovlp)
# Get user jerk
userJerk = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, diff)*sl
}, samplingRate)
# Get user velocity
userVel = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, diffinv)*sl
}, samplingRate)
# Get user displacement
userDisp = userVel %>%
lapply(function(accel, sl){
apply(accel,2, diffinv)*sl
}, samplingRate)
# Get acf of user accel
userACF = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, function(x){acf(x, plot = F)$acf})
}, samplingRate)
  # Get time and frequency domain features for acceleration, jerk, velocity, displacement and the autocorrelation of acceleration
ftrs = list(ua = userAccel, uj = userJerk, uv = userVel, ud = userDisp, uaacf = userACF) %>%
plyr::ldply(.fun = function(userAccel){
plyr::ldply(userAccel, .fun = function(accel){
list(apply(accel, 2, getTimeDomainSummary, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainSummary, samplingRate = samplingRate, npeaks = 3) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainEnergy, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window')) %>%
plyr::join_all(by = 'Window')
}, .id = 'axis')
}, .id = 'measurementType')
return(ftrs)
}
# Function to extract tremor features from user angular velocity from gyroscope
getKineticTremorFeatures.rotRate <- function(dat, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5) {
  ftrs = data.frame(error = NA)
  # samplingRate is used below but is not defined in this function's scope; recompute it
  # here from the timestamps (same definition as in getKineticTremorFeatures).
  samplingRate = length(dat$timestamp)/(dat$timestamp[length(dat$timestamp)] - dat$timestamp[1])
# Get user angular velocity from gyro data
userAngVel = tryCatch({
userAngVel = cbind(timestamp = dat$timestamp-dat$timestamp[1],
dat$rotationRate) %>%
as.data.frame()
ind = order(userAngVel$timestamp)
userAngVel = userAngVel[ind, ] %>%
tidyr::gather(axis, angvel, -timestamp)
}, error = function(e){ NA })
  if(all(is.na(userAngVel))){ ftrs$error = 'rotationRate read error'; return(ftrs) }
# Detrend data
userAngVel = tryCatch({
userAngVel %>%
plyr::ddply(.(axis), .fun = function(x){
        x$angvel = loess(x$angvel~x$timestamp)$residuals
        return(x)
})
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Detrend error'; return(ftrs) }
# Band pass filter signal between freqRange
userAngVel = tryCatch({
userAngVel %>%
plyr::ddply(.(axis), .fun = function(x, windowLen, sl, freqRange){
bandPassFilt = signal::fir1(windowLen-1, c(freqRange[1] * 2/sl, freqRange[2] * 2/sl),
type="pass",
window = seewave::hamming.w(windowLen))
x$angvel = signal::filtfilt(bandPassFilt, x$angvel)
return(x)
}, windowLen, samplingRate, freqRange)
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Band pass filter error'; return(ftrs) }
# Filter signal between 1 and 9 sec
userAngVel = tryCatch({
userAngVel %>%
dplyr::filter(timestamp >= 1, timestamp <= 9)
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Not enough time samples'; return(ftrs) }
# Split user acceleration into EMDs and window
userAngVel = userAngVel %>%
plyr::dlply(.(axis), .fun = function(accel, windowLen, ovlp){
imf = EMD::emd(accel$angvel, accel$timestamp, max.imf = 4)$imf %>%
as.data.frame()
colnames(imf) = paste0('IMF',1:dim(imf)[2])
a = lapply(imf, mpowertools:::windowSignal, windowLen = windowLen, ovlp = ovlp)
a = mapply(function(x,y){
colnames(x) = paste(y,colnames(x),sep = '.'); return(x)
}, a, names(a), SIMPLIFY = F) %>%
do.call(cbind,.)
}, windowLen, ovlp)
# Get user angular acceleration
userAngAcc = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel, 2, diff)*sl
}, samplingRate)
# Get user angular displacement
userAngDis = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel,2, diffinv)*sl
}, samplingRate)
# Get user acf (ACF of user angular velocity)
userACF = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel,2, function(x){acf(x, plot = F)$acf})
}, samplingRate)
  # Get time and frequency domain features for angular velocity, angular acceleration, angular displacement and the autocorrelation of angular velocity
ftrs = list(uav = userAngVel, uaa = userAngAcc, uad = userAngDis, uavacf = userACF) %>%
plyr::ldply(.fun = function(userAccel){
plyr::ldply(userAccel, .fun = function(accel){
list(apply(accel, 2, getTimeDomainSummary, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainSummary, samplingRate = samplingRate, npeaks = 3) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainEnergy, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window')) %>%
plyr::join_all(by = 'Window')
}, .id = 'axis')
}, .id = 'measurementType')
return(ftrs)
}
| /R/getKineticTremorFeatures.R | no_license | itismeghasyam/mpowertools | R | false | false | 9,983 | r |
#' extracts features from tremor task handToNose accelerometer and gyroscope JSON data file
#'
#'
#' @param tremorJsonFileLoc path to tremor accelerometer json file
#' @return data frame of tremor features
#' @export
#' @examples
#' @author Thanneer Malai Perumal, Meghasyam Tummalacherla
getKineticTremorFeatures <- function(tremorJsonFileLoc, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5) {
# If no json file exists
ftrs = data.frame(Window = NA, error = NA)
if(all(is.na(tremorJsonFileLoc))){ ftrs$error = 'No JSON file'; return(ftrs) }
# Read contents of JSON file
dat = tryCatch({ jsonlite::fromJSON(as.character(tremorJsonFileLoc)) },
error = function(e){ NA })
if(all(is.na(dat))){ ftrs$error = 'JSON file read error'; return(ftrs) }
# Get sampling rate
samplingRate = length(dat$timestamp)/(dat$timestamp[length(dat$timestamp)] - dat$timestamp[1])
# Get accelerometer features
ftrs.acc = getKineticTremorFeatures.userAccel(dat, windowLen = windowLen, freqRange = freqRange, ovlp = ovlp)
# Get accelerometer features
ftrs.gyro = getKineticTremorFeatures.rotRate(dat, windowLen = windowLen, freqRange = freqRange, ovlp = ovlp)
# Tag outliers windows based on phone rotation
gr.error = lapply(dat$gravity, function(x) {
accel = mpowertools:::windowSignal(x) %>%
as.data.frame() %>%
tidyr::gather(Window, value) %>%
dplyr::group_by(Window) %>%
dplyr::summarise(mx = max(value, na.rm = T),
mn = min(value, na.rm = T))
}) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'axis') %>%
dplyr::mutate(error = sign(mx) != sign(mn)) %>%
dplyr::group_by(Window) %>%
dplyr::summarise(error = any(error, na.rm = T))
gr.error$error[gr.error$error == TRUE] = 'Phone rotated within window'
gr.error$error[gr.error$error == FALSE] = 'None'
# Combine all features
ftrs = list(accelerometer = ftrs.acc, gyroscope = ftrs.gyro) %>%
data.table::rbindlist(use.names = TRUE, fill = T, idcol = 'sensor') %>%
tidyr::separate(Window, c('IMF','Window'), sep = '\\.') %>%
dplyr::left_join(gr.error, by = 'Window')
return(ftrs)
}
# Function to extract kinetic tremor features from user acceleration from accelerometer
getKineticTremorFeatures.userAccel <- function(dat, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5){
ftrs = data.frame(error = NA)
# Rotate acceleration data to earth co-ordinates
userAccel = tryCatch({
userAccel = cbind(timestamp = dat$timestamp-dat$timestamp[1],
mpowertools:::get_quaternary_rotated_userAccel(dat)) %>%
as.data.frame()
ind = order(userAccel$timestamp)
userAccel = userAccel[ind, ] %>%
tidyr::gather(axis, accel, -timestamp)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'userAccel rotation error'; return(ftrs) }
# Detrend data
userAccel = tryCatch({
userAccel %>%
plyr::ddply(.(axis), .fun = function(x){
x$accel = loess(x$accel~x$timestamp)$residual
return(x)
})
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Detrend error'; return(ftrs) }
# Band pass filter signal between freqRange
userAccel = tryCatch({
userAccel %>%
plyr::ddply(.(axis), .fun = function(x, windowLen, sl, freqRange){
bandPassFilt = signal::fir1(windowLen-1, c(freqRange[1] * 2/sl, freqRange[2] * 2/sl),
type="pass",
window = seewave::hamming.w(windowLen))
x$accel = signal::filtfilt(bandPassFilt, x$accel)
return(x)
}, windowLen, samplingRate, freqRange)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Band pass filter error'; return(ftrs) }
# Filter signal between 1 and 9 sec
userAccel = tryCatch({
userAccel %>%
dplyr::filter(timestamp >= 1, timestamp <= 9)
}, error = function(e){ NA })
if(all(is.na(userAccel))){ ftrs$error = 'Not enough time samples'; return(ftrs) }
# Split user acceleration into EMDs and window
userAccel = userAccel %>%
plyr::dlply(.(axis), .fun = function(accel, windowLen, ovlp){
imf = EMD::emd(accel$accel, accel$timestamp, max.imf = 4)$imf %>%
as.data.frame()
colnames(imf) = paste0('IMF',1:dim(imf)[2])
a = lapply(imf, mpowertools:::windowSignal, windowLen = windowLen, ovlp = ovlp)
a = mapply(function(x,y){
colnames(x) = paste(y,colnames(x),sep = '.'); return(x)
}, a, names(a), SIMPLIFY = F) %>%
do.call(cbind,.)
}, windowLen, ovlp)
# Get user jerk
userJerk = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, diff)*sl
}, samplingRate)
# Get user velocity
userVel = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, diffinv)*sl
}, samplingRate)
# Get user displacement
userDisp = userVel %>%
lapply(function(accel, sl){
apply(accel,2, diffinv)*sl
}, samplingRate)
# Get acf of user accel
userACF = userAccel %>%
lapply(function(accel, sl){
apply(accel,2, function(x){acf(x, plot = F)$acf})
}, samplingRate)
# Get time and frequency domain features for angular velocity, acceleration, displacement, auto correlation of velocity
ftrs = list(ua = userAccel, uj = userJerk, uv = userVel, ud = userDisp, uaacf = userACF) %>%
plyr::ldply(.fun = function(userAccel){
plyr::ldply(userAccel, .fun = function(accel){
list(apply(accel, 2, getTimeDomainSummary, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainSummary, samplingRate = samplingRate, npeaks = 3) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainEnergy, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window')) %>%
plyr::join_all(by = 'Window')
}, .id = 'axis')
}, .id = 'measurementType')
return(ftrs)
}
# Function to extract tremor features from user angular velocity from gyroscope
getKineticTremorFeatures.rotRate <- function(dat, windowLen = 256, freqRange = c(1, 25), ovlp = 0.5) {
ftrs = data.frame(error = NA)
# Get user angular velocity from gyro data
userAngVel = tryCatch({
userAngVel = cbind(timestamp = dat$timestamp-dat$timestamp[1],
dat$rotationRate) %>%
as.data.frame()
ind = order(userAngVel$timestamp)
userAngVel = userAngVel[ind, ] %>%
tidyr::gather(axis, angvel, -timestamp)
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'userAccel rotation error'; return(ftrs) }
# Detrend data
userAngVel = tryCatch({
userAngVel %>%
plyr::ddply(.(axis), .fun = function(x){
x$angvel = loess(x$angvel~x$timestamp)$residual
x <- return(x)
})
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Detrend error'; return(ftrs) }
# Band pass filter signal between freqRange
userAngVel = tryCatch({
userAngVel %>%
plyr::ddply(.(axis), .fun = function(x, windowLen, sl, freqRange){
bandPassFilt = signal::fir1(windowLen-1, c(freqRange[1] * 2/sl, freqRange[2] * 2/sl),
type="pass",
window = seewave::hamming.w(windowLen))
x$angvel = signal::filtfilt(bandPassFilt, x$angvel)
return(x)
}, windowLen, samplingRate, freqRange)
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Band pass filter error'; return(ftrs) }
# Filter signal between 1 and 9 sec
userAngVel = tryCatch({
userAngVel %>%
dplyr::filter(timestamp >= 1, timestamp <= 9)
}, error = function(e){ NA })
if(all(is.na(userAngVel))){ ftrs$error = 'Not enough time samples'; return(ftrs) }
# Split user acceleration into EMDs and window
userAngVel = userAngVel %>%
plyr::dlply(.(axis), .fun = function(accel, windowLen, ovlp){
imf = EMD::emd(accel$angvel, accel$timestamp, max.imf = 4)$imf %>%
as.data.frame()
colnames(imf) = paste0('IMF',1:dim(imf)[2])
a = lapply(imf, mpowertools:::windowSignal, windowLen = windowLen, ovlp = ovlp)
a = mapply(function(x,y){
colnames(x) = paste(y,colnames(x),sep = '.'); return(x)
}, a, names(a), SIMPLIFY = F) %>%
do.call(cbind,.)
}, windowLen, ovlp)
# Get user angular acceleration
userAngAcc = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel, 2, diff)*sl
}, samplingRate)
# Get user angular displacement
userAngDis = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel,2, diffinv)*sl
}, samplingRate)
# Get user acf (ACF of user angular velocity)
userACF = userAngVel %>%
lapply(function(angvel, sl){
apply(angvel,2, function(x){acf(x, plot = F)$acf})
}, samplingRate)
# Get time and frequency domain features for angular velocity, acceleration, displacement, auto correlation of velocity
ftrs = list(uav = userAngVel, uaa = userAngAcc, uad = userAngDis, uavacf = userACF) %>%
plyr::ldply(.fun = function(userAccel){
plyr::ldply(userAccel, .fun = function(accel){
list(apply(accel, 2, getTimeDomainSummary, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainSummary, samplingRate = samplingRate, npeaks = 3) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window'),
apply(accel, 2, getFrequencyDomainEnergy, samplingRate) %>%
data.table::rbindlist(use.names = T, fill = T, idcol = 'Window')) %>%
plyr::join_all(by = 'Window')
}, .id = 'axis')
}, .id = 'measurementType')
return(ftrs)
} |
##### Sea Surface Temperature Data
# West Coast
# Run Rottnest (west coast) coords
Long <- 116
Lat <- 32
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto.csv")
# Run Rottnest (west coast) coords
Long <- 115
Lat <- 32
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto1.csv")
# Run Rottnest (west coast) coords
Long <- 115
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto2.csv")
# Run Rottnest (west coast) coords
Long <- 114
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto3.csv")
# Run Rottnest (west coast) coords
Long <- 116
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto4.csv")
# South Coast
# Run Augusta (south coast) coords
Long <- 114
Lat <- 34
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug.csv")
# Run Augusta (south coast) coords
Long <- 115
Lat <- 34
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug1.csv")
# Run Augusta (south coast) coords
Long <- 116
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug2.csv")
# Run Albany (south coast) coords
Long <- 118
Lat <- 36
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Albany.csv")
# Run Albany east 1 (south coast) coords
Long <- 119
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Albany1.csv")
# Run Esperance (south coast) coords
Long <- 120
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Esp.csv")
# Run Esperance east 1 (south coast) coords
Long <- 121
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Esp1.csv") | /6. Environmental Data Collection/RunSource_SST.R | no_license | PachoAlvarez/Foxfish_chronology | R | false | false | 2,024 | r | ##### Sea Surface Temperature Data
# West Coast
# Run Rottnest (west coast) coords
Long <- 116
Lat <- 32
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto.csv")
# Run Rottnest (west coast) coords
Long <- 115
Lat <- 32
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto1.csv")
# Run Rottnest (west coast) coords
Long <- 115
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto2.csv")
# Run Rottnest (west coast) coords
Long <- 114
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto3.csv")
# Run Rottnest (west coast) coords
Long <- 116
Lat <- 33
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Rotto4.csv")
# South Coast
# Run Augusta (south coast) coords
Long <- 114
Lat <- 34
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug.csv")
# Run Augusta (south coast) coords
Long <- 115
Lat <- 34
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug1.csv")
# Run Augusta (south coast) coords
Long <- 116
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Aug2.csv")
# Run Albany (south coast) coords
Long <- 118
Lat <- 36
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Albany.csv")
# Run Albany east 1 (south coast) coords
Long <- 119
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Albany1.csv")
# Run Esperance (south coast) coords
Long <- 120
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Esp.csv")
# Run Esperance east 1 (south coast) coords
Long <- 121
Lat <- 35
AllYears <- 2013:1931
system.time(source("SST_Data.R"))
write.csv(BigMat, file = "SST_Data_Esp1.csv") |
library(shiny)
library(shinythemes)
library(tidyverse)
library(DT)
library(ggrepel)
library(maps)
library(viridis)
##########################################################################
############################## import data ###############################
##########################################################################
mental <- read_csv("Ayo ShinyApp/map_mental_health.csv")
health <- read_csv("Ayo ShinyApp/map_health.csv")
provider <- read_csv("Ayo ShinyApp/map_provider.csv")
covid <- read_csv("Ayo ShinyApp/map_covid.csv")
##########################################################################
######## define choice values and labels for widgets (user inputs) #######
##########################################################################
# for TAB 1 (Spatial) widgets:
# for selectInput, 'choices' object should be a NAMED LIST
race_choice_values <- c("all_adults", "black", "white", "asian")
race_choice_names <- c("All Adults", "Black", "White", "Asian")
names(race_choice_values) <- race_choice_names
#for selectInput state (state_choices is not used by the UI below, and `usa_mental_health` is not defined in this script)
# state_choices <- unique(usa_mental_health$state)
##########################################################################
################################ ui ################################
##########################################################################
ui <- navbarPage(
title="Racial Health Inequities",
tabPanel(
title = "Mental Health",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "mental")
)
)
),
tabPanel(
title = "Overall Health",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar1"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "health")
)
)
),
tabPanel(
title = "Provider Access",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar2"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "provider")
)
)
),
tabPanel(
title = "COVID",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar3"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "covid")
)
)
)
)
##########################################################################
################################ server ################################
##########################################################################
server <- function(input,output){
# TAB Mental Health #:
output$mental <- renderPlot({
ggplot(data = mental
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar],
fill = "Average Number of Poor Mental Health Days Reported \n in the last 30 days",
caption = "* States with 0 days reported may have missing data",
title = "Looking at Mental Health by Racial Group",
subtitle= "in the United States in 2019")
})
# TAB Overall health #:
output$health <- renderPlot({
ggplot(data = health
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar1)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar1],
fill = "Number of Fair or Poor Health Days",
caption = "* States with 0 days reported may have missing data",
title = "Adults Who Report Fair/Poor Mental Health Days",
subtitle= "in the United States in 2019")
})
# TAB provider #:
output$provider <- renderPlot({
ggplot(data = provider
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar2)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar2],
fill = "Percentage of Adults",
caption = "* States with 0 adults reported may have missing data",
title = "Adults Who Report Not Seeing a Doctor n\ in the Past 12 Months Because of Cost",
subtitle= "In the United States in 2019")
})
# TAB COVID #:
output$covid <- renderPlot({
ggplot(data = covid
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar3)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar3],
fill = "Number of COVID Deaths",
caption = "* States with 0 days reported may have missing data",
title = "Number of COVID Deaths",
subtitle= "in the United States as of May 2021")
})
}
##########################################################################
############################ call to shinyApp ############################
##########################################################################
shinyApp(ui = ui, server = server)
| /Ayo ShinyApp/App_ayo.R | no_license | stat231-s21/Blog-VIVACIOUS-VAXXERS | R | false | false | 6,411 | r | library(shiny)
library(shinythemes)
library(tidyverse)
library(DT)
library(ggrepel)
library(maps)
library(viridis)
##########################################################################
############################## import data ###############################
##########################################################################
mental <- read_csv("Ayo ShinyApp/map_mental_health.csv")
health <- read_csv("Ayo ShinyApp/map_health.csv")
provider <- read_csv("Ayo ShinyApp/map_provider.csv")
covid <- read_csv("Ayo ShinyApp/map_covid.csv")
##########################################################################
######## define choice values and labels for widgets (user inputs) #######
##########################################################################
# for TAB 1 (Spatial) widgets:
# for selectInput, 'choices' object should be a NAMED LIST
race_choice_values <- c("all_adults", "black", "white", "asian")
race_choice_names <- c("All Adults", "Black", "White", "Asian")
names(race_choice_values) <- race_choice_names
#for selectInput state
state_choices <- unique(usa_mental_health$state)
##########################################################################
################################ ui ################################
##########################################################################
ui <- navbarPage(
title="Racial Health Inequities",
tabPanel(
title = "Mental Health",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "mental")
)
)
),
tabPanel(
title = "Overall Health",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar1"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "health")
)
)
),
tabPanel(
title = "Provider Access",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar2"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "provider")
)
)
),
tabPanel(
title = "COVID",
sidebarLayout(
sidebarPanel(
selectizeInput(inputId = "racevar3"
, label = "Choose a racial group:"
, choices = race_choice_values
, selected = "all_adults")
),
mainPanel(
plotOutput(outputId = "covid")
)
)
)
)
##########################################################################
################################ server ################################
##########################################################################
server <- function(input,output){
# TAB Mental Health #:
output$mental <- renderPlot({
ggplot(data = mental
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar],
fill = "Average Number of Poor Mental Health Days Reported \n in the last 30 days",
caption = "* States with 0 days reported may have missing data",
title = "Looking at Mental Health by Racial Group",
subtitle= "in the United States in 2019")
})
# TAB Overall health #:
output$health <- renderPlot({
ggplot(data = health
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar1)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar1],
fill = "Number of Fair or Poor Health Days",
caption = "* States with 0 days reported may have missing data",
title = "Adults Who Report Fair/Poor Mental Health Days",
subtitle= "in the United States in 2019")
})
# TAB provider #:
output$provider <- renderPlot({
ggplot(data = provider
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar2)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar2],
fill = "Percentage of Adults",
caption = "* States with 0 adults reported may have missing data",
title = "Adults Who Report Not Seeing a Doctor n\ in the Past 12 Months Because of Cost",
subtitle= "In the United States in 2019")
})
# TAB COVID #:
output$covid <- renderPlot({
ggplot(data = covid
, aes_string(x = "long"
, y = "lat"
, group = "group"
, fill = input$racevar3)) +
geom_polygon(colour = "grey") +
scale_fill_viridis(option = "D", direction = 1) +
theme_void() +
coord_fixed(ratio = 1.3)+
labs(x = race_choice_names[race_choice_values == input$racevar3],
fill = "Number of COVID Deaths",
caption = "* States with 0 days reported may have missing data",
title = "Number of COVID Deaths",
subtitle= "in the United States as of May 2021")
})
}
##########################################################################
############################ call to shinyApp ############################
##########################################################################
shinyApp(ui = ui, server = server)
|
#'## Install packages needed for epigenomics tutorial
#' This should be run before the workshop
#' because of the need to download large datasets.
#'
#'## It is important that you have already updated R
#'you should be running version 3.5.0
R.version$version.string
#' this is because many packages update and change to fix bugs and add new features.
#' Local library
.libPaths("C:/EBC3/Rpackages")
#' Please consider updating your packages
#' this step requires agreeing for each update (y for yes)
update.packages(lib.loc="C:/EBC3/Rpackages")
#'# Installation of new packages
#' vector of packages we will need if not yet installed:
methpackagesCRAN <- c("Rcpp","openssl","CpGassoc", "rmarkdown", "knitr", "matrixStats","reshape","glmnet","statmod","XML",
"pryr", "data.table", "qqman", "RPMM", "MASS", "sandwich", "lmtest","foreach", "stringi","doParallel","magrittr","purrr")
methpackagesBioC <- c("minfi", "FlowSorted.Blood.450k", "missMethyl", "ENmix","IlluminaHumanMethylation450kanno.ilmn12.hg19",
"IlluminaHumanMethylation450kmanifest", "IlluminaHumanMethylationEPICmanifest",
"sva", "IlluminaHumanMethylationEPICanno.ilm10b2.hg19",
"DMRcate", "shinyMethyl","bumphunter","wateRmelon","FDb.InfiniumMethylation.hg19")
#' install these from CRAN:
toinstallCRAN <- setdiff(methpackagesCRAN, installed.packages()[,1])
if(length(toinstallCRAN) >= 1) {
install.packages(toinstallCRAN,dependencies=TRUE, lib="C:/EBC3/Rpackages")
cat("finished installing new packages from CRAN\n")
} else cat("packages we need from CRAN are already installed\n")
#' install these from BioConductor:
toinstallBioC <- setdiff(methpackagesBioC, installed.packages()[,1])
if(length(toinstallBioC) >= 1) {
source("https://bioconductor.org/biocLite.R")
biocLite(toinstallBioC, suppressUpdates = T,lib="C:/EBC3/Rpackages")
cat("finished installing new packages from BioConductor\n")
} else cat("packages we need from BioConductor are already installed\n")
devtools::install_github("hhhh5/ewastools@v1.5")
#' check that we were successful
if(!all(c(toinstallBioC, toinstallCRAN) %in% installed.packages()[,1])) stop(
"required packages not installed - please retry script carefully making sure you have already updated R and work through any error messages")
if(!as.numeric(sub("\\.[0-9]$", "", installed.packages()["minfi","Version"])) >= 1.24) stop(
"you don't have the minfi version needed for this workshop")
#' Session Information
#' If you cannot successfully work through this script, please run the following two commands
#' and send the output to the workshop organizers with your request for help:
#sessionInfo()
#installed.packages()[,c("Package", "Version")]
#' cleanup
rm(methpackagesCRAN, methpackagesBioC, toinstallCRAN, toinstallBioC)
#' End of script
| /code/install_packages.R | permissive | wangdi2014/methylation-lab | R | false | false | 2,874 | r | #'## Install packages needed for epigenomics tutorial
#' This should be run before the workshop
#' because of the need to download large datasets.
#'
#'## It is important that you have already updated R
#'you should be running version 3.5.0
R.version$version.string
#' this is because many packages update and change to fix bugs and add new features.
#' Local library
.libPaths("C:/EBC3/Rpackages")
#' Please consider updating your packages
#' this step requires agreeing for each update (y for yes)
update.packages(lib.loc="C:/EBC3/Rpackages")
#'# Installation of new packages
#' vector of packages we will need if not yet installed:
methpackagesCRAN <- c("Rcpp","openssl","CpGassoc", "rmarkdown", "knitr", "matrixStats","reshape","glmnet","statmod","XML",
"pryr", "data.table", "qqman", "RPMM", "MASS", "sandwich", "lmtest","foreach", "stringi","doParallel","magrittr","purrr")
methpackagesBioC <- c("minfi", "FlowSorted.Blood.450k", "missMethyl", "ENmix","IlluminaHumanMethylation450kanno.ilmn12.hg19",
"IlluminaHumanMethylation450kmanifest", "IlluminaHumanMethylationEPICmanifest",
"sva", "IlluminaHumanMethylationEPICanno.ilm10b2.hg19",
"DMRcate", "shinyMethyl","bumphunter","wateRmelon","FDb.InfiniumMethylation.hg19")
#' install these from CRAN:
toinstallCRAN <- setdiff(methpackagesCRAN, installed.packages()[,1])
if(length(toinstallCRAN) >= 1) {
install.packages(toinstallCRAN,dependencies=TRUE, lib="C:/EBC3/Rpackages")
cat("finished installing new packages from CRAN\n")
} else cat("packages we need from CRAN are already installed\n")
#' install these from BioConductor:
toinstallBioC <- setdiff(methpackagesBioC, installed.packages()[,1])
if(length(toinstallBioC) >= 1) {
source("https://bioconductor.org/biocLite.R")
biocLite(toinstallBioC, suppressUpdates = T,lib="C:/EBC3/Rpackages")
cat("finished installing new packages from BioConductor\n")
} else cat("packages we need from BioConductor are already installed\n")
devtools::install_github("hhhh5/ewastools@v1.5")
#' check that we were successful
if(!all(c(toinstallBioC, toinstallCRAN) %in% installed.packages()[,1])) stop(
"required packages not installed - please retry script carefully making sure you have already updated R and work through any error messages")
if(!as.numeric(sub("\\.[0-9]$", "", installed.packages()["minfi","Version"])) >= 1.24) stop(
"you don't have the minfi version needed for this workshop")
#' Session Information
#' If you cannot successfully work through this script, please run the following two commands
#' and send the output to the workshop organizers with your request for help:
#sessionInfo()
#installed.packages()[,c("Package", "Version")]
#' cleanup
rm(methpackagesCRAN, methpackagesBioC, toinstallCRAN, toinstallBioC)
#' End of script
|
library(tidyverse)
library(lubridate)
library(readxl)
library(tidyxl)
library(unpivotr)
library(janitor)
library(here)
#############################
# Figure out what's in the file
#############################
path <- here::here('data', 'raw_original', 'PAD.xlsx')
#########################
# read_excel plus other strategies
sheetnames <- tolower(excel_sheets(path))
sheetnames <- gsub(' ', '_', sheetnames)
# manually figuring out the last column of the main data table in each sheet:
lastcols <- c('M', 'F', 'P', 'M', 'P',
'M', 'P', 'P', 'P', 'M',
'P', 'P', 'Q', 'P', 'P',
'L', 'L', 'L', 'L', 'H',
'J', 'J', 'K', 'F')
# make sure that's the same length as the sheet names
length(sheetnames) == length(lastcols)
# generate data ranges for reading in sheets
ranges <- paste0('A1:', lastcols, '37')
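# e.g. ranges[1] is "A1:M37": each sheet is read from cell A1 through row 37 of its last data column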
#############################################
# read in all the sheets
#############################################
# set up the list
dat <- list()
# read the sheets in
for(i in seq_along(sheetnames)){
dat_in <- read_excel(path, sheet = i, range = ranges[i])
dat_tidied <- dat_in %>%
rename(arm_position = 'arm position',
pin_number = pin) %>%
gather(key = date, value = pin_height, -arm_position, -pin_number) %>%
mutate(date = excel_numeric_to_date(as.numeric(date)),
reserve = 'PDB',
set_id = sheetnames[i]) %>%
select(reserve, set_id, date, arm_position, pin_number, pin_height) %>%
arrange(set_id, date, arm_position, pin_number)
dat[[i]] <- dat_tidied
}
############################################
# glue everything together into one data frame
############################################
dat_all <- reshape::merge_recurse(dat)
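# Since every element of `dat` has identical columns, an equivalent (and typically
# faster) alternative sketch would be: dat_all <- dplyr::bind_rows(dat)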
## if pin height isn't a number, make it an na (there are some comments)
dat_all <- dat_all %>%
mutate(pin_height_cm = as.numeric(pin_height)) %>% # introduces NAs and that's okay
select(-pin_height)
# spit it back out
path_out <- here('data', 'intermediate', 'PDB.csv')
write_csv(dat_all, path_out)
| /R/001a_data_reshaping-PDB.R | no_license | swmpkim/SETr_script_development | R | false | false | 2,116 | r | library(tidyverse)
library(lubridate)
library(readxl)
library(tidyxl)
library(unpivotr)
library(janitor)
library(here)
#############################
# Figure out what's in the file
#############################
path <- here::here('data', 'raw_original', 'PAD.xlsx')
#########################
# read_excel plus other strategies
sheetnames <- tolower(excel_sheets(path))
sheetnames <- gsub(' ', '_', sheetnames)
# manually figuring out the last column of the main data table in each sheet:
lastcols <- c('M', 'F', 'P', 'M', 'P',
'M', 'P', 'P', 'P', 'M',
'P', 'P', 'Q', 'P', 'P',
'L', 'L', 'L', 'L', 'H',
'J', 'J', 'K', 'F')
# make sure that's the same length as the sheet names
length(sheetnames) == length(lastcols)
# generate data ranges for reading in sheets
ranges <- paste0('A1:', lastcols, '37')
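# e.g. ranges[1] is "A1:M37": each sheet is read from cell A1 through row 37 of its last data column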
#############################################
# read in all the sheets
#############################################
# set up the list
dat <- list()
# read the sheets in
for(i in seq_along(sheetnames)){
dat_in <- read_excel(path, sheet = i, range = ranges[i])
dat_tidied <- dat_in %>%
rename(arm_position = 'arm position',
pin_number = pin) %>%
gather(key = date, value = pin_height, -arm_position, -pin_number) %>%
mutate(date = excel_numeric_to_date(as.numeric(date)),
reserve = 'PDB',
set_id = sheetnames[i]) %>%
select(reserve, set_id, date, arm_position, pin_number, pin_height) %>%
arrange(set_id, date, arm_position, pin_number)
dat[[i]] <- dat_tidied
}
############################################
# glue everything together into one data frame
############################################
dat_all <- reshape::merge_recurse(dat)
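# Since every element of `dat` has identical columns, an equivalent (and typically
# faster) alternative sketch would be: dat_all <- dplyr::bind_rows(dat)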
## if pin height isn't a number, make it an na (there are some comments)
dat_all <- dat_all %>%
mutate(pin_height_cm = as.numeric(pin_height)) %>% # introduces NAs and that's okay
select(-pin_height)
# spit it back out
path_out <- here('data', 'intermediate', 'PDB.csv')
write_csv(dat_all, path_out)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetchNASISWebReport.R
\name{fetchNASISWebReport}
\alias{fetchNASISWebReport}
\alias{get_project_from_NASISWebReport}
\alias{get_progress_from_NASISWebReport}
\alias{get_project_correlation_from_NASISWebReport}
\alias{get_legend_from_NASISWebReport}
\alias{get_mapunit_from_NASISWebReport}
\alias{get_projectmapunit_from_NASISWebReport}
\alias{get_projectmapunit2_from_NASISWebReport}
\alias{get_component_from_NASISWebReport}
\alias{get_chorizon_from_NASISWebReport}
\alias{get_cosoilmoist_from_NASISWebReport}
\alias{get_sitesoilmoist_from_NASISWebReport}
\alias{get_lmuaoverlap_from_NASISWebReport}
\title{Get component tables from NASIS Web Reports}
\usage{
fetchNASISWebReport(
projectname,
rmHzErrors = FALSE,
fill = FALSE,
stringsAsFactors = default.stringsAsFactors()
)
get_component_from_NASISWebReport(
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_chorizon_from_NASISWebReport(
projectname,
fill = FALSE,
stringsAsFactors = default.stringsAsFactors()
)
get_legend_from_NASISWebReport(
mlraoffice,
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_lmuaoverlap_from_NASISWebReport(
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_mapunit_from_NASISWebReport(
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_projectmapunit_from_NASISWebReport(
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_projectmapunit2_from_NASISWebReport(
mlrassoarea,
fiscalyear,
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_project_from_NASISWebReport(mlrassoarea, fiscalyear)
get_progress_from_NASISWebReport(mlrassoarea, fiscalyear, projecttypename)
get_project_correlation_from_NASISWebReport(
mlrassoarea,
fiscalyear,
projectname
)
}
\arguments{
\item{projectname}{text string vector of project names to be inserted into a
SQL WHERE clause (default: \code{NA})}
\item{rmHzErrors}{should pedons with horizonation errors be removed from the
results? (default: \code{FALSE})}
\item{fill}{should rows with missing component ids be removed (default: \code{FALSE})}
\item{stringsAsFactors}{logical: should character vectors be converted to
factors? This argument is passed to the \code{uncode()} function. It does not
convert those vectors that have been set outside of \code{uncode()} (i.e. hard
coded). The 'factory-fresh' default is TRUE, but this can be changed by
setting options(\code{stringsAsFactors = FALSE})}
\item{mlraoffice}{text string value identifying the MLRA Regional Soil
Survey Office group name inserted into a SQL WHERE clause (default: \code{NA})}
\item{areasymbol}{text string value identifying the area symbol (e.g.
\code{IN001} or \verb{IN\%}) inserted into a SQL WHERE clause (default: \code{NA})}
\item{droplevels}{logical: indicating whether to drop unused levels in
classifying factors. This is useful when a factor has a large number of unused
levels, which can waste space in tables and figures.}
\item{mlrassoarea}{text string value identifying the MLRA Soil Survey Office
areasymbol symbol inserted into a SQL WHERE clause (default: \code{NA})}
\item{fiscalyear}{text string value identifying the fiscal year inserted
into a SQL WHERE clause (default: \code{NA})}
\item{projecttypename}{text string value identifying the project type name
inserted into a SQL WHERE clause (default: \code{NA})}
}
\value{
A data.frame or list with the results.
}
\description{
Get component tables from NASIS Web Reports
}
\examples{
\donttest{
if (requireNamespace("curl") &
curl::has_internet() &
require("aqp") &
require("ggplot2") &
require("gridExtra")
) {
# query soil components by projectname
test = fetchNASISWebReport(
"EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded",
stringsAsFactors = TRUE)
test = test$spc
# profile plot
plot(test)
# convert the data for depth plot
clay_slice = horizons(slice(test, 0:200 ~ claytotal_l + claytotal_r + claytotal_h))
names(clay_slice) <- gsub("claytotal_", "", names(clay_slice))
om_slice = horizons(slice(test, 0:200 ~ om_l + om_r + om_h))
names(om_slice) = gsub("om_", "", names(om_slice))
test2 = rbind(data.frame(clay_slice, var = "clay"),
data.frame(om_slice, var = "om")
)
h = merge(test2, site(test)[c("dmuiid", "coiid", "compname", "comppct_r")],
by = "coiid",
all.x = TRUE
)
# depth plot of clay content by soil component
gg_comp <- function(x) {
ggplot(x) +
geom_line(aes(y = r, x = hzdept_r)) +
geom_line(aes(y = r, x = hzdept_r)) +
geom_ribbon(aes(ymin = l, ymax = h, x = hzdept_r), alpha = 0.2) +
xlim(200, 0) +
xlab("depth (cm)") +
facet_grid(var ~ dmuiid + paste(compname, comppct_r)) +
coord_flip()
}
g1 <- gg_comp(subset(h, var == "clay"))
g2 <- gg_comp(subset(h, var == "om"))
grid.arrange(g1, g2)
# query cosoilmoist (e.g. water table data) by mukey
# NA depths are interpreted as (???) with impute=TRUE argument
x <- get_cosoilmoist_from_NASISWebReport(
"EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded",
stringsAsFactors = TRUE)
ggplot(x, aes(x = as.integer(month), y = dept_r, lty = status)) +
geom_rect(aes(xmin = as.integer(month), xmax = as.integer(month) + 1,
ymin = 0, ymax = max(x$depb_r),
fill = flodfreqcl)) +
geom_line(cex = 1) +
geom_point() +
geom_ribbon(aes(ymin = dept_l, ymax = dept_h), alpha = 0.2) +
ylim(max(x$depb_r), 0) +
xlab("month") + ylab("depth (cm)") +
scale_x_continuous(breaks = 1:12, labels = month.abb, name="Month") +
facet_wrap(~ paste0(compname, ' (', comppct_r , ')')) +
ggtitle(paste0(x$nationalmusym[1],
': Water Table Levels from Component Soil Moisture Month Data'))
}
}
}
\author{
Stephen Roecker
}
\keyword{manip}
| /man/fetchNASISWebReport.Rd | no_license | Emory-ENVS-SihiLab/soilDB | R | false | true | 6,096 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetchNASISWebReport.R
\name{fetchNASISWebReport}
\alias{fetchNASISWebReport}
\alias{get_project_from_NASISWebReport}
\alias{get_progress_from_NASISWebReport}
\alias{get_project_correlation_from_NASISWebReport}
\alias{get_legend_from_NASISWebReport}
\alias{get_mapunit_from_NASISWebReport}
\alias{get_projectmapunit_from_NASISWebReport}
\alias{get_projectmapunit2_from_NASISWebReport}
\alias{get_component_from_NASISWebReport}
\alias{get_chorizon_from_NASISWebReport}
\alias{get_cosoilmoist_from_NASISWebReport}
\alias{get_sitesoilmoist_from_NASISWebReport}
\alias{get_lmuaoverlap_from_NASISWebReport}
\title{Get component tables from NASIS Web Reports}
\usage{
fetchNASISWebReport(
projectname,
rmHzErrors = FALSE,
fill = FALSE,
stringsAsFactors = default.stringsAsFactors()
)
get_component_from_NASISWebReport(
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_chorizon_from_NASISWebReport(
projectname,
fill = FALSE,
stringsAsFactors = default.stringsAsFactors()
)
get_legend_from_NASISWebReport(
mlraoffice,
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_lmuaoverlap_from_NASISWebReport(
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_mapunit_from_NASISWebReport(
areasymbol,
droplevels = TRUE,
stringsAsFactors = default.stringsAsFactors()
)
get_projectmapunit_from_NASISWebReport(
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_projectmapunit2_from_NASISWebReport(
mlrassoarea,
fiscalyear,
projectname,
stringsAsFactors = default.stringsAsFactors()
)
get_project_from_NASISWebReport(mlrassoarea, fiscalyear)
get_progress_from_NASISWebReport(mlrassoarea, fiscalyear, projecttypename)
get_project_correlation_from_NASISWebReport(
mlrassoarea,
fiscalyear,
projectname
)
}
\arguments{
\item{projectname}{text string vector of project names to be inserted into a
SQL WHERE clause (default: \code{NA})}
\item{rmHzErrors}{should pedons with horizonation errors be removed from the
results? (default: \code{FALSE})}
\item{fill}{should rows with missing component ids be removed (default: \code{FALSE})}
\item{stringsAsFactors}{logical: should character vectors be converted to
factors? This argument is passed to the \code{uncode()} function. It does not
convert those vectors that have been set outside of \code{uncode()} (i.e. hard
coded). The 'factory-fresh' default is TRUE, but this can be changed by
setting options(\code{stringsAsFactors = FALSE})}
\item{mlraoffice}{text string value identifying the MLRA Regional Soil
Survey Office group name inserted into a SQL WHERE clause (default: \code{NA})}
\item{areasymbol}{text string value identifying the area symbol (e.g.
\code{IN001} or \verb{IN\%}) inserted into a SQL WHERE clause (default: \code{NA})}
\item{droplevels}{logical: indicating whether to drop unused levels in
classifying factors. This is useful when a factor has a large number of unused
levels, which can waste space in tables and figures.}
\item{mlrassoarea}{text string value identifying the MLRA Soil Survey Office
areasymbol symbol inserted into a SQL WHERE clause (default: \code{NA})}
\item{fiscalyear}{text string value identifying the fiscal year inserted
into a SQL WHERE clause (default: \code{NA})}
\item{projecttypename}{text string value identifying the project type name
inserted into a SQL WHERE clause (default: \code{NA})}
}
\value{
A data.frame or list with the results.
}
\description{
Get component tables from NASIS Web Reports
}
\examples{
\donttest{
if (requireNamespace("curl") &
curl::has_internet() &
require("aqp") &
require("ggplot2") &
require("gridExtra")
) {
# query soil components by projectname
test = fetchNASISWebReport(
"EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded",
stringsAsFactors = TRUE)
test = test$spc
# profile plot
plot(test)
# convert the data for depth plot
clay_slice = horizons(slice(test, 0:200 ~ claytotal_l + claytotal_r + claytotal_h))
names(clay_slice) <- gsub("claytotal_", "", names(clay_slice))
om_slice = horizons(slice(test, 0:200 ~ om_l + om_r + om_h))
names(om_slice) = gsub("om_", "", names(om_slice))
test2 = rbind(data.frame(clay_slice, var = "clay"),
data.frame(om_slice, var = "om")
)
h = merge(test2, site(test)[c("dmuiid", "coiid", "compname", "comppct_r")],
by = "coiid",
all.x = TRUE
)
# depth plot of clay content by soil component
gg_comp <- function(x) {
ggplot(x) +
geom_line(aes(y = r, x = hzdept_r)) +
geom_line(aes(y = r, x = hzdept_r)) +
geom_ribbon(aes(ymin = l, ymax = h, x = hzdept_r), alpha = 0.2) +
xlim(200, 0) +
xlab("depth (cm)") +
facet_grid(var ~ dmuiid + paste(compname, comppct_r)) +
coord_flip()
}
g1 <- gg_comp(subset(h, var == "clay"))
g2 <- gg_comp(subset(h, var == "om"))
grid.arrange(g1, g2)
# query cosoilmoist (e.g. water table data) by mukey
# NA depths are interpreted as (???) with impute=TRUE argument
x <- get_cosoilmoist_from_NASISWebReport(
"EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded",
stringsAsFactors = TRUE)
ggplot(x, aes(x = as.integer(month), y = dept_r, lty = status)) +
geom_rect(aes(xmin = as.integer(month), xmax = as.integer(month) + 1,
ymin = 0, ymax = max(x$depb_r),
fill = flodfreqcl)) +
geom_line(cex = 1) +
geom_point() +
geom_ribbon(aes(ymin = dept_l, ymax = dept_h), alpha = 0.2) +
ylim(max(x$depb_r), 0) +
xlab("month") + ylab("depth (cm)") +
scale_x_continuous(breaks = 1:12, labels = month.abb, name="Month") +
facet_wrap(~ paste0(compname, ' (', comppct_r , ')')) +
ggtitle(paste0(x$nationalmusym[1],
': Water Table Levels from Component Soil Moisture Month Data'))
}
}
}
\author{
Stephen Roecker
}
\keyword{manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topN.R
\name{topN}
\alias{topN}
\alias{topN_new}
\alias{topN_new.CMF}
\alias{topN_new.CMF_implicit}
\alias{topN_new.ContentBased}
\alias{topN_new.OMF_explicit}
\alias{topN_new.OMF_implicit}
\title{Calculate top-N predictions for a new or existing user}
\usage{
topN(
model,
user = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads
)
topN_new(model, ...)
\method{topN_new}{CMF}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
U_bin = NULL,
weight = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{CMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{ContentBased}(
model,
U = NULL,
U_col = NULL,
U_val = NULL,
I = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{OMF_explicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
weight = NULL,
exact = FALSE,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{OMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
}
\arguments{
\item{model}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{user}{User (row of `X`) for which to rank items. If `X` to which the model
was fit was a `data.frame`, should pass an ID matching to the first column of `X`
(the user indices), otherwise should pass a row number for `X`, with numeration
starting at 1.
This is optional for the \link{MostPopular} model, but must be passed for all others.
For making recommendations about new users (that were not present in the `X` to
which the model was fit), should use `topN_new` and pass either `X` or `U` data.
For example usage, see the main section \link{fit_models}.}
\item{n}{Number of top-predicted items to output.}
\item{include}{If passing this, will only make a ranking among the item IDs
provided here. See the documentation for `user` for how the IDs should be passed.
This should be an integer or character vector, or alternatively, as a sparse vector
from the `Matrix` package (inheriting from class `sparseVector`),
from which the non-missing entries will be taken as those to include.
Cannot be used together with `exclude`.}
\item{exclude}{If passing this, will exclude from the ranking all the item IDs
provided here. See the documentation for `user` for how the IDs should be passed.
This should be an integer or character vector, or alternatively, as a sparse vector
from the `Matrix` package (inheriting from class `sparseVector`),
from which the non-missing entries will be taken as those to exclude.
Cannot be used together with `include`.}
\item{output_score}{Whether to also output the predicted values, in addition
to the indices of the top-predicted items.}
\item{nthreads}{Number of parallel threads to use.}
\item{...}{Not used.}
\item{X}{`X` data for a new user for which to make recommendations,
either as a numeric vector (class `numeric`), or as
a sparse vector from package `Matrix` (class `dsparseVector`). If the `X` to
which the model was fit was a `data.frame`, the column/item indices will have
been reindexed internally, and the numeration can be found under
`model$info$item_mapping`. Alternatively, can instead pass the column indices
and values and let the model reindex them (see `X_col` and `X_val`).
Should pass at most one of `X` or `X_col`+`X_val`.
Dense `X` data is not supported for `CMF_implicit` or `OMF_implicit`.}
\item{X_col}{`X` data for a new user for which to make recommendations,
in sparse vector format, with `X_col` denoting the
items/columns which are not missing. If the `X` to which the model was fit was
a `data.frame`, here should pass IDs matching to the second column of that `X`,
which will be reindexed internally. Otherwise, should have column indices with
numeration starting at 1 (passed as an integer vector).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_val}{`X` data for a new user for which to make recommendations,
in sparse vector format, with `X_val` denoting the
associated values to each entry in `X_col`
(should be a numeric vector of the same length as `X_col`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{U}{`U` data for a new user for which to make recommendations,
either as a numeric vector (class `numeric`), or as a
sparse vector from package `Matrix` (class `dsparseVector`). Alternatively,
if `U` is sparse, can instead pass the indices of the non-missing columns
and their values separately (see `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_col}{`U` data for a new user for which to make recommendations,
in sparse vector format, with `U_col` denoting the
attributes/columns which are not missing. Should have numeration starting at 1
(should be an integer vector).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_val}{`U` data for a new user for which to make recommendations,
in sparse vector format, with `U_val` denoting the
associated values to each entry in `U_col`
(should be a numeric vector of the same length as `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_bin}{Binary columns of `U` for a new user for which to make recommendations,
on which a sigmoid transformation will be
applied. Should be passed as a numeric vector. Note that `U` and `U_bin` are
not mutually exclusive.}
\item{weight}{(Only for the explicit-feedback models)
Associated weight to each non-missing observation in `X`. Must have the same
number of entries as `X` - that is, if passing a dense vector of length `n`,
`weight` should be a numeric vector of length `n` too, if passing a sparse
vector, should have a length corresponding to the number of non-missing elements.}
\item{I}{(Only for the `ContentBased` model)
New `I` data to rank for the given user, with rows denoting new columns of the `X` matrix.
Can be passed in the following formats:\itemize{
\item A sparse COO/triplets matrix, either from package
`Matrix` (class `dgTMatrix`), or from package `SparseM` (class `matrix.coo`).
\item A sparse matrix in CSR format, either from package
`Matrix` (class `dgRMatrix`), or from package `SparseM` (class `matrix.csr`).
Passing the input as CSR is faster than COO as it will be converted internally.
\item A sparse row vector from package `Matrix` (class `dsparseVector`).
\item A dense matrix from base R (class `matrix`), with missing entries set as NA.
\item A dense vector from base R (class `numeric`).
\item A `data.frame`.
}
When passing `I`, the item indices in `include`, `exclude`, and in the resulting
output refer to rows of `I`, and the ranking will be made only among the
rows of `I` (that is, they will not be compared against the old `X` data).}
\item{exact}{(In the `OMF_explicit` model)
Whether to calculate `A` and `Am` with the regularization applied
to `A` instead of to `Am` (if using the L-BFGS method, this is how the model
was fit). This is usually a slower procedure.}
}
\value{
If passing `output_score=FALSE` (the default), will output the
indices of the top-predicted elements. If passing `output_score=TRUE`,
will pass a list with two elements:\itemize{
\item `item`: The indices of the top-predicted elements.
\item `score`: The predicted value for each corresponding element in `item`.
}
If the `X` to which the model was fit was a `data.frame` (and unless passing `I`),
the item indices will be taken from the same IDs in `X` (its second column) - but
be aware that in this case they will usually be returned as `character`.
Otherwise, will return the indices of the top-predicted columns of `X`
(or rows of `I` if passing it) with numeration starting at 1.
}
\description{
Determine top-ranked items for a user according to their predicted
values, among the items to which the model was fit.
Can produce rankings for existing users (which were in the `X` data to which
the model was fit) through function `topN`, or for new users (which were not
in the `X` data to which the model was fit, but for which there is now new
data) through function `topN_new`, assuming there is either `X` data, `U` data,
or both (i.e. can do cold-start and warm-start rankings).
For the \link{CMF} model, depending on parameter `include_all_X`, might recommend
items which had only side information if their predictions are high enough.
For the \link{ContentBased} model, might be used to rank new items (not present
in the `X` or `I` data to which the model was fit) given their
`I` data, for new users given their `U` data. For the other models, will only
rank existing items (columns of the `X` to which the model was fit) - see
\link{predict_new_items} for an alternative for the other models.
\bold{Important:} the model does not keep any copies of the original data, and
as such, it might recommend items that were already seen/rated/consumed by the
user. In order to avoid this, must manually pass the seen/rated/consumed entries
to the argument `exclude` (see details below).
This method produces an exact ranking by computing all item predictions
for a given user. As the number of items grows, this can become a rather
slow operation - for model serving purposes, it's usually a better idea
to obtain an an approximate top-N ranking through software such as
"hnsw" or "Milvus" from the calculated user factors and item factors.
}
\details{
Be aware that this function is multi-threaded. As such, if a large batch
of top-N predictions is to be calculated in parallel for different users
(through e.g. `mclapply` or similar), it's recommended to decrease the number
of threads in the model to 1 (e.g. `model$info$nthreads <- 1L`) and to set the
number of BLAS threads to 1 (through e.g. `RhpcBLASctl` or environment variables).
For better cold-start recommendations with \link{CMF_implicit}, one can also add
item biases by using the `CMF` model with parameters that would mimic `CMF_implicit`
plus the biases.
}
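% The sketch below is illustrative only: the data, factorization rank, and user
% index are assumptions rather than values from the package authors; see
% \link{fit_models} for the full worked examples.
\examples{
\dontrun{
library(cmfrec)

## Small synthetic ratings matrix with missing entries
X <- matrix(NA_real_, nrow = 50, ncol = 20)
X[sample(length(X), 300L)] <- sample(1:5, 300L, replace = TRUE)

## Fit a plain collective matrix factorization model
model <- CMF(X, k = 5, verbose = FALSE)

## Top-5 items for an existing user, excluding the items that user already rated
seen <- which(!is.na(X[10, ]))
topN(model, user = 10, n = 5, exclude = seen, output_score = TRUE)

## Warm-start ranking for a new user with two observed ratings
topN_new(model, X_col = c(1L, 3L), X_val = c(5, 4), n = 5)
}
}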
\seealso{
\link{factors_single} \link{predict.cmfrec} \link{predict_new}
}
| /man/topN.Rd | permissive | david-cortes/cmfrec | R | false | true | 10,672 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/topN.R
\name{topN}
\alias{topN}
\alias{topN_new}
\alias{topN_new.CMF}
\alias{topN_new.CMF_implicit}
\alias{topN_new.ContentBased}
\alias{topN_new.OMF_explicit}
\alias{topN_new.OMF_implicit}
\title{Calculate top-N predictions for a new or existing user}
\usage{
topN(
model,
user = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads
)
topN_new(model, ...)
\method{topN_new}{CMF}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
U_bin = NULL,
weight = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{CMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{ContentBased}(
model,
U = NULL,
U_col = NULL,
U_val = NULL,
I = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{OMF_explicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
weight = NULL,
exact = FALSE,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
\method{topN_new}{OMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
n = 10L,
include = NULL,
exclude = NULL,
output_score = FALSE,
nthreads = model$info$nthreads,
...
)
}
\arguments{
\item{model}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{user}{User (row of `X`) for which to rank items. If `X` to which the model
was fit was a `data.frame`, should pass an ID matching to the first column of `X`
(the user indices), otherwise should pass a row number for `X`, with numeration
starting at 1.
This is optional for the \link{MostPopular} model, but must be passed for all others.
For making recommendations about new users (that were not present in the `X` to
which the model was fit), should use `topN_new` and pass either `X` or `U` data.
For example usage, see the main section \link{fit_models}.}
\item{n}{Number of top-predicted items to output.}
\item{include}{If passing this, will only make a ranking among the item IDs
provided here. See the documentation for `user` for how the IDs should be passed.
This should be an integer or character vector, or alternatively, as a sparse vector
from the `Matrix` package (inheriting from class `sparseVector`),
from which the non-missing entries will be taken as those to include.
Cannot be used together with `exclude`.}
\item{exclude}{If passing this, will exclude from the ranking all the item IDs
provided here. See the documentation for `user` for how the IDs should be passed.
This should be an integer or character vector, or alternatively, as a sparse vector
from the `Matrix` package (inheriting from class `sparseVector`),
from which the non-missing entries will be taken as those to exclude.
Cannot be used together with `include`.}
\item{output_score}{Whether to also output the predicted values, in addition
to the indices of the top-predicted items.}
\item{nthreads}{Number of parallel threads to use.}
\item{...}{Not used.}
\item{X}{`X` data for a new user for which to make recommendations,
either as a numeric vector (class `numeric`), or as
a sparse vector from package `Matrix` (class `dsparseVector`). If the `X` to
which the model was fit was a `data.frame`, the column/item indices will have
been reindexed internally, and the numeration can be found under
`model$info$item_mapping`. Alternatively, can instead pass the column indices
and values and let the model reindex them (see `X_col` and `X_val`).
Should pass at most one of `X` or `X_col`+`X_val`.
Dense `X` data is not supported for `CMF_implicit` or `OMF_implicit`.}
\item{X_col}{`X` data for a new user for which to make recommendations,
in sparse vector format, with `X_col` denoting the
items/columns which are not missing. If the `X` to which the model was fit was
a `data.frame`, here should pass IDs matching to the second column of that `X`,
which will be reindexed internally. Otherwise, should have column indices with
numeration starting at 1 (passed as an integer vector).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_val}{`X` data for a new user for which to make recommendations,
in sparse vector format, with `X_val` denoting the
associated values to each entry in `X_col`
(should be a numeric vector of the same length as `X_col`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{U}{`U` data for a new user for which to make recommendations,
either as a numeric vector (class `numeric`), or as a
sparse vector from package `Matrix` (class `dsparseVector`). Alternatively,
if `U` is sparse, can instead pass the indices of the non-missing columns
and their values separately (see `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_col}{`U` data for a new user for which to make recommendations,
in sparse vector format, with `U_col` denoting the
attributes/columns which are not missing. Should have numeration starting at 1
(should be an integer vector).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_val}{`U` data for a new user for which to make recommendations,
in sparse vector format, with `U_val` denoting the
associated values to each entry in `U_col`
(should be a numeric vector of the same length as `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_bin}{Binary columns of `U` for a new user for which to make recommendations,
on which a sigmoid transformation will be
applied. Should be passed as a numeric vector. Note that `U` and `U_bin` are
not mutually exclusive.}
\item{weight}{(Only for the explicit-feedback models)
Associated weight to each non-missing observation in `X`. Must have the same
number of entries as `X` - that is, if passing a dense vector of length `n`,
`weight` should be a numeric vector of length `n` too, if passing a sparse
vector, should have a length corresponding to the number of non-missing elements.}
\item{I}{(Only for the `ContentBased` model)
New `I` data to rank for the given user, with rows denoting new columns of the `X` matrix.
Can be passed in the following formats:\itemize{
\item A sparse COO/triplets matrix, either from package
`Matrix` (class `dgTMatrix`), or from package `SparseM` (class `matrix.coo`).
\item A sparse matrix in CSR format, either from package
`Matrix` (class `dgRMatrix`), or from package `SparseM` (class `matrix.csr`).
Passing the input as CSR is faster than COO as it will be converted internally.
\item A sparse row vector from package `Matrix` (class `dsparseVector`).
\item A dense matrix from base R (class `matrix`), with missing entries set as NA.
\item A dense vector from base R (class `numeric`).
\item A `data.frame`.
}
When passing `I`, the item indices in `include`, `exclude`, and in the resulting
output refer to rows of `I`, and the ranking will be made only among the
rows of `I` (that is, they will not be compared against the old `X` data).}
\item{exact}{(In the `OMF_explicit` model)
Whether to calculate `A` and `Am` with the regularization applied
to `A` instead of to `Am` (if using the L-BFGS method, this is how the model
was fit). This is usually a slower procedure.}
}
\value{
If passing `output_score=FALSE` (the default), will output the
indices of the top-predicted elements. If passing `output_score=TRUE`,
will pass a list with two elements:\itemize{
\item `item`: The indices of the top-predicted elements.
\item `score`: The predicted value for each corresponding element in `item`.
}
If the `X` to which the model was fit was a `data.frame` (and unless passing `I`),
the item indices will be taken from the same IDs in `X` (its second column) - but
be aware that in this case they will usually be returned as `character`.
Otherwise, will return the indices of the top-predicted columns of `X`
(or rows of `I` if passing it) with numeration starting at 1.
}
\description{
Determine top-ranked items for a user according to their predicted
values, among the items to which the model was fit.
Can produce rankings for existing users (which were in the `X` data to which
the model was fit) through function `topN`, or for new users (which were not
in the `X` data to which the model was fit, but for which there is now new
data) through function `topN_new`, assuming there is either `X` data, `U` data,
or both (i.e. can do cold-start and warm-start rankings).
For the \link{CMF} model, depending on parameter `include_all_X`, might recommend
items which had only side information if their predictions are high enough.
For the \link{ContentBased} model, might be used to rank new items (not present
in the `X` or `I` data to which the model was fit) given their
`I` data, for new users given their `U` data. For the other models, will only
rank existing items (columns of the `X` to which the model was fit) - see
\link{predict_new_items} for an alternative for the other models.
\bold{Important:} the model does not keep any copies of the original data, and
as such, it might recommend items that were already seen/rated/consumed by the
user. In order to avoid this, must manually pass the seen/rated/consumed entries
to the argument `exclude` (see details below).
This method produces an exact ranking by computing all item predictions
for a given user. As the number of items grows, this can become a rather
slow operation - for model serving purposes, it's usually a better idea
to obtain an an approximate top-N ranking through software such as
"hnsw" or "Milvus" from the calculated user factors and item factors.
}
\details{
Be aware that this function is multi-threaded. As such, if a large batch
of top-N predictions is to be calculated in parallel for different users
(through e.g. `mclapply` or similar), it's recommended to decrease the number
of threads in the model to 1 (e.g. `model$info$nthreads <- 1L`) and to set the
number of BLAS threads to 1 (through e.g. `RhpcBLASctl` or environment variables).
For better cold-start recommendations with \link{CMF_implicit}, one can also add
item biases by using the `CMF` model with parameters that would mimic `CMF_implicit`
plus the biases.
}
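% The sketch below is illustrative only: the data, factorization rank, and user
% index are assumptions rather than values from the package authors; see
% \link{fit_models} for the full worked examples.
\examples{
\dontrun{
library(cmfrec)

## Small synthetic ratings matrix with missing entries
X <- matrix(NA_real_, nrow = 50, ncol = 20)
X[sample(length(X), 300L)] <- sample(1:5, 300L, replace = TRUE)

## Fit a plain collective matrix factorization model
model <- CMF(X, k = 5, verbose = FALSE)

## Top-5 items for an existing user, excluding the items that user already rated
seen <- which(!is.na(X[10, ]))
topN(model, user = 10, n = 5, exclude = seen, output_score = TRUE)

## Warm-start ranking for a new user with two observed ratings
topN_new(model, X_col = c(1L, 3L), X_val = c(5, 4), n = 5)
}
}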
\seealso{
\link{factors_single} \link{predict.cmfrec} \link{predict_new}
}
|
context("lwgeom")
test_that("st_make_valid works", {
library(sf)
x = st_sfc(st_polygon(list(rbind(c(0,0),c(0.5,0),c(0.5,0.5),c(0.5,0),c(1,0),c(1,1),c(0,1),c(0,0)))))
fls = suppressWarnings(sf::st_is_valid(x, FALSE))
expect_false(fls)
y = st_make_valid(x)
expect_true(st_is_valid(y))
expect_true(st_is_valid(lwgeom::st_make_valid(x[[1]])))
expect_true(st_is_valid(lwgeom::st_make_valid(st_sf(a = 1, geom = x))))
expect_equal(lwgeom::st_geohash(st_sfc(st_point(c(1.5,3.5)), st_point(c(0,90))), 2), c( "s0","up"))
expect_equal(lwgeom::st_geohash(st_sfc(st_point(c(1.5,3.5)), st_point(c(0,90))), 10),
c("s095fjhkbx","upbpbpbpbp"))
l = st_as_sfc('MULTILINESTRING((10 10, 190 190), (15 15, 30 30, 100 90))')
pt = st_sfc(st_point(c(30,30)))
expect_silent(lwgeom::st_split(l, pt)) # sfc
expect_silent(lwgeom::st_split(l[[1]], pt)) # sfg
expect_silent(lwgeom::st_split(st_sf(a = 1, geom = l), pt)) # sf
# https://github.com/r-spatial/sf/issues/509 :
p1 = st_point(c(7,52))
geom.sf = st_sfc(p1, crs = 4326)
x <- st_transform_proj(geom.sf, "+proj=wintri")
p = st_crs(4326)$proj4string
x <- st_transform_proj(structure(geom.sf[[1]], proj4string = p), "+proj=wintri")
nc = st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
st_transform_proj(nc[1,], "+proj=wintri +over")
lwgeom_extSoftVersion()
})
test_that("st_minimum_bounding_circle works", {
library(sf)
x = st_multipoint(matrix(c(0,1,0,1),2,2))
y = st_multipoint(matrix(c(0,0,1,0,1,1),3,2))
plot(st_minimum_bounding_circle(x), axes=TRUE); plot(x, add=TRUE)
plot(st_minimum_bounding_circle(y), axes=TRUE); plot(y, add=TRUE)
nc = st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
state = st_union(st_geometry(nc))
st_minimum_bounding_circle(state)
st_minimum_bounding_circle(st_sf(st = "nc", geom = state))
})
| /tests/testthat/test_lwgeom.R | no_license | rundel/lwgeom | R | false | false | 1,836 | r | context("lwgeom")
test_that("st_make_valid works", {
library(sf)
x = st_sfc(st_polygon(list(rbind(c(0,0),c(0.5,0),c(0.5,0.5),c(0.5,0),c(1,0),c(1,1),c(0,1),c(0,0)))))
fls = suppressWarnings(sf::st_is_valid(x, FALSE))
expect_false(fls)
y = st_make_valid(x)
expect_true(st_is_valid(y))
expect_true(st_is_valid(lwgeom::st_make_valid(x[[1]])))
expect_true(st_is_valid(lwgeom::st_make_valid(st_sf(a = 1, geom = x))))
expect_equal(lwgeom::st_geohash(st_sfc(st_point(c(1.5,3.5)), st_point(c(0,90))), 2), c( "s0","up"))
expect_equal(lwgeom::st_geohash(st_sfc(st_point(c(1.5,3.5)), st_point(c(0,90))), 10),
c("s095fjhkbx","upbpbpbpbp"))
l = st_as_sfc('MULTILINESTRING((10 10, 190 190), (15 15, 30 30, 100 90))')
pt = st_sfc(st_point(c(30,30)))
expect_silent(lwgeom::st_split(l, pt)) # sfc
expect_silent(lwgeom::st_split(l[[1]], pt)) # sfg
expect_silent(lwgeom::st_split(st_sf(a = 1, geom = l), pt)) # sf
# https://github.com/r-spatial/sf/issues/509 :
p1 = st_point(c(7,52))
geom.sf = st_sfc(p1, crs = 4326)
x <- st_transform_proj(geom.sf, "+proj=wintri")
p = st_crs(4326)$proj4string
x <- st_transform_proj(structure(geom.sf[[1]], proj4string = p), "+proj=wintri")
nc = st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
st_transform_proj(nc[1,], "+proj=wintri +over")
lwgeom_extSoftVersion()
})
test_that("st_minimum_bounding_circle works", {
library(sf)
x = st_multipoint(matrix(c(0,1,0,1),2,2))
y = st_multipoint(matrix(c(0,0,1,0,1,1),3,2))
plot(st_minimum_bounding_circle(x), axes=TRUE); plot(x, add=TRUE)
plot(st_minimum_bounding_circle(y), axes=TRUE); plot(y, add=TRUE)
nc = st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
state = st_union(st_geometry(nc))
st_minimum_bounding_circle(state)
st_minimum_bounding_circle(st_sf(st = "nc", geom = state))
})
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/geom-polygon.r
\name{geom_polygon}
\alias{geom_polygon}
\title{Polygon, a filled path.}
\usage{
geom_polygon(mapping = NULL, data = NULL, stat = "identity",
position = "identity", show.legend = NA, inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), is combined with the default mapping at the top level of the
plot. You only need to supply \code{mapping} if there isn't a mapping
defined for the plot.}
\item{data}{A data frame. If specified, overrides the default data frame
defined at the top level of the plot.}
\item{stat}{The statistical transformation to use on the data for this
layer, as a string.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
\item{...}{other arguments passed on to \code{\link{layer}}. There are
three types of arguments you can use here:
\itemize{
\item Aesthetics: to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}.
\item Other arguments to the layer, for example you override the
default \code{stat} associated with the layer.
\item Other arguments passed on to the stat.
}}
}
\description{
Polygon, a filled path.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom", "polygon")}
}
\examples{
# When using geom_polygon, you will typically need two data frames:
# one contains the coordinates of each polygon (positions), and the
# other the values associated with each polygon (values). An id
# variable links the two together
ids <- factor(c("1.1", "2.1", "1.2", "2.2", "1.3", "2.3"))
values <- data.frame(
id = ids,
value = c(3, 3.1, 3.1, 3.2, 3.15, 3.5)
)
positions <- data.frame(
id = rep(ids, each = 4),
x = c(2, 1, 1.1, 2.2, 1, 0, 0.3, 1.1, 2.2, 1.1, 1.2, 2.5, 1.1, 0.3,
0.5, 1.2, 2.5, 1.2, 1.3, 2.7, 1.2, 0.5, 0.6, 1.3),
y = c(-0.5, 0, 1, 0.5, 0, 0.5, 1.5, 1, 0.5, 1, 2.1, 1.7, 1, 1.5,
2.2, 2.1, 1.7, 2.1, 3.2, 2.8, 2.1, 2.2, 3.3, 3.2)
)
# Currently we need to manually merge the two together
datapoly <- merge(values, positions, by=c("id"))
(p <- ggplot(datapoly, aes(x=x, y=y)) + geom_polygon(aes(fill=value, group=id)))
# Which seems like a lot of work, but then it's easy to add on
# other features in this coordinate system, e.g.:
stream <- data.frame(
x = cumsum(runif(50, max = 0.1)),
y = cumsum(runif(50,max = 0.1))
)
p + geom_line(data = stream, colour="grey30", size = 5)
# And if the positions are in longitude and latitude, you can use
# coord_map to produce different map projections.
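# For instance (an illustrative sketch -- assumes the maps and mapproj packages
# are installed, which this page does not otherwise require):
\dontrun{
ggplot(map_data("usa"), aes(x = long, y = lat, group = group)) +
  geom_polygon(fill = "grey80", colour = "grey40") +
  coord_map("polyconic")
}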
}
\seealso{
\code{\link{geom_path}} for an unfilled polygon,
\code{\link{geom_ribbon}} for a polygon anchored on the x-axis
}
 | /man/geom_polygon.Rd | no_license | pricky2903/ggplot2 | R | false | false | 3,366 | rd |
#' Computes Intermediate Correlation Matrix
#'
#' \code{CmatStarGpois} computes an intermediate correlation matrix that will be used to obtain
#' the target correlation matrix using the inverse CDF transformation method in \code{GenMVGpois}.
#' If the intermediate correlation matrix is not positive definite, the nearest positive definite
#' matrix is used.
#'
#' @param corMat target correlation matrix.
#' @param theta.vec rate parameters in the generalized Poisson distribution. It is assumed that the
#' length of the vector is at least two, and each value has to be a positive number.
#' @param lambda.vec dispersion parameters in the generalized Poisson distribution. It is assumed that the length
#' of the vector is at least two. All lambda values have to be less than 1.
#' For lambda < 0, lambda must be greater than or equal to -theta/4.
#' @param verbose logical variable that determines whether to display the traces. Default is set to TRUE.
#' @return Intermediate correlation matrix.
#' @examples
#' \donttest{
#' lambda.vec = c(-0.2, 0.2, -0.3)
#' theta.vec = c(1, 3, 4)
#' M = c(0.352, 0.265, 0.342)
#' N = diag(3)
#' N[lower.tri(N)] = M
#' TV = N + t(N)
#' diag(TV) = 1
#' cstar = CmatStarGpois(TV, theta.vec, lambda.vec, verbose = TRUE)
#' cstar}
#' @references
#' Yahav, I. and Shmueli, G. (2012). On generating multivariate Poisson data in management science applications.
#' \emph{Applied Stochastic Models in Business and Industry}, \bold{28(1)}, 91-102.
#' @export
CmatStarGpois = function(corMat, theta.vec, lambda.vec, verbose = TRUE) {
no.gpois = length(theta.vec)
if (ValidCorrGpois(corMat, theta.vec, lambda.vec, verbose)) {
corMat.star = diag(nrow(corMat))
# lower matrix index
g = expand.grid(row = 1:nrow(corMat), col = 1:ncol(corMat))
g.lower.tri = g[lower.tri(corMat, diag = TRUE),]
corMat.lower.index = g.lower.tri[-which(g.lower.tri[,1]==g.lower.tri[,2]),]
for (i in 1:nrow(corMat.lower.index)) {
i.temp = corMat.lower.index[i,1]
j.temp = corMat.lower.index[i,2]
corMat.star[i.temp,j.temp] = CorrNNGpois(c(theta.vec[i.temp], theta.vec[j.temp]),
c(lambda.vec[i.temp], lambda.vec[j.temp]),
corMat[i.temp,j.temp])
if (verbose == TRUE) {
cat(".")
}
}
# upper matrix index
g.upper.tri = g[upper.tri(corMat, diag = TRUE),]
corMat.upper.index <- g.upper.tri[-which(g.upper.tri[,1]==g.upper.tri[,2]),]
for (i in 1:nrow(corMat.upper.index)) {
i.temp = corMat.upper.index[i,1]
j.temp = corMat.upper.index[i,2]
sym.index = intersect(which(corMat.lower.index[,2] == i.temp), which(corMat.lower.index[,1] == j.temp))
corMat.star[i.temp, j.temp] = corMat.star[corMat.lower.index[sym.index,1], corMat.lower.index[sym.index,2]]
}
}
if (verbose == TRUE) {
cat("\n")
}
if (!is.positive.definite(corMat.star)) {
warning("Intermediate correlation matrix is not positive definite. Nearest positive definite matrix is used!")
corMat.star = as.matrix(nearPD(corMat.star, corr = TRUE, keepDiag = TRUE)$mat)
}
corMat.star = (corMat.star + t(corMat.star))/2
return(corMat.star)
}
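## ---------------------------------------------------------------------------
## Editor's note (illustrative sketch, not part of the original package file):
## a minimal, self-contained example of the positive-definiteness repair step
## used above. The 3 x 3 matrix is made up and deliberately not positive
## definite; nearPD() comes from the Matrix package and is.positive.definite()
## from matrixcalc, the same functions the code above relies on. Kept as
## comments so it does not execute at package load; uncomment to run.
# library(Matrix)
# library(matrixcalc)
# m <- matrix(c(1.0, 0.9, 0.1,
#               0.9, 1.0, 0.9,
#               0.1, 0.9, 1.0), nrow = 3)  # det < 0, so not positive definite
# if (!is.positive.definite(m)) {
#   m <- as.matrix(nearPD(m, corr = TRUE, keepDiag = TRUE)$mat)
# }
# m <- (m + t(m)) / 2                      # force exact symmetry, as above
## ---------------------------------------------------------------------------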
 | /R/CmatStarGpois.R | no_license | cran/RNGforGPD | R | false | false | 3,242 | r |
library(gridExtra)
library(grid)
library(ggplot2)
library(lattice)
library(corrplot)
library(ggcorrplot)
##### Library Comparison ####
ProteinProspector_AIEIVQALDR <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/ProteinProspector_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
ProteinProspector_ETDIGVTGGGQGK <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/ProteinProspector_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
#Library Spectra
#PROSIT
PROSIT_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/PROSIT_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
colnames(PROSIT_IspH_AIIEIVDALDR_Spectra) <- c("m.z", "Relative.Abundance")
PROSIT_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/PROSIT_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
colnames(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra) <- c("m.z", "Relative.Abundance")
#### HIGH_Res data upload ####
High_Res_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/HighRes_Intensity_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance <- High_Res_IspH_AIIEIVDALDR_Spectra
High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Relative.Abundance <- High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Intensity/(max(High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Intensity))
colnames(High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
High_Res_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/HighRes_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance = High_Res_IspG_ETDIGVTGGGGQGK_Spectra
High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Relative.Abundance <- High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity/(max(High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity))
colnames(High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
#### LOW_Res data upload ####
Low_Res_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/LowRes_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance <- Low_Res_IspH_AIIEIVDALDR_Spectra
Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Relative.Abundance <- Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Intensity/(max(Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Intensity))
colnames(Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/LowRes_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance <- Low_Res_IspG_ETDIGVTGGGGQGK_Spectra
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Relative.Abundance <- Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity/(max(Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity))
colnames(Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
ggplot(PROSIT_IspH_AIIEIVDALDR_Spectra, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
xlim(150,1250)+
theme_classic()+
labs(title = "PROSIT AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
ggplot(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
xlim(150,1250)+
theme_classic()+
labs(title = "PROSIT ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
##################################################
# df_library = ProteinProspector_ETDIGVTGGGQGK
# df = FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance
spectra_plotting <- function(df_library, df){
# extract the library peptide ions into object fragment
fragment <- numeric()
for (i in 1:nrow(df_library)){
if(df_library[i,2] != 0 ){
fragment <- append(fragment,as.numeric(df_library[i,1]))
}
}
# Print the name of the dataframe that will be compared to library peptides
print(deparse(substitute(df)))
# Create an empty vector to put the transitions that were found in the experimental spectra
transitions <- numeric()
  # Ion comparison has to be exact, therefore I will truncate m/z values before the decimal
# and then turn them into characters for proper comparison
df.mass.to.charge.char <- as.character(trunc(df$m.z))
# iterate through library ions - fragment vector
for(i in 1:length(fragment)){
# truncate and turn fragment ion into a character
frag_char <- as.character(trunc(fragment[i]))
# assign a new variable for exact pattern matching
pat <- paste0("^",frag_char,"$")
# append matched ions to transition vector
# transitions object will be populated with the index of where the match was found
transitions <- append(transitions,grep(pat,
df.mass.to.charge.char))
}
# extract the m/z values using the index were pattern matching was found
mass.to.charge <- df$m.z[transitions]
print(mass.to.charge)
# reduce to only the unique m/z
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
# The following will be used to match the m/z based on intensity. If there is more than one m/z
# value being compared, the m/z with the greatest intensity is chosen.
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
print(final_list)
}
}
  # Differentiate between the ions that are associated with the peptide and those that are background.
# Color the peptide ions red and the background black
ion_type <- vector()
for (i in 1:nrow(df)) {
if(length(which(i %in% which(df$m.z %in% final_list))) == 1){
ion_type <- append(ion_type, "peptide")
}else{
ion_type <- append(ion_type, "background")
}
}
background <- df[grep("background",ion_type),]
peptide <- df[grep("peptide",ion_type),]
# produce the ggplot that will be returned with the function
spectra <- ggplot(background, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
geom_linerange(data = peptide,
aes(x=m.z, ymax = Relative.Abundance, ymin =0),
position = position_jitter(height = 0L, seed = 1L), color = "red")+
xlim(150,1250)+
theme_classic()
  return(spectra)
}
##### Plot spectra #####
prosit_AIEIVDQALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, PROSIT_IspH_AIIEIVDALDR_Spectra)
PA <- prosit_AIEIVDQALDR + labs(title = "Prosit Theoretical Spectra - AIEVDQALDR", y = "Relative Abundance", x = "m/z")
high_res_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance)
HA <- high_res_AIEIDVDQDALDR + labs(title = "High Resolution AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
low_res_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance)
LA <- low_res_AIEIDVDQDALDR + labs(title = "Low Resolution AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
#labs(title = "Low Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
prosit_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, PROSIT_IspG_ETDIGVTGGGGQGK_Spectra)
PE <- prosit_ETDIGVTGGGGQGK + labs(title = "Prosit Theoretical Spectra - ETDIGVTGGGQGK", y= "Relative Abundance", x="m/z")
high_res_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance)
HE <- high_res_ETDIGVTGGGGQGK + labs(title = "High Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
low_res_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance)
LE <- low_res_ETDIGVTGGGGQGK + labs(title = "Low Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
##### Infusion Comparison to Library ######
# The following function infusion_spectra_prep is meant to format the infusion spectra for generating
# the spectral plots
# Infusion_df <- noFAIMS_AIIEIVDALDR_Spectra
# max_Intensity <- 200000
infusion_spectra_prep <- function(Infusion_df, max_Intensity){
colnames(Infusion_df) <- c("m.z", "Intensity")
row_sub <- apply(Infusion_df, 1, function(row) all(row !=0))
Infusion_df <- Infusion_df[row_sub,]
Infusion_df <- Infusion_df[order(Infusion_df$m.z),]
Infusion_df_RelativeAbundance <- Infusion_df[(Infusion_df$Intensity < max_Intensity),]
Infusion_df_RelativeAbundance$Relative.Abundance <- Infusion_df_RelativeAbundance$Intensity/(max(Infusion_df_RelativeAbundance$Intensity))
return(Infusion_df_RelativeAbundance)
}
##### Infusion data upload #####
#AIEIVDQALDR
#FAIMS
FAIMS_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200126_ZymoFAIMS_IW2_R500K_MI502_AGC1e06_2.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance <- infusion_spectra_prep(FAIMS_AIIEIVDALDR_Spectra, 200000)
FAIMS_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
A1 <- FAIMS_AIEIDVDQDALDR + ylim(0,1) + labs(title = "FAIMS AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
#with out FAIMS
noFAIMS_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200125_Zymo_IW2_R500K_MI502_AGC1e06_1.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance <- infusion_spectra_prep(noFAIMS_AIIEIVDALDR_Spectra, 200000)
noFAIMS_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
A2 <- noFAIMS_AIEIDVDQDALDR + ylim(0,1) + labs(title = "no FAIMS AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
# ETDIGVTGGGQGK #
#FAIMS
FAIMS_ETDIGVTGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200126_ZymoFAIMS_IW2_R500K_MI502_AGC1e06_2_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance <- infusion_spectra_prep(FAIMS_ETDIGVTGGGQGK_Spectra, 200000)
FAIMS_ETDIGVTGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
E1 <- FAIMS_ETDIGVTGGGQGK + ylim(0,1) + labs(title = "FAIMS ETDIGVTGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
#with out FAIMS
noFAIMS_ETDIGVTGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200125_Zymo_IW2_R500K_MI502_AGC1e06_1_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance <- infusion_spectra_prep(noFAIMS_ETDIGVTGGGQGK_Spectra, 200000)
noFAIMS_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
E2 <- noFAIMS_ETDIGVTGGGGQGK + ylim(0,1) + labs(title = "no FAIMS ETDIGVTGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
# Use the ggplotGrob function to prepare plots to be arranged for pdf printing
PAgrob <- ggplotGrob(PA)
HAgrob <- ggplotGrob(HA)
LAgrob <- ggplotGrob(LA)
PEgrob <- ggplotGrob(PE)
HEgrob <- ggplotGrob(HE)
LEgrob <- ggplotGrob(LE)
A1grob <- ggplotGrob(A1)
A2grob <- ggplotGrob(A2)
E1grob <- ggplotGrob(E1)
E2grob <- ggplotGrob(E2)
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2)
grid.arrange(A1grob, E1grob, A2grob, E2grob, ncol = 2)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_PROSIT_HIGHRES_LOWRES.pdf")
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2)
dev.off()
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_PROSIT_HIGHRES_LOWRES.pdf",
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2),
width = 10,
height = 11.5,
units = "in")
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_FAIMS_noFAIMS.pdf",
grid.arrange(A1grob, E1grob, A2grob, E2grob, ncol =2),
width = 10,
height = 8.5,
units = "in")
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_all.pdf",
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, A1grob, E1grob, A2grob, E2grob, ncol =2),
width = 10,
height = 14.5,
units = "in")
#### TIC Explained ######
# To quantify the fraction of the TIC explained by the peptide fragment ions, the function tic_explained_function
# matches library fragment m/z values against the spectrum and sums their intensities (see the worked note after the function).
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance
tic_explained_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
print(final_list)
}
}
intensity_peptide <- df$Intensity[which(df$m.z %in% final_list)]
intensity_peptide_sum <- sum(intensity_peptide)
intensity_TIC <- sum(df$Intensity)
Tic_explained <- intensity_peptide_sum/intensity_TIC*100
return(Tic_explained)
}
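# Editor's note (added for clarity; not in the original script): the value
# returned by tic_explained_function() reduces to
#   100 * sum(intensities matched to library fragments) / sum(all intensities).
# Toy check with made-up intensities (no relation to the data loaded above):
toy_frag_int <- c(1e5, 5e4)                  # intensities matched to library fragments
toy_spec_int <- c(toy_frag_int, 3e4, 2e4)    # every intensity in the spectrum
100 * sum(toy_frag_int) / sum(toy_spec_int)  # 75, i.e. 75% of the TIC explained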
Tic_explained_High_Res_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR, High_Res_IspH_AIIEIVDALDR_Spectra)
Tic_explained_Low_Res_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR,Low_Res_IspH_AIIEIVDALDR_Spectra)
Tic_explained_High_Res_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, High_Res_IspG_ETDIGVTGGGGQGK_Spectra)
Tic_explained_Low_Res_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, Low_Res_IspG_ETDIGVTGGGGQGK_Spectra)
### AIEIVDQALDR ###
colnames(FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_FAIMS_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR,FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
colnames(noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_noFAIMS_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
### ETDIGVTGGGQGK ###
colnames(FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_FAIMS_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK,FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
colnames(noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_noFAIMS_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
##### Dot Product ####
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- High_Res_IspH_AIIEIVDALDR_Spectra
peptide_ions_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
#print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
#print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
#print(final_list)
}
}
peptide <- df[which(df$m.z %in% final_list),]
return(peptide)
}
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- High_Res_IspH_AIIEIVDALDR_Spectra
intensity_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
#print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
#print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
#print(final_list)
}
}
intensity_peptide <- df$Intensity[which(df$m.z %in% final_list)]
#intensity_peptide_sum <- sum(intensity_peptide)
return(intensity_peptide)
}
# df_1 <- High_Res_IspG_ETDIGVTGGGGQGK_Spectra
# df_2 <- noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance
# df_ProteinProspector <- ProteinProspector_ETDIGVTGGGQGK
DP_function <- function(df_1, df_2, df_ProteinProspector){
df1_peptide <- peptide_ions_function(df_ProteinProspector,df_1)
df2_peptide <- peptide_ions_function(df_ProteinProspector, df_2)
if(nrow(df1_peptide) >= nrow(df2_peptide)){
spec1 = df1_peptide
spec2 = df2_peptide
}else{
spec1 = df2_peptide
spec2 = df1_peptide
}
sum_intensity_combined = vector()
not_included = vector()
for(i in 1:nrow(spec1)){
print(i)
if(is.na(trunc(spec2$m.z[i]))){
print(paste0("i = ",i," Ion NOt here"))
print(spec1$m.z[i])
print(spec2$m.z[i])
}
if(length(which(trunc(spec1$m.z[i]) %in% trunc(spec2$m.z))) != 0){
spec1_intensity = spec1$Intensity[i]
spec2_intensity = spec2$Intensity[which(trunc(spec2$m.z) == trunc(spec1$m.z[i]))]
combined_intensity = spec1_intensity*spec2_intensity
sum_intensity_combined = append(sum_intensity_combined, combined_intensity)
print(sum_intensity_combined)
} else{
print(paste0("i = ",i," Ion NOt here"))
}
}
I_spec1 <- intensity_function(df_ProteinProspector, df_1)
I_spec2 <- intensity_function(df_ProteinProspector, df_2)
numerator <- sum(sum_intensity_combined)
denomenator <- sum(I_spec1^2)*sum(I_spec2^2)
DP <- numerator/sqrt(denomenator)
return(DP)
}
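# Editor's note (added for clarity; not in the original script): when both
# spectra share the same set of library-matched peaks, DP_function() reduces to
# the normalized spectral dot product
#   sum(x * y) / sqrt(sum(x^2) * sum(y^2)),
# which equals 1 for identical relative intensity patterns. Toy check with
# made-up intensities:
toy_x <- c(100, 80, 60)
toy_y <- c(90, 85, 55)
sum(toy_x * toy_y) / sqrt(sum(toy_x^2) * sum(toy_y^2))  # ~0.997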
DP_function(High_Res_IspH_AIIEIVDALDR_Spectra,noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, ProteinProspector_AIEIVQALDR)
DP_function(High_Res_IspG_ETDIGVTGGGGQGK_Spectra,noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance, ProteinProspector_ETDIGVTGGGQGK)
df.list <- list(High_Res_IspH_AIIEIVDALDR_Spectra, Low_Res_IspH_AIIEIVDALDR_Spectra,
FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
df.list_ETDIGVTGGGGQGK <- list(High_Res_IspG_ETDIGVTGGGGQGK_Spectra, Low_Res_IspG_ETDIGVTGGGGQGK_Spectra,
FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
colnames(df.list_ETDIGVTGGGGQGK[[1]]) <- c("m.z","Intensity")
colnames(df.list_ETDIGVTGGGGQGK[[2]]) <- c("m.z","Intensity")
df_matrix <- matrix(nrow = 4, ncol = 4)
df_matrix_ETDIGVTGGGGQGK <- matrix(nrow = 4, ncol = 4)
for(i in 1:length(df.list)){
df_n <- df.list[[i]]
for (j in 1:length(df.list)) {
dot_product = DP_function(df_n,df.list[[j]],ProteinProspector_AIEIVQALDR)
df_matrix[i,j] <- dot_product
}
}
for(i in 1:length(df.list_ETDIGVTGGGGQGK)){
df_n <- df.list_ETDIGVTGGGGQGK[[i]]
for (j in 1:length(df.list_ETDIGVTGGGGQGK)) {
dot_product = DP_function(df_n,df.list_ETDIGVTGGGGQGK[[j]], ProteinProspector_ETDIGVTGGGQGK)
df_matrix_ETDIGVTGGGGQGK[i,j] <- dot_product
}
}
colnames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
colnames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
library(RColorBrewer)
#cols <- brewer.pal(4,"BrBG")
scaleRYG <- colorRampPalette(c("#F3A5BF","#15688E"), space = "rgb")(100)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/dotProduct_AIEIVDQALDR_ETDIGVTGGGQGK_v2.pdf",height = 10, width = 15)
par(mfrow = c(1,2))
corrplot(df_matrix, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
corrplot(df_matrix_ETDIGVTGGGGQGK, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
dev.off()
#ggcorrplot(df_matrix, type = "lower", outline.color = "white")
#### SIMilarity Score ####
# df_1 <- High_Res_IspH_AIIEIVDALDR_Spectra
# df_2 <- High_Res_IspH_AIIEIVDALDR_Spectra
SIM_function <- function(df_1, df_2, df_ProteinProspector){
df1_peptide <- peptide_ions_function(df_ProteinProspector,df_1)
df2_peptide <- peptide_ions_function(df_ProteinProspector, df_2)
if(nrow(df1_peptide) >= nrow(df2_peptide)){
spec1 = df1_peptide
spec2 = df2_peptide
}else{
spec1 = df2_peptide
spec2 = df1_peptide
}
sqrt_intensity = vector()
not_included = vector()
for(i in 1:nrow(spec1)){
print(i)
if(is.na(trunc(spec2$m.z[i]))){
print(paste0("i = ",i," Ion NOt here"))
print(spec1$m.z[i])
print(spec2$m.z[i])
}
if(length(which(trunc(spec1$m.z[i]) %in% trunc(spec2$m.z))) != 0){
spec1_intensity = spec1$Intensity[i]
spec2_intensity = spec2$Intensity[which(trunc(spec2$m.z) == trunc(spec1$m.z[i]))]
combined_intensity = sqrt(spec1_intensity*spec2_intensity)
sqrt_intensity = append(sqrt_intensity, combined_intensity)
} else{
print(paste0("i = ",i," Ion NOt here"))
}
}
I_spec1 <- intensity_function(df_ProteinProspector, df_1)
I_spec2 <- intensity_function(df_ProteinProspector, df_2)
numerator = sum(sqrt_intensity)
denomenator = sqrt(sum(I_spec1)*sum(I_spec2))
SIM = numerator/denomenator
return(SIM)
}
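# Editor's note (added for clarity; not in the original script): under the same
# shared-peak assumption as the dot product above, SIM_function() reduces to
#   sum(sqrt(x * y)) / sqrt(sum(x) * sum(y)),
# a geometric-mean similarity that also equals 1 for identical patterns.
# Toy check with made-up intensities:
toy_x2 <- c(100, 80, 60)
toy_y2 <- c(90, 85, 55)
sum(sqrt(toy_x2 * toy_y2)) / sqrt(sum(toy_x2) * sum(toy_y2))  # ~0.999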
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,High_Res_IspH_AIIEIVDALDR_Spectra, ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,Low_Res_IspH_AIIEIVDALDR_Spectra,ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, ProteinProspector_AIEIVQALDR)
df_matrix <- matrix(nrow = 4, ncol = 4)
df_matrix_ETDIGVTGGGGQGK <- matrix(nrow = 4, ncol = 4)
for(i in 1:length(df.list)){
df_n <- df.list[[i]]
for (j in 1:length(df.list)) {
SIM_score = SIM_function(df_n,df.list[[j]], ProteinProspector_AIEIVQALDR)
df_matrix[i,j] <- SIM_score
}
}
for(i in 1:length(df.list_ETDIGVTGGGGQGK)){
df_n <- df.list_ETDIGVTGGGGQGK[[i]]
for (j in 1:length(df.list_ETDIGVTGGGGQGK)) {
SIM_score = SIM_function(df_n,df.list_ETDIGVTGGGGQGK[[j]], ProteinProspector_ETDIGVTGGGQGK)
df_matrix_ETDIGVTGGGGQGK[i,j] <- SIM_score
}
}
colnames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
colnames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
library(RColorBrewer)
#cols <- brewer.pal(4,"BrBG")
scaleRYG <- colorRampPalette(c("#F3A5BF","#15688E"), space = "rgb")(100)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SimilarityScore_AIEIVDQALDR_ETDIGVTGGGQGK_v2.pdf",height = 10, width = 15)
par(mfrow = c(1,2))
corrplot(df_matrix, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
corrplot(df_matrix_ETDIGVTGGGGQGK, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
dev.off()
| /LibraryComparison_DotPoduct.R | no_license | anjitrue/Zymomona | R | false | false | 28,038 | r | library(gridExtra)
library(grid)
library(ggplot2)
library(lattice)
library(corrplot)
library(ggcorrplot)
##### Library Comparison ####
ProteinProspector_AIEIVQALDR <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/ProteinProspector_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
ProteinProspector_ETDIGVTGGGQGK <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/ProteinProspector_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
#Library Spectra
#PROSIT
PROSIT_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/PROSIT_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
colnames(PROSIT_IspH_AIIEIVDALDR_Spectra) <- c("m.z", "Relative.Abundance")
PROSIT_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/PROSIT_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
colnames(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra) <- c("m.z", "Relative.Abundance")
#### HIGH_Res data upload ####
High_Res_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/HighRes_Intensity_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance <- High_Res_IspH_AIIEIVDALDR_Spectra
High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Relative.Abundance <- High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Intensity/(max(High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance$Intensity))
colnames(High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
High_Res_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/HighRes_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance = High_Res_IspG_ETDIGVTGGGGQGK_Spectra
High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Relative.Abundance <- High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity/(max(High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity))
colnames(High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
#### LOW_Res data upload ####
Low_Res_IspH_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/LowRes_LibrarySpectra_AIEIVDQALDR.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance <- Low_Res_IspH_AIIEIVDALDR_Spectra
Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Relative.Abundance <- Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Intensity/(max(Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance$Intensity))
colnames(Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/LowRes_LibrarySpectra_ETDIGVTGGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance <- Low_Res_IspG_ETDIGVTGGGGQGK_Spectra
Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Relative.Abundance <- Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity/(max(Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance$Intensity))
colnames(Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
ggplot(PROSIT_IspH_AIIEIVDALDR_Spectra, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
xlim(150,1250)+
theme_classic()+
labs(title = "PROSIT AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
ggplot(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
xlim(150,1250)+
theme_classic()+
labs(title = "PROSIT ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
##################################################
# df_library = ProteinProspector_ETDIGVTGGGQGK
# df = FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance
spectra_plotting <- function(df_library, df){
# extract the library peptide ions into object fragment
fragment <- numeric()
for (i in 1:nrow(df_library)){
if(df_library[i,2] != 0 ){
fragment <- append(fragment,as.numeric(df_library[i,1]))
}
}
# Print the name of the dataframe that will be compared to library peptides
print(deparse(substitute(df)))
# Create an empty vector to put the transitions that were found in the experimental spectra
transitions <- numeric()
# Ion comparison has to be exact, there for I will truncate m/z values before the decimal
# and then turn them into characters for proper comparison
df.mass.to.charge.char <- as.character(trunc(df$m.z))
# iterate through library ions - fragment vector
for(i in 1:length(fragment)){
# truncate and turn fragment ion into a character
frag_char <- as.character(trunc(fragment[i]))
# assign a new variable for exact pattern matching
pat <- paste0("^",frag_char,"$")
# append matched ions to transition vector
# transitions object will be populated with the index of where the match was found
transitions <- append(transitions,grep(pat,
df.mass.to.charge.char))
}
# extract the m/z values using the index were pattern matching was found
mass.to.charge <- df$m.z[transitions]
print(mass.to.charge)
# reduce to only the unique m/z
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
# The following will be used to match the m/z based on intensity. If there is more than one m/z
# value being compared, the m/z with the greatest intensity is chosen.
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
print(final_list)
}
}
# Differentiate between the ions that are associatted to the peptide and those that are background.
# Color the peptide ions red and the background black
ion_type <- vector()
for (i in 1:nrow(df)) {
if(length(which(i %in% which(df$m.z %in% final_list))) == 1){
ion_type <- append(ion_type, "peptide")
}else{
ion_type <- append(ion_type, "background")
}
}
background <- df[grep("background",ion_type),]
peptide <- df[grep("peptide",ion_type),]
# produce the ggplot that will be returned with the function
spectra <- ggplot(background, aes(x=m.z, y= Relative.Abundance)) +
#geom_line()+
geom_linerange(aes(x=m.z, ymax=Relative.Abundance, ymin=0),
position = position_jitter(height = 0L, seed = 1L))+
geom_linerange(data = peptide,
aes(x=m.z, ymax = Relative.Abundance, ymin =0),
position = position_jitter(height = 0L, seed = 1L), color = "red")+
xlim(150,1250)+
theme_classic()
return <- spectra
}
##### Plot spectra #####
prosit_AIEIVDQALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, PROSIT_IspH_AIIEIVDALDR_Spectra)
PA <- prosit_AIEIVDQALDR + labs(title = "Prosit Theoretical Spectra - AIEVDQALDR", y = "Relative Abundance", x = "m/z")
high_res_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, High_Res_IspH_AIIEIVDALDR_Spectra.RelativeAbundance)
HA <- high_res_AIEIDVDQDALDR + labs(title = "High Resolution AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
low_res_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, Low_Res_IspH_AIIEIVDALDR_Spectra.RelaiveAbundance)
LA <- low_res_AIEIDVDQDALDR + labs(title = "Low Resolution AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
#labs(title = "Low Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
prosit_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, PROSIT_IspG_ETDIGVTGGGGQGK_Spectra)
PE <- prosit_ETDIGVTGGGGQGK + labs(title = "Prosit Theoretical Spectra - ETDIGVTGGGQGK", y= "Relative Abundance", x="m/z")
high_res_ETDIGVTGGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, High_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance)
HE <- high_res_ETDIGVTGGGGQGK + labs(title = "High Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
low_res_ETDIGVTGGGGQGK <- spectra_plotting(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra, Low_Res_IspG_ETDIGVTGGGGQGK_Spectra.RelativeAbundance)
LE <- low_res_ETDIGVTGGGGQGK + labs(title = "Low Resolution ETDIGVTGGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
##### Infusion Comparison to Library ######
# The following functin infusion_spectra_prep is meant to format the infusion spectra for generating
# the spectral plots
# Infusion_df <- noFAIMS_AIIEIVDALDR_Spectra
# max_Intensity <- 200000
infusion_spectra_prep <- function(Infusion_df, max_Intensity){
colnames(Infusion_df) <- c("m.z", "Intensity")
row_sub <- apply(Infusion_df, 1, function(row) all(row !=0))
Infusion_df <- Infusion_df[row_sub,]
Infusion_df <- Infusion_df[order(Infusion_df$m.z),]
Infusion_df_RelativeAbundance <- Infusion_df[(Infusion_df$Intensity < max_Intensity),]
Infusion_df_RelativeAbundance$Relative.Abundance <- Infusion_df_RelativeAbundance$Intensity/(max(Infusion_df_RelativeAbundance$Intensity))
return(Infusion_df_RelativeAbundance)
}
##### Infusion data upload #####
#AIEIVDQALDR
#FAIMS
FAIMS_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200126_ZymoFAIMS_IW2_R500K_MI502_AGC1e06_2.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance <- infusion_spectra_prep(FAIMS_AIIEIVDALDR_Spectra, 200000)
FAIMS_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
A1 <- FAIMS_AIEIDVDQDALDR + ylim(0,1) + labs(title = "FAIMS AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
#with out FAIMS
noFAIMS_AIIEIVDALDR_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200125_Zymo_IW2_R500K_MI502_AGC1e06_1.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance <- infusion_spectra_prep(noFAIMS_AIIEIVDALDR_Spectra, 200000)
noFAIMS_AIEIDVDQDALDR <- spectra_plotting(ProteinProspector_AIEIVQALDR, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
A2 <- noFAIMS_AIEIDVDQDALDR + ylim(0,1) + labs(title = "no FAIMS AIEIVDQDALDR Spectra", y = "Relative Abundance", x ="m/z")
# ETDIGVTGGGQGK #
#FAIMS
FAIMS_ETDIGVTGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200126_ZymoFAIMS_IW2_R500K_MI502_AGC1e06_2_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance <- infusion_spectra_prep(FAIMS_ETDIGVTGGGQGK_Spectra, 200000)
FAIMS_ETDIGVTGGGQGK <- spectra_plotting(ProteinProspector_ETDIGVTGGGQGK, FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
E1 <- FAIMS_ETDIGVTGGGQGK + ylim(0,1) + labs(title = "FAIMS ETDIGVTGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
#with out FAIMS
noFAIMS_ETDIGVTGGGQGK_Spectra <- read.csv("H:/Projects/Proteomics/Zymomona/FAIMS/DataAnalysis/LibraryComparison/20200125_Zymo_IW2_R500K_MI502_AGC1e06_1_ETDIGVTGGGQGK.csv",
header = TRUE, sep = ",", stringsAsFactors = FALSE)
noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance <- infusion_spectra_prep(noFAIMS_ETDIGVTGGGQGK_Spectra, 200000)
noFAIMS_ETDIGVTGGGGQGK <- spectra_plotting(PROSIT_IspG_ETDIGVTGGGGQGK_Spectra, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
E2 <- noFAIMS_ETDIGVTGGGGQGK + ylim(0,1) + labs(title = "no FAIMS ETDIGVTGGGQGK Spectra", y = "Relative Abundance", x ="m/z")
# Use the ggplotGrob function to prepare plots to be arranged for pdf printing
PAgrob <- ggplotGrob(PA)
HAgrob <- ggplotGrob(HA)
LAgrob <- ggplotGrob(LA)
PEgrob <- ggplotGrob(PE)
HEgrob <- ggplotGrob(HE)
LEgrob <- ggplotGrob(LE)
A1grob <- ggplotGrob(A1)
A2grob <- ggplotGrob(A2)
E1grob <- ggplotGrob(E1)
E2grob <- ggplotGrob(E2)
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2)
grid.arrange(A1grob, E1grob, A2grob, E2grob, ncol = 2)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_PROSIT_HIGHRES_LOWRES.pdf")
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2)
dev.off()
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_PROSIT_HIGHRES_LOWRES.pdf",
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, ncol =2),
width = 10,
height = 11.5,
units = "in")
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_FAIMS_noFAIMS.pdf",
grid.arrange(A1grob, E1grob, A2grob, E2grob, ncol =2),
width = 10,
height = 8.5,
units = "in")
ggsave("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SpectraComparisons_all.pdf",
grid.arrange(PAgrob, PEgrob, HAgrob, HEgrob, LAgrob, LEgrob, A1grob, E1grob, A2grob, E2grob, ncol =2),
width = 10,
height = 14.5,
units = "in")
#### TIC Explained ######
# To explain the TIC associated to the peptide fragments the function tic_explained_function
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance
tic_explained_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
print(final_list)
}
}
intensity_peptide <- df$Intensity[which(df$m.z %in% final_list)]
intensity_peptide_sum <- sum(intensity_peptide)
intensity_TIC <- sum(df$Intensity)
Tic_explained <- intensity_peptide_sum/intensity_TIC*100
return(Tic_explained)
}
Tic_explained_High_Res_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR, High_Res_IspH_AIIEIVDALDR_Spectra)
Tic_explained_Low_Res_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR,Low_Res_IspH_AIIEIVDALDR_Spectra)
Tic_explained_High_Res_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, High_Res_IspG_ETDIGVTGGGGQGK_Spectra)
Tic_explained_Low_Res_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, Low_Res_IspG_ETDIGVTGGGGQGK_Spectra)
### AIEIVDQALDR ###
colnames(FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_FAIMS_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR,FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
colnames(noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_noFAIMS_AIEIDVQALDR <- tic_explained_function(ProteinProspector_AIEIVQALDR, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
### ETDIGVTGGGQGK ###
colnames(FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_FAIMS_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK,FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
colnames(noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance) <- c("m.z", "Intensity", "Relative.Abundance")
Tic_explained_noFAIMS_ETDIGVTGGGQGK <- tic_explained_function(ProteinProspector_ETDIGVTGGGQGK, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
##### Dot Product ####
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- High_Res_IspH_AIIEIVDALDR_Spectra
peptide_ions_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
#print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
#print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
#print(final_list)
}
}
peptide <- df[which(df$m.z %in% final_list),]
return(peptide)
}
# df_proteinProspect <- ProteinProspector_AIEIVQALDR
# df <- High_Res_IspH_AIIEIVDALDR_Spectra
intensity_function <- function(df_proteinProspect, df){
transitions <- numeric()
df.mass.to.charge.char <- as.character(trunc(df_proteinProspect$mass.to.charge))
for(i in 1:length(df.mass.to.charge.char)){
frag_char <- df.mass.to.charge.char[i]
pat <- paste0("^",frag_char,"$")
transitions <- append(transitions,grep(pat,as.character(trunc(df$m.z))))
}
mass.to.charge <- df$m.z[transitions]
#print(mass.to.charge)
unique.m.t.c <- unique(trunc(mass.to.charge))
final_list <- vector()
for(i in 1:length(unique.m.t.c)){
comparison_intensity <- which(trunc(mass.to.charge) %in% unique.m.t.c[i])
if(length(comparison_intensity) <= 1){
keep <- mass.to.charge[comparison_intensity]
final_list <- append(final_list,keep)
#print(final_list)
}else if(length(comparison_intensity) > 1){
keep.intensity <- max(df$Intensity[which(df$m.z %in% mass.to.charge[comparison_intensity])])
keep <- df$m.z[which(df$Intensity == keep.intensity)]
final_list <- append(final_list, keep)
#print(final_list)
}
}
intensity_peptide <- df$Intensity[which(df$m.z %in% final_list)]
#intensity_peptide_sum <- sum(intensity_peptide)
return(intensity_peptide)
}
# df_1 <- High_Res_IspG_ETDIGVTGGGGQGK_Spectra
# df_2 <- noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance
# df_ProteinProspector <- ProteinProspector_ETDIGVTGGGQGK
DP_function <- function(df_1, df_2, df_ProteinProspector){
df1_peptide <- peptide_ions_function(df_ProteinProspector,df_1)
df2_peptide <- peptide_ions_function(df_ProteinProspector, df_2)
if(nrow(df1_peptide) >= nrow(df2_peptide)){
spec1 = df1_peptide
spec2 = df2_peptide
}else{
spec1 = df2_peptide
spec2 = df1_peptide
}
sum_intensity_combined = vector()
not_included = vector()
for(i in 1:nrow(spec1)){
print(i)
if(is.na(trunc(spec2$m.z[i]))){
print(paste0("i = ",i," Ion NOt here"))
print(spec1$m.z[i])
print(spec2$m.z[i])
}
if(length(which(trunc(spec1$m.z[i]) %in% trunc(spec2$m.z))) != 0){
spec1_intensity = spec1$Intensity[i]
spec2_intensity = spec2$Intensity[which(trunc(spec2$m.z) == trunc(spec1$m.z[i]))]
combined_intensity = spec1_intensity*spec2_intensity
sum_intensity_combined = append(sum_intensity_combined, combined_intensity)
print(sum_intensity_combined)
} else{
print(paste0("i = ",i," Ion NOt here"))
}
}
I_spec1 <- intensity_function(df_ProteinProspector, df_1)
I_spec2 <- intensity_function(df_ProteinProspector, df_2)
numerator <- sum(sum_intensity_combined)
denomenator <- sum(I_spec1^2)*sum(I_spec2^2)
DP <- numerator/sqrt(denomenator)
return(DP)
}
DP_function(High_Res_IspH_AIIEIVDALDR_Spectra,noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, ProteinProspector_AIEIVQALDR)
DP_function(High_Res_IspG_ETDIGVTGGGGQGK_Spectra,noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance, ProteinProspector_ETDIGVTGGGQGK)
df.list <- list(High_Res_IspH_AIIEIVDALDR_Spectra, Low_Res_IspH_AIIEIVDALDR_Spectra,
FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
df.list_ETDIGVTGGGGQGK <- list(High_Res_IspH_ETDIGVTGGGGQGK_Spectra, Low_Res_IspH_ETDIGVTGGGGQGK_Spectra,
FAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance, noFAIMS_ETDIGVTGGGQGK_Spectra_RelativeAbundance)
colnames(df.list_ETDIGVTGGGGQGK[[1]]) <- c("m.z","Intensity")
colnames(df.list_ETDIGVTGGGGQGK[[2]]) <- c("m.z","Intensity")
df_matrix <- matrix(nrow = 4, ncol = 4)
df_matrix_ETDIGVTGGGGQGK <- matrix(nrow = 4, ncol = 4)
for(i in 1:length(df.list)){
df_n <- df.list[[i]]
for (j in 1:length(df.list)) {
dot_product = DP_function(df_n,df.list[[j]],ProteinProspector_AIEIVQALDR)
df_matrix[i,j] <- dot_product
}
}
for(i in 1:length(df.list_ETDIGVTGGGGQGK)){
df_n <- df.list_ETDIGVTGGGGQGK[[i]]
for (j in 1:length(df.list_ETDIGVTGGGGQGK)) {
dot_product = DP_function(df_n,df.list_ETDIGVTGGGGQGK[[j]], ProteinProspector_ETDIGVTGGGQGK)
df_matrix_ETDIGVTGGGGQGK[i,j] <- dot_product
}
}
colnames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
colnames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
library(RColorBrewer)
#cols <- brewer.pal(4,"BrBG")
scaleRYG <- colorRampPalette(c("#F3A5BF","#15688E"), space = "rgb")(100)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/dotProduct_AIEIVDQALDR_ETDIGVTGGGQGK_v2.pdf",height = 10, width = 15)
par(mfrow = c(1,2))
corrplot(df_matrix, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
corrplot(df_matrix_ETDIGVTGGGGQGK, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
dev.off()
#ggcorrplot(df_matrix, type = "lower", outline.color = "white")
#### SIMilarity Score ####
# df_1 <- High_Res_IspH_AIIEIVDALDR_Spectra
# df_2 <- High_Res_IspH_AIIEIVDALDR_Spectra
SIM_function <- function(df_1, df_2, df_ProteinProspector){
df1_peptide <- peptide_ions_function(df_ProteinProspector,df_1)
df2_peptide <- peptide_ions_function(df_ProteinProspector, df_2)
if(nrow(df1_peptide) >= nrow(df2_peptide)){
spec1 = df1_peptide
spec2 = df2_peptide
}else{
spec1 = df2_peptide
spec2 = df1_peptide
}
sqrt_intensity = vector()
not_included = vector()
for(i in 1:nrow(spec1)){
print(i)
if(is.na(trunc(spec2$m.z[i]))){
print(paste0("i = ",i," Ion NOt here"))
print(spec1$m.z[i])
print(spec2$m.z[i])
}
if(length(which(trunc(spec1$m.z[i]) %in% trunc(spec2$m.z))) != 0){
spec1_intensity = spec1$Intensity[i]
spec2_intensity = spec2$Intensity[which(trunc(spec2$m.z) == trunc(spec1$m.z[i]))]
combined_intensity = sqrt(spec1_intensity*spec2_intensity)
sqrt_intensity = append(sqrt_intensity, combined_intensity)
} else{
print(paste0("i = ",i," Ion NOt here"))
}
}
I_spec1 <- intensity_function(df_ProteinProspector, df_1)
I_spec2 <- intensity_function(df_ProteinProspector, df_2)
numerator = sum(sqrt_intensity)
denomenator = sqrt(sum(I_spec1)*sum(I_spec2))
SIM = numerator/denomenator
return(SIM)
}
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,High_Res_IspH_AIIEIVDALDR_Spectra, ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,Low_Res_IspH_AIIEIVDALDR_Spectra,ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,FAIMS_AIIEIVDALDR_Spectra_RelativeAbundance, ProteinProspector_AIEIVQALDR)
SIM_function(High_Res_IspH_AIIEIVDALDR_Spectra,noFAIMS_AIIEIVDALDR_Spectra_RelativeAbundance)
df_matrix <- matrix(nrow = 4, ncol = 4)
df_matrix_ETDIGVTGGGGQGK <- matrix(nrow = 4, ncol = 4)
for(i in 1:length(df.list)){
df_n <- df.list[[i]]
for (j in 1:length(df.list)) {
SIM_score = SIM_function(df_n,df.list[[j]], ProteinProspector_AIEIVQALDR)
df_matrix[i,j] <- SIM_score
}
}
for(i in 1:length(df.list_ETDIGVTGGGGQGK)){
df_n <- df.list_ETDIGVTGGGGQGK[[i]]
for (j in 1:length(df.list_ETDIGVTGGGGQGK)) {
SIM_score = SIM_function(df_n,df.list_ETDIGVTGGGGQGK[[j]], ProteinProspector_ETDIGVTGGGQGK)
df_matrix_ETDIGVTGGGGQGK[i,j] <- SIM_score
}
}
colnames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
colnames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
rownames(df_matrix_ETDIGVTGGGGQGK) <- c("High Res", "Low Res", "Infusion FAIMS", "Infusion")
library(RColorBrewer)
#cols <- brewer.pal(4,"BrBG")
scaleRYG <- colorRampPalette(c("#F3A5BF","#15688E"), space = "rgb")(100)
pdf("H:/Projects/Proteomics/Zymomona/FAIMS/Figures/FromR/SimilarityScore_AIEIVDQALDR_ETDIGVTGGGQGK_v2.pdf",height = 10, width = 15)
par(mfrow = c(1,2))
corrplot(df_matrix, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
corrplot(df_matrix_ETDIGVTGGGGQGK, type = "lower", method = "color", #col = scaleRYG,
addCoef.col = "white",
tl.col = "black", tl.srt = 45,
cl.lim = c(0,1))
dev.off()
test_that("ECV_SS Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
ECV_SS_Expected <- c(.3229228, .7138154, .1426991, .4064201)
## First with no names
expect_equal(ECV_SS(Lambda), ECV_SS_Expected, tolerance = .000001)
## Now with names
colnames(Lambda) = c("A", "B", "C", "D")
names(ECV_SS_Expected) = c("A", "B", "C", "D")
expect_equal(ECV_SS(Lambda), ECV_SS_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
ECV_SS_Expected2 <- c(1, 1, 1)
expect_equal(ECV_SS(Lambda2), ECV_SS_Expected2, tolerance = .000001)
})
test_that("ECV_SG Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
ECV_SG_Expected <- c(.11744676, .71381542, .04862725, .12011057)
## First with no names
expect_equal(ECV_SG(Lambda), ECV_SG_Expected, tolerance = .000001)
colnames(Lambda) = c("A", "B", "C", "D")
names(ECV_SG_Expected) = c("A", "B", "C", "D")
## Now with names
expect_equal(ECV_SG(Lambda), ECV_SG_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(ECV_SG(Lambda2), NULL)
})
test_that("ECV_GS Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
ECV_GS_Expected <- c(.6770772, .71381542, .8573009, .5935799)
## First with no names
expect_equal(ECV_GS(Lambda), ECV_GS_Expected, tolerance = .000001)
colnames(Lambda) = c("A", "B", "C", "D")
names(ECV_GS_Expected) = c("A", "B", "C", "D")
## Now with names
expect_equal(ECV_GS(Lambda), ECV_GS_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(ECV_GS(Lambda2), NULL)
})
test_that("IECV Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
IECV_Expected <- c(0.9853458, 0.8287671, 0.8590502, 0.7411945, 0.3403559,
0.6290873, 0.9647402, 0.5901639, 0.7571994, 0.8316008,
0.8858474, 0.3497110)
## First with no names
expect_equal(IECV(Lambda), IECV_Expected, tolerance = .000001)
rownames(Lambda) = c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L")
names(IECV_Expected) = c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L")
## Now with names
expect_equal(IECV(Lambda), IECV_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(IECV(Lambda2), NULL)
})
test_that("Omega_S Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
Theta <- rep(1, nrow(Lambda)) - rowSums(Lambda^2)
Omega_S_Expected <- c(.9067561, .9482359, .8915387, .8400528)
## First with no names
expect_equal(Omega_S(Lambda, Theta), Omega_S_Expected, tolerance = .000001)
colnames(Lambda) = c("A", "B", "C", "D")
names(Omega_S_Expected) = c("A", "B", "C", "D")
## Now with names
expect_equal(Omega_S(Lambda, Theta), Omega_S_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
Theta2 <- rep(1, nrow(Lambda2)) - rowSums(Lambda2^2)
Omega_S_Expected2 <- c(.3654822, .5922131, .3654822)
expect_equal(Omega_S(Lambda2, Theta2), Omega_S_Expected2, tolerance = .000001)
})
test_that("Omega_H Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
Theta <- rep(1, nrow(Lambda)) - rowSums(Lambda^2)
Omega_H_Expected <- c(.2642460, .8507481, .1133118, .3040647)
## First with no names
expect_equal(Omega_H(Lambda, Theta), Omega_H_Expected, tolerance = .000001)
## Now with names
colnames(Lambda) = c("A", "B", "C", "D")
names(Omega_H_Expected) = c("A", "B", "C", "D")
expect_equal(Omega_H(Lambda, Theta), Omega_H_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
Theta2 <- rep(1, nrow(Lambda2)) - rowSums(Lambda2^2)
Omega_H_Expected2 <- c(.3654822, .5922131, .3654822)
expect_equal(Omega_H(Lambda2, Theta2), Omega_H_Expected2, tolerance = .000001)
})
test_that("PUC Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
expect_equal(PUC(Lambda), .7272727272727, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(PUC(Lambda2), NULL)
})
test_that("ARPB Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
UniLambda <- c(.78, .84, .82, .77, .69, .62, .69, .66, .82, .56, .74, .65)
## First with no names
ARPB_Expected <- list(ARPB = c(.102578),
AbsRelBias = c(0.04878049, 0.09090909, 0.03797468, 0.16666667,
0.35294118, 0.10714286, 0.01470588, 0.10000000,
0.01204819, 0.06666667, 0.05128205, 0.18181818))
expect_equal(ARPB(Lambda, UniLambda), ARPB_Expected, tolerance = .000001)
# Now, with names
rownames(Lambda) = c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L")
names(ARPB_Expected$AbsRelBias) = c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L")
expect_equal(ARPB(Lambda, UniLambda), ARPB_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
UniLambda2 <- c(.78, .84, .82, .77, .69, .62, .69, .66, .82)
expect_equal(ARPB(Lambda2, UniLambda2), NULL)
})
test_that("FD Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
Phi <- diag(nrow = 4)
FD_Expected <- c(0.8764811, 0.9500041, 0.6280990, 0.8438307)
## First with no names
expect_equal(FD(Lambda, Phi), FD_Expected, tolerance = .000001)
colnames(Lambda) = c("A", "B", "C", "D")
names(FD_Expected) = c("A", "B", "C", "D")
## Now with names
expect_equal(FD(Lambda, Phi), FD_Expected, tolerance = .000001)
## Now let's do one with correlated traits
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
Phi2 <- matrix(c(1, .3, .4, .3, 1, .5, .4, .5, 1), nrow = 3)
FD_Expected_2 <- c(0.6489614, 0.8052508, 0.6808390)
expect_equal(FD(Lambda2, Phi2), FD_Expected_2, tolerance = .000001)
})
test_that("H Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
H_Expected <- c(0.6340948, 0.9282446, 0.3070802, 0.6144805)
## First with no names
expect_equal(H(Lambda), H_Expected, tolerance = .000001)
colnames(Lambda) = c("A", "B", "C", "D")
names(H_Expected) = c("A", "B", "C", "D")
## Now with names
expect_equal(H(Lambda), H_Expected, tolerance = .000001)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
H_Expected_2 <- c(0.3837472, 0.6315076, 0.3837472)
expect_equal(H(Lambda2), H_Expected_2, tolerance = .000001)
})
test_that("getGen Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
expect_equal(getGen(Lambda), 2)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(getGen(Lambda2), NULL)
})
test_that("isBifactor Works", {
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
expect_equal(isBifactor(Lambda), TRUE)
## Now let's do one with no general factor
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
expect_equal(isBifactor(Lambda2), FALSE)
})
test_that("bifactorIndices Works", {
##bifactor from matrices
Lambda <- matrix(c( 0, .82, .10, 0,
0, .77, .35, 0,
0, .79, .32, 0,
0, .66, .39, 0,
0, .51, 0, .71,
0, .56, 0, .43,
0, .68, 0, .13,
0, .60, 0, .50,
.47, .83, 0, 0,
.27, .60, 0, 0,
.28, .78, 0, 0,
.75, .55, 0, 0),
ncol = 4, byrow = TRUE)
UniLambda <- c(.78, .84, .82, .77, .69, .62, .69, .66, .82, .56, .74, .65)
expect_equal(bifactorIndices(Lambda, UniLambda = UniLambda), readRDS("bindices_from_matrix.rds"), tolerance = .000001)
expect_error(bifactorIndices(Lambda, UniLambda = UniLambda, standardized = FALSE), "Not enough information is provided to compute indicator residual variances. Either provide indicator residual variances or use a standardized solution.")
# correlated traits example
Lambda2 <- matrix(c(.3, 0, 0,
.4, 0, 0,
.5, 0, 0,
0, .4, 0,
0, .6, 0,
0, .7, 0,
0, 0, .4,
0, 0, .5,
0, 0, .3),
ncol = 3, byrow = TRUE)
Phi2 <- matrix(c(1, .5, .4, .5, 1, .6, .4, .6, 1), nrow = 3)
expect_equal(bifactorIndices(Lambda2, Phi = Phi2), readRDS("Lambda2_indices.rds"), tolerance = .000001)
## bifactor from lavaan
SRS_UnidimensionalModel <-
"SRS =~ SRS_1 + SRS_2 + SRS_3 + SRS_4 + SRS_5 +
SRS_6 + SRS_7 + SRS_8 + SRS_9 + SRS_10 +
SRS_11 + SRS_12 + SRS_13 + SRS_14 + SRS_15 +
SRS_16 + SRS_17 + SRS_18 + SRS_19 + SRS_20"
SRS_BifactorModel <-
"SRS =~ SRS_1 + SRS_2 + SRS_3 + SRS_4 + SRS_5 +
SRS_6 + SRS_7 + SRS_8 + SRS_9 + SRS_10 +
SRS_11 + SRS_12 + SRS_13 + SRS_14 + SRS_15 +
SRS_16 + SRS_17 + SRS_18 + SRS_19 + SRS_20
Function =~ SRS_5 + SRS_9 + SRS_12 + SRS_15 + SRS_18
Pain =~ SRS_1 + SRS_2 + SRS_8 + SRS_11 + SRS_17
SelfImage =~ SRS_4 + SRS_6 + SRS_10 + SRS_14 + SRS_19
MentalHealth =~ SRS_3 + SRS_7 + SRS_13 + SRS_16 + SRS_20"
SRS_Unidimensional <- lavaan::cfa(SRS_UnidimensionalModel,
SRS_data,
ordered = paste0("SRS_", 1:20),
orthogonal = TRUE)
SRS_bifactor <- lavaan::cfa(SRS_BifactorModel,
SRS_data,
ordered = paste0("SRS_", 1:20),
orthogonal = TRUE)
expect_equal(bifactorIndices(SRS_bifactor, UniLambda = SRS_Unidimensional), readRDS("lav_indices.rds"), tolerance = .000001)
## Two tier from lavaan
MTMM_model <- "
Trait1 =~ T1M1_1+T1M1_2+T1M1_3+T1M2_1+T1M2_2+T1M2_3+T1M3_1+T1M3_2+T1M1_3
Trait2 =~ T2M1_1+T2M1_2+T2M1_3+T2M2_1+T2M2_2+T2M2_3+T2M3_1+T2M3_2+T2M1_3
Trait3 =~ T3M1_1+T3M1_2+T3M1_3+T3M2_1+T3M2_2+T3M2_3+T3M3_1+T3M3_2+T3M1_3
Method1 =~ T1M1_1+T1M1_2+T1M1_3+T2M1_1+T2M1_2+T2M1_3+T3M1_1+T3M1_2+T3M1_3
Method2 =~ T1M2_1+T1M2_2+T1M2_3+T2M2_1+T2M2_2+T2M2_3+T3M2_1+T3M2_2+T3M2_3
Method3 =~ T1M3_1+T1M3_2+T1M3_3+T2M3_1+T2M3_2+T2M3_3+T3M3_1+T3M3_2+T3M3_3
Trait1 ~~ 0*Method1
Trait1 ~~ 0*Method2
Trait1 ~~ 0*Method3
Trait2 ~~ 0*Method1
Trait2 ~~ 0*Method2
Trait2 ~~ 0*Method3
Trait3 ~~ 0*Method1
Trait3 ~~ 0*Method2
Trait3 ~~ 0*Method3
Method1 ~~ 0*Method2
Method1 ~~ 0*Method3
Method2 ~~ 0*Method3"
MTMM_fit <- lavaan::cfa(MTMM_model, MTMM_data)
expect_equal(bifactorIndices(MTMM_fit), readRDS("mtmm.rds"), tolerance = .000001)
## bifactor from mirt -- these lines commented out because they take too long for the R CMD check on CRAN
#bi_data <- read.csv("bifactorData.csv")
#colnames(bi_data) <- c(paste0("x", 1:24))
#specific <- c(1, 1, 2, 1, 2, 2, 2, 1, 3, 3, 1, 1, 2, 3, 2, 2, 1, 2, 3, 3, 3, 1, 3, 3)
#bi_fit_mirt <- mirt::bfactor(bi_data, specific)
#expect_equal(bifactorIndices(bi_fit_mirt), readRDS("mirt_indices.rds"), tolerance = .000001)
})
test_that("bifactorIndices_expl Works", {
library(psych)
SRS_BEFA <- fa(SRS_data, nfactors = 5, rotate = "bifactor")
ItemsBySF = list(MR4 = paste0("SRS_", c(5, 9, 12, 15, 18)),
MR2 = paste0("SRS_", c(1, 2, 8, 11, 17)),
MR3 = paste0("SRS_", c(4, 6, 10, 14, 19)),
MR5 = paste0("SRS_", c(3, 7, 13, 16, 20)))
expect_equal(bifactorIndices_expl(SRS_BEFA), readRDS("exploratory_bindices_SRS.rds"), tolerance = .000001)
expect_equal(bifactorIndices_expl(SRS_BEFA, ItemsBySF), readRDS("exploratory_bindices_SRS_fixed.rds"), tolerance = .000001)
})
test_that("bifactorIndicesMplus Works", {
cont_output <- MplusAutomation::readModels("continuous.out")
cat_output <- MplusAutomation::readModels("categorical.out")
cont_output_facvar <- MplusAutomation::readModels("bifactor_continuous_wrongfacvar.out")
expect_error(bifactorIndicesMplus(cont_output), "You must request standardized output from Mplus when standardized = TRUE")
expect_equal(bifactorIndicesMplus(cont_output, standardized = FALSE), readRDS("cont_unst.rds"), tolerance = .000001)
expect_equal(bifactorIndicesMplus(cat_output), readRDS("cat_stdyx.rds"), tolerance = .000001)
expect_error(bifactorIndicesMplus(cat_output, standardized = FALSE), "Bifactor indices require latent factors have variance = 1. Respecify your model or use standardized = TRUE")
expect_error(bifactorIndicesMplus(cont_output_facvar, standardized = FALSE), "Bifactor indices require latent factors have variance = 1. Respecify your model or use standardized = TRUE")
})
test_that("bifactorIndicesMplus_expl Works", {
efa_output <- MplusAutomation::readModels("bifactor_efa.out")
expect_error(bifactorIndicesMplus_expl(efa_output), "MplusAutomation does not support EFA output yet, but should soon!")
})
test_that("bifactorIndicesMplus_ESEM Works", {
std_output <- MplusAutomation::readModels("bifactor_esem.out")
nostd_output <- MplusAutomation::readModels("bifactor_esem_nostd.out")
expect_equal(bifactorIndicesMplus_ESEM(std_output), readRDS("ESEM.rds"), tolerance = .000001)
expect_error(bifactorIndicesMplus_ESEM(nostd_output), "You must request standardized output from Mplus when standardized = TRUE")
})
| /tests/testthat/test-BifactorIndicesCalculator.R | no_license | iago-ContributedForks/BifactorIndicesCalculator | R | false | false | 23,145 | r |
# Read data
data <- read.table("household_power_consumption.txt",
nrows = 173000,
sep = ";",
header = TRUE)
s <- split(data, data$Date)
data <- rbind(s[[2]], s[[46]]) # These are the two dates we need
rm(s) # We no longer need s
# Convert Date and Time columns into date-time (POSIXlt) format:
data[, "Date"] <- as.Date(data$Date, format="%d/%m/%Y")
dates <- data[, 1]
times <- data[, 2]
datetimes <- paste(dates, times)
datetimes <- strptime(datetimes, "%Y-%m-%d %H:%M:%S")
# Plot 3
png("plot3.png")
plot(datetimes,
as.numeric(as.character(data$Sub_metering_1)), # Convert factor into numeric
ylab ="Energy sub metering",
type = "l",
xlab = "")
lines(datetimes,
as.numeric(as.character(data$Sub_metering_2)), # Factor into numeric
col = "red")
lines(datetimes,
as.numeric(as.character(data$Sub_metering_3)), # Factor into numeric
col = "blue")
legend("topright",
lty = "solid",
col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | iramosp/ExData_Plotting1 | R | false | false | 1,123 | r |
#' FMCS searchable sigma values of taft fragments
#'
#' A dataset containing the fragment SMILES and various sigma values coming from datasets or
#' calculated from the dataset amalgamation. FMCS-searchable counterpart of the "taft" data frame.
#'
#' @format A structure-data file (SDF) list of 639 S4 objects
#' @source
#'
#'
#'
"taftSDF"
| /R/taftSDF.R | no_license | michaela-koopmans/HTdescR | R | false | false | 330 | r |
#' Summarize Wear and Nonwear Time Interval
#'
#' This function summarizes the classified wear (nonwear) time on an interval basis
#' from the epoch-by-epoch classified wear (nonwear) status classified by
#' \code{\link{wearingMarking}}.
#'
#' @param datavct Data with classified wear (nonwear) status classified by
#' \code{\link{wearingMarking}}.
#' @param wearing The column name for classified wear and nonwear status. The
#' default is "wearing".
#' @param TS The column name for timestamp. The default is "TimeStamp".
#' @param markingString Option for summarizing wear (markingString="w") or
#' nonwear time interval (markingString="nw").
#' @param by A sequence of days for classified wear (nonwear) time intervals.
#' @param id Optional output for subject identification or file name.
#'
#' @return The summary data for wear and nonwear time intervals.
#'
#' @template ref2011
#'
#' @templateVar author all
#' @template auth
#'
#' @seealso \code{\link{wearingMarking}}, \code{\link{summaryData}}
#'
#' @examples
#'
#' data(dataSec)
#'
#' mydata1m = dataCollapser(dataSec, TS = "TimeStamp", col = "counts", by = 60)
#'
#' data1m = wearingMarking(dataset = mydata1m,
#' frame = 90,
#' perMinuteCts = 1,
#' TS = "TimeStamp",
#' cts = "counts",
#' streamFrame = NULL,
#' allowanceFrame= 2,
#' newcolname = "wearing")
#'
#' sumVct(data1m, id="sdata1m")
#' sumVct(data1m, id="sdata1m", markingString = "nw")
#' @export
sumVct <- function(datavct, wearing = "wearing", TS = getOption('pa.timeStamp'),
markingString = "w", by = "days", id = NULL) {
len <- as.numeric(table(datavct[,by]))
len <- c(0, cumsum(len))
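  # cumulative epoch counts per day, used below to convert within-day row indices
  # back to positions in the full dataset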
zz <- split(datavct, datavct[,by])
allrst <- vector('list', length(zz))
for(i in seq_along(zz)) {
smalldatavct <- zz[[i]]
loc <- which(smalldatavct[,wearing] == markingString)
if(length(loc)) {
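      # 'loc' holds the epoch indices matching markingString within this day;
      # breaks in consecutive indices (diff != 1) mark interval boundaries, so pairing
      # run starts with run ends gives the start/end of each wear (nonwear) interval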
ix <- which(diff(loc) != 1)
pos <- sort(c(loc[1], loc[ix], loc[ix+1], loc[length(loc)]))
rst <- data.frame(matrix(pos, ncol=2, byrow=TRUE))
names(rst) <- c('start', 'end')
rst[,'duration'] <- rst[,'end'] - rst[,'start'] + 1
rst[,'startTimeStamp'] <- smalldatavct[rst[,'start'], TS]
rst[,'endTimeStamp'] <- smalldatavct[rst[,'end'], TS]
rst[,'days'] <- smalldatavct[1,by]
rst[,'start'] <- rst[,'start'] + len[i]
rst[,'end'] <- rst[,'end'] + len[i]
rst[,'weekday'] <- weekdays(rst[,'startTimeStamp'])
allrst[[i]] <- rst
}
}
collist <- c("startTimeStamp", "endTimeStamp", "days", "weekday", "start",
"end", "duration")
allrst <- do.call(rbind, allrst)[,collist]
if(!is.null(id)) {
allrst <- cbind(id = id, allrst)
}
allrst
}
| /R/sumVct.R | no_license | couthcommander/PhysicalActivity | R | false | false | 2,963 | r |
## Load libraries --------------------------
# General utility
library(phyloseq)
library(data.table)
library(tidyr)
library(biomformat)
# Plotting and analysis
library(vegan)
library(pairwiseAdonis)
# source helper functions for plotting and subsetting
source("src/helper_functions.R")
# set color scheme for sample types
sample_type_cols <- c(CCA = "#6469ed",
Coral = "#e49c4c",
Limu = "#7cc854" )
# Load Cleaned Metabolite Data----------------------------------------------------------------
chem_phy <- readRDS("data/processed/chem_phy.rds")
# 13. Statistics on Metabolite Subclass by Sample Type -----------------------------------------------
# For each metabolite subclass, run anova and look for fold changes in abundance between sample types
# This information can be used to confirm that subclasses actually associate with a given sample type.
# Results inform subclass ordinations, cytoscape outputs, etc. with simple linear statistics.
# pull out metabolite peak data
peak_data <- as.data.frame(
as(tax_table(chem_phy),
"matrix") )
# sum relative abundance of features in each subclass
# phyloseq treats taxonomy as hierarchical, so we need to drop most columns
# we only need 'Qemistree_subclass', which is the subclass
# we can also keep higher level classifications 'class' and 'superclass'
tax_mat <- as(tax_table(chem_phy),
"matrix")
tax_mat <- tax_mat[ , c("Qemistree_superclass",
"Qemistree_class","Qemistree_subclass")]
sub_tax_table <- tax_table(tax_mat)
sub_phy <-phyloseq(sample_data(chem_phy),
otu_table(chem_phy),
sub_tax_table)
# sum relative areas within a subclass
sub_merge <- tax_glom(sub_phy,
"Qemistree_subclass")
# the column 'Qemistree_subclass' identifies the subclass
unique_subclasses <- unique(as(tax_table(sub_merge),
"matrix")[,"Qemistree_subclass"])
# for each subclass, subset the data and run anova
subclass_data <- list()
for(a_subclass in unique_subclasses) {
# subset compounds by subclass
subclass_phy <- subset_taxa(sub_merge,
Qemistree_subclass == a_subclass)
# create a map identifying samples by sample types
sample_map <- setNames(sample_data(sub_phy)[["sample_type"]],
sample_names(sub_phy))
sample_types <-c("Limu",
"CCA",
"Coral")
# get mean relative abundance for each sample type present in the subclass
mean_RAs <- list()
for(a_sample_type in sample_types){
mean_RAs[[a_sample_type]] <- mean(
otu_table(
subset_samples(subclass_phy,
sample_type == a_sample_type)))
}
# prior to modeling, normalize data by transforming relative abundances to arcsin(sqrt(x))
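  # (the arcsine square-root transform is a standard variance-stabilizing transform
  # for proportion data, which helps meet the ANOVA assumptions below)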
arcsin_phy <- transform_sample_counts(subclass_phy,
fun = function(x)
asin(sqrt(x)) )
# pull out transformed counts of sample_types, clean up
sample_abunds <- data.frame(t( as(otu_table(arcsin_phy),
"matrix")))
sample_abunds[is.na(sample_abunds)] <- 0
# make table with columns for sample type and abundance
sample_table <- data.frame(sample_type = sample_map[row.names(sample_abunds)],
abundance = sample_abunds[[1]]
)
  # count how many samples the subclass was observed in
n_samp_obs <- nrow(sample_table[sample_table$abundance > 0,])
# run anova on sample type and get relevant outputs
aov_out <- aov(abundance ~ sample_type, data = sample_table)
aov_sum <- summary(aov_out)
p_val <- aov_sum[[1]][["Pr(>F)"]][[1]]
sum_sq <- aov_sum[[1]]["Sum Sq"][[1,1]]
f_val <- aov_sum[[1]]["F value"][1,1]
# run tukey HSD to test anova results
tuk_out <-TukeyHSD(aov_out)
# assemble the output as a list
# output list shows for each subclass, mean RA by sample type, log2 fold changes, anova + tukey results
subclass_data[[a_subclass]] <- list(
subclass = a_subclass,
n_samp_obs = n_samp_obs,
Limu_MA = mean_RAs$Limu,
CCA_MA = mean_RAs$CCA,
Coral_MA = mean_RAs$Coral,
FC_LimuVCoral = fc(mean_RAs$Limu, mean_RAs$Coral),
FC_LimuVCCA = fc(mean_RAs$Limu, mean_RAs$CCA),
FC_CoralVCCA = fc(mean_RAs$Coral, mean_RAs$CCA),
tuk_Coral_CCA_diff = tuk_out[[1]]["Coral-CCA","diff"],
tuk_Coral_CCA_p = tuk_out[[1]]["Coral-CCA","p adj"],
tuk_Limu_CCA_diff = tuk_out[[1]]["Limu-CCA","diff"],
tuk_Limu_CCA_p = tuk_out[[1]]["Limu-CCA","p adj"],
tuk_Limu_Coral_diff = tuk_out[[1]]["Limu-Coral","diff"],
tuk_Limu_Coral_p = tuk_out[[1]]["Limu-Coral","p adj"],
f_stat = f_val,
sum_of_sq = sum_sq,
p_val = p_val
)
}
# put the fold changes for all subclasses into a nice data.frame
subclass_fold_changes <- do.call(rbind, subclass_data)
subclasses <- row.names(subclass_fold_changes)
subclass_fold_changes <-as.data.frame(apply(subclass_fold_changes, 2, as.numeric))
subclass_fold_changes$subclass <- subclasses
# adjust p values for multiple testing using the Benjamini-Hochberg (BH) procedure
subclass_fold_changes$adj_p_val <- p.adjust(subclass_fold_changes$p_val, method = "BH")
# classify enrichment in a sample type
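# a subclass is labeled enriched in a sample type when at least one Tukey pairwise
# contrast involving that type is significant (p < 0.05) and its mean relative abundance
# exceeds that of the compared type; multiple enriched types are concatenated into one label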
classify_DA <- function(a_subclass, results_table = subclass_fold_changes){
results_table <- results_table[results_table$subclass == a_subclass,]
# vector identifying enrichment in sample types
enriched <- c()
# tests
if(results_table$tuk_Coral_CCA_p < 0.05 & results_table$Coral_MA > results_table$CCA_MA |
results_table$tuk_Limu_Coral_p < 0.05 & results_table$Coral_MA > results_table$Limu_MA){
enriched <- append(enriched,"Coral")
}
if(results_table$tuk_Coral_CCA_p < 0.05 & results_table$CCA_MA > results_table$Coral_MA |
results_table$tuk_Limu_CCA_p < 0.05 & results_table$CCA_MA > results_table$Limu_MA){
enriched <- append(enriched,"CCA")
}
if(results_table$tuk_Limu_CCA_p < 0.05 & results_table$Limu_MA > results_table$CCA_MA |
results_table$tuk_Limu_Coral_p < 0.05 & results_table$Limu_MA > results_table$Coral_MA){
enriched <- append(enriched,"Limu")
}
if(is.null(enriched)){
return("NA")
}else{
return(paste(enriched, sep = "/", collapse = ""))
}
}
# get differentially abundant sample types for all features
subclass_fold_changes$sample_type_DA <-
vapply(subclass_fold_changes$subclass,
classify_DA,
FUN.VALUE = character(1))
write.csv(subclass_fold_changes,
"data/processed/subclass_anova_and_fold_changes.csv",
row.names = F)
| /9_Metabolite_Subclasses.R | no_license | soswift/waimea_marine | R | false | false | 6,668 | r |
options(stringsAsFactors = FALSE)
library(stringr)
library(plyr)
## PREPARATION
# Getting page files
f = list.files("raw_fichas/")
f = f[order(as.integer(gsub("page(.*).csv", "\\1", f)))]
f = paste0("victims/raw_data/Albacete/raw_fichas/", f)
# Merge
df_list = lapply(f, function(x) read.csv(x))
if(any(sapply(df_list, function(x) ncol(x)) != 4)){
  stop("problem with ncol")
} else {
  data = as.data.frame(do.call("rbind", df_list))
}
# Removing people without information
data = subset(data, head != " .")
## SPLITTING VARIABLES
info = str_split(data$info, "\n")
info_n = sapply(info, function(x) length(x))
label = str_split(data$label, "\n")
label_n = sapply(label, function(x) length(x))
# Some observaciones are split in several lines
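# when a record has more info lines than labels, the surplus lines are continuations of
# the free-text "Observaciones" field: paste them back into that entry and drop the extras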
for(i in which(info_n != label_n)){
diff = info_n[i] - label_n[i]
obs = which(grepl("Observaciones", label[[i]]))
merge = seq(obs, obs+diff, 1)
remove = merge[!merge %in% obs]
info[[i]][obs] = paste(info[[i]][merge], collapse = " ")
info[[i]] = info[[i]][-remove]
}
# Remove final : from labels & change spaces
label = lapply(label, function(x) gsub("(: |: )$", "", x))
label = lapply(label, function(x) gsub(" ", "_", x))
# Turn into a list of dataframes
### (is it possible to do with lapply?)
for(i in 1:length(info)){
info[[i]] = t(data.frame(info[[i]], row.names = label[[i]]))
info[[i]] = as.data.frame(info[[i]])
row.names(info[[i]]) = i
}
# Transform to data frame and order (just in case)
info_df = as.data.frame(do.call("rbind.fill", info))
info_df = info_df[order(as.integer(row.names(info_df))),]
# Put everything together
data = cbind(data[, c("nombre", "head")], info_df)
# Add info on birthplace if available
data$head = gsub("\\.$", "", data$head)
data$Natural_de = gsub(".*natural de ", "", data$head)
data$Natural_de = gsub("( y m| \\().*", "", data$Natural_de)
data$Natural_de[!grepl("natural", data$head)] = NA
# Add info on death date
data$Fecha_muerte = gsub(".*murio el dia ", "", data$head)
data$Fecha_muerte[!grepl("murio", data$head)] = NA
data$Fecha_muerte = as.Date(data$Fecha_muerte, "%d/%m/%Y")
# Clean a couple things
data$Tipo[data$Tipo %in% c("Muerto en cumplimento de sentencia",
"Muerto en cumplimineto de sentencia")] =
"Muerto en cumplimiento de sentencia"
data$Tipo[data$Tipo == "prision"] = "Prision"
### SAVE DATA
write.csv(data, "victims_albacete_raw.csv", row.names = FALSE)
| /R/victims_albacete_clean.R | no_license | franvillamil/franvillamil.github.io | R | false | false | 2,392 | r |
# FIGURE 1: SCATTERPLOTS AND CLUSTERING
# ensure reproducibility by fixing the random seed to an arbitrary offset:
load(latestPrepName)
set.seed(1234)
message("=== FIGURE 1 ===")
### CORRELATION HEATMAPS (Figure 1C) ###
# perform hierarchical clustering and plot heatmap:
message("Correlation heatmaps (Figure 1.c)...")
for (curType in c("absolute","relative")) {
curDatasets <- datasetsByType[[curType]]
message("\t* ",curType)
curCorrelations <- corTable[curDatasets,curDatasets]
colnames(curCorrelations) <- datasetTable[curDatasets,"prettyLabel"]
rownames(curCorrelations) <- datasetTable[curDatasets,"prettyLabel"]
clust <- hclust(dist(1-abs(curCorrelations)),"average")
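  # note: dist() computes Euclidean distances between the rows of the (1 - |r|) matrix;
  # to use 1 - |r| itself as the dissimilarity, as.dist() would be needed (left unchanged here)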
svgPlot(paste0("cor_hierarchical_clustering_",curType), 5, 5)
par(mar=c(4,2,1,8))
dend <- as.dendrogram(clust)
plot(dend,horiz=TRUE)
dev.off()
cOrder <- rank(clust$order[datasetNames %in% curDatasets])
svgPlot(paste("cor_heatmap_",curType), 12, 12)
pheatmap(curCorrelations[cOrder,cOrder],
         col=colorRampPalette(rev(brewer.pal(5,"Oranges")), bias=0.2, space="Lab")(20),
         breaks=seq(0,1,0.05), border_color="white",
         cellwidth=22, cellheight=22,
         cluster_cols=FALSE, cluster_rows=FALSE,
         display_numbers=TRUE, fontsize=13, fontcolor="black")
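# note (assumption): "fontcolor" does not appear to be a documented pheatmap argument;
# the colour of the displayed numbers is normally set via number_color, so the argument
# above may simply be ignored (black is the default anyway)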
dev.off()
}
### EXAMPLE SCATTERPLOTS (Figure 1D) ###
# for selection of datasets:
# sort(corTable[rownames(corTable) %in% datasetsByType$absolute,"Pyroseq_1"])
message("Example scatter plots (Figure 1.d)...")
for(s2 in c("Pyroseq_1")) {
for(s1 in c("Pyroseq_1_replicate","Pyroseq_2","Infinium","EpiTyper_3","AmpliconBS_1","EnrichmentBS_1")) {
message("\t* ",s1,"-vs-",s2)
ggDataSelected <- assayVsAssayData[assayVsAssayData$assay1==s1&assayVsAssayData$assay2==s2,]
d <- ggplot(data=ggDataSelected, aes(x=meth1, y=meth2)) +
  geom_point(shape=16, alpha=0.3) +
  geom_smooth(method=lm, fill='steelblue') +
  theme_bw() +
  xlab(DNA_METH_LABEL) + ylab(DNA_METH_LABEL) +
  xlim(0,100) + ylim(0,100) +
  annotate("text", x = 100, y = 0,
           label = paste("r =", format(cor(ggDataSelected$meth1, ggDataSelected$meth2), digits=3, nsmall=3)),
           hjust=1, vjust=0, size=4) +
  labs(title = datasetTable[s1,"prettyLabel"]) +
  theme(axis.line = element_line(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank())
svgPlotGG(d, paste0("individual_scatterplots_ggplot_",s1,"-vs-",s2), 3, 3, units="in")
}
}
### BEESWARM OVERVIEW PLOTS (Figure 1B) ###
message("Beeswarm overview plots (Figure 1.b)...")
svgPlot("beeswarm_1D_panel", 5, 3.4, pointsize=11)
temp <- lsaData[lsaData$datasetName%in%datasetsByType[["absolute"]],]
temp <- temp[sample(nrow(temp),nrow(temp)),] # randomize order so not to bias the looks of the plots
par(xpd=NA)
par(mfrow=c(3,1))
par(mgp=c(1.75,0.5,0))
par(mar=c(0.25,2.75,2,0.5)+0.1)
# plots the three components of the figure panel:
for(curCat in c("dataset","sample","region")) {
message("\t* ",curCat)
bees <- beeswarm(
  as.formula(paste("methValue", "~", curCat, "Name", sep="")), data=temp, labels=NA,
  horizontal=FALSE, cex=0.05, spacing=2, cex.lab=1, pch=16, col="#555555",
  method="hex", corral="random",
  main=paste(length(unique(temp[,paste(curCat,"Name",sep="")])), " ", curCat, "s", sep=""),
  xlab=NA, ylab=NA, ylim=c(0,100),
  pwcol=makeTransparent(plotColLookup$datasetName[temp$datasetName], 0.75),
  bty='l', xaxt='n', yaxt='n'
)
axis(2,seq(0,100,25),as.character(c(0,NA,50,NA,100)),las=1,cex=0.9)
}
dev.off()
# p-values (for pairwise differences in overall distributions):
temp2 <- temp[order(temp$datasetName,temp$regionName,temp$sampleName),]
cmps <- list(
"TumorNormal"=c("Tumor","Normal","two.sided"),
"DrugControl"=c("Control","AzaC","two.sided"),
"FrozenFFPE"=c("FFPE","Frozen","two.sided"),
"Titration1"=c("1_100","6_0","two.sided"),
"Titration2"=c("1_100","6_0","two.sided")
)
pvals <- c()
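# Editor's gloss: each comparison below pairs measurements by sample/region/dataset and
# runs a paired t-test on M-values (logit2-transformed betas), which are less bounded
# than raw percentages and therefore better behaved for t-tests.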
for(selSampleType in names(cmps)) {
g1 <- grep(cmps[[selSampleType]][1],sampleNamesByType[[selSampleType]],value=TRUE)
g2 <- grep(cmps[[selSampleType]][2],sampleNamesByType[[selSampleType]],value=TRUE)
temp3 <- temp2[temp2$sampleName%in%c(g1,g2),]
temp3$simpleSampleName <- gsub(paste0("(",cmps[[selSampleType]][1],"|",cmps[[selSampleType]][2],")"),"",temp3$sampleName)
temp3g1 <- temp3[temp3$sampleName%in%g1,c("simpleSampleName","regionName","datasetName","methValue")]
temp3g2 <- temp3[temp3$sampleName%in%g2,c("simpleSampleName","regionName","datasetName","methValue")]
temp3mrg <- merge(temp3g1,temp3g2, by=c("simpleSampleName","regionName","datasetName"), all=FALSE)
pval <- t.test(beta2mval(temp3mrg$methValue.x/100), beta2mval(temp3mrg$methValue.y/100), alternative = cmps[[selSampleType]][3], paired = TRUE)
message("paired, two-sided t-test for ", selSampleType, ": ", cmps[[selSampleType]][1], " vs ", cmps[[selSampleType]][2], " (",cmps[[selSampleType]][3],")?\tp-value = ", pval$p.value)
pvals <- c(pvals,pval$p.value)
names(pvals)[length(pvals)] <- selSampleType
}
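# p.adjust() without an explicit method argument applies Holm correction by default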
print(round(p.adjust(pvals),8))
# same with corrected titration data:
temp2 <- correctedTitrationData[order(correctedTitrationData$datasetName,correctedTitrationData$regionName,correctedTitrationData$titrationType,correctedTitrationData$titrationPercent),]
temp2 <- temp2[temp2$titrationPercent%in%c(0,100),]
cmps <- list(
"Titration1"=c(100,0,"two.sided"),
"Titration2"=c(100,0,"two.sided")
)
pvals <- c()
for(titrationType in names(cmps)) {
g1 <- temp2$titrationPercent == cmps[[titrationType]][1] & temp2$titrationType == titrationType
g2 <- temp2$titrationPercent == cmps[[titrationType]][2] & temp2$titrationType == titrationType
temp3g1 <- temp2[g1,c("regionName","datasetName","meth")]
temp3g2 <- temp2[g2,c("regionName","datasetName","meth")]
temp3mrg <- merge(temp3g1,temp3g2, by=c("regionName","datasetName"), all=FALSE)
pval <- t.test(beta2mval(temp3mrg$meth.x/100), beta2mval(temp3mrg$meth.y/100), alternative = cmps[[titrationType]][3], paired = TRUE)
message("paired, two-sided t-test for ", titrationType, ": ", cmps[[titrationType]][1], " vs ", cmps[[titrationType]][2], " (",cmps[[titrationType]][3],")?\tp-value = ", pval$p.value)
pvals <- c(pvals,pval$p.value)
names(pvals)[length(pvals)] <- titrationType
}
print(round(p.adjust(pvals),8))
save.image(paste("methBench","afterFig1",format(Sys.time(), "%Y%m%d_%H%M"),"session.Rdata",sep="_"))
| /analysis/methBench_fig1.R | permissive | epigen/methylation-biomarker-benchmark | R | false | false | 6,280 | r | # FIGURE 1: SCATTERPLOTS AND CLUSTERING
# ensure reproducibility by fixing the random seed to an arbitrary offset:
load(latestPrepName)
set.seed(1234)
message("=== FIGURE 1 ===")
### CORRELATION HEATMAPS (Figure 1C) ###
# perform hierarchical clustering and plot heatmap:
message("Correlation heatmaps (Figure 1.c)...")
for (curType in c("absolute","relative")) {
curDatasets <- datasetsByType[[curType]]
message("\t* ",curType)
curCorrelations <- corTable[curDatasets,curDatasets]
colnames(curCorrelations) <- datasetTable[curDatasets,"prettyLabel"]
rownames(curCorrelations) <- datasetTable[curDatasets,"prettyLabel"]
clust <- hclust(dist(1-abs(curCorrelations)),"average")
svgPlot(paste0("cor_hierarchical_clustering_",curType), 5, 5)
par(mar=c(4,2,1,8))
dend <-as.dendrogram(clust)
plot(dend,horiz=TRUE)
dev.off()
cOrder <- rank(clust$order[datasetNames %in% curDatasets])
svgPlot(paste("cor_heatmap_",curType), 12, 12)
pheatmap(curCorrelations[cOrder,cOrder],col=colorRampPalette(rev(brewer.pal(5,"Oranges")),bias=0.2,space="Lab")(20),breaks=seq(0,1,0.05),border_color="white",cellwidth=22,cellheight=22,cluster_cols=FALSE,cluster_rows=FALSE,display_numbers=TRUE,fontsize=13,fontcolor="black")
dev.off()
}
### EXAMPLE SCATTERPLOTS (Figure 1D) ###
# for selection of datasets:
# sort(corTable[rownames(corTable) %in% datasetsByType$absolute,"Pyroseq_1"])
message("Example scatter plots (Figure 1.d)...")
for(s2 in c("Pyroseq_1")) {
for(s1 in c("Pyroseq_1_replicate","Pyroseq_2","Infinium","EpiTyper_3","AmpliconBS_1","EnrichmentBS_1")) {
message("\t* ",s1,"-vs-",s2)
ggDataSelected <- assayVsAssayData[assayVsAssayData$assay1==s1&assayVsAssayData$assay2==s2,]
d <- ggplot(data=ggDataSelected,aes(x=meth1,y=meth2)) + geom_point(shape=16,alpha=0.3) + geom_smooth(method=lm,fill='steelblue') + theme_bw() + xlab(DNA_METH_LABEL) + ylab(DNA_METH_LABEL) + xlim(0,100) + ylim(0,100) + annotate("text", x = 100, y = 0, label = paste("r =",format(cor(ggDataSelected$meth1,ggDataSelected$meth2),digits=3,nsmall=3)),hjust=1,vjust=0,size=4) + labs(title = datasetTable[s1,"prettyLabel"]) + theme( axis.line = element_line(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank()
)
svgPlotGG(d, paste0("individual_scatterplots_ggplot_",s1,"-vs-",s2), 3, 3, units="in")
}
}
### BEESWARM OVERVIEW PLOTS (Figure 1B) ###
message("Beeswarm overview plots (Figure 1.b)...")
svgPlot("beeswarm_1D_panel", 5, 3.4, pointsize=11)
temp <- lsaData[lsaData$datasetName%in%datasetsByType[["absolute"]],]
temp <- temp[sample(nrow(temp),nrow(temp)),] # randomize order so not to bias the looks of the plots
par(xpd=NA)
par(mfrow=c(3,1))
par(mgp=c(1.75,0.5,0))
par(mar=c(0.25,2.75,2,0.5)+0.1)
# plots the three components of the figure panel:
for(curCat in c("dataset","sample","region")) {
message("\t* ",curCat)
bees <- beeswarm(
as.formula(paste("methValue","~",curCat,"Name",sep="")), data=temp, labels=NA,
horizontal=FALSE, cex=0.05, spacing=2, cex.lab=1, pch=16, col="#555555",method="hex",corral="random",main=paste(length(unique(temp[,paste(curCat,"Name",sep="")]))," ",curCat,"s",sep=""),xlab=NA,ylab=NA,ylim=c(0,100),pwcol=makeTransparent(plotColLookup$datasetName[temp$datasetName],0.75), bty='l', xaxt='n',yaxt='n'
)
axis(2,seq(0,100,25),as.character(c(0,NA,50,NA,100)),las=1,cex=0.9)
}
dev.off()
# p-values (for pairwise differences in overall distributions):
temp2 <- temp[order(temp$datasetName,temp$regionName,temp$sampleName),]
cmps <- list(
"TumorNormal"=c("Tumor","Normal","two.sided"),
"DrugControl"=c("Control","AzaC","two.sided"),
"FrozenFFPE"=c("FFPE","Frozen","two.sided"),
"Titration1"=c("1_100","6_0","two.sided"),
"Titration2"=c("1_100","6_0","two.sided")
)
pvals <- c()
for(selSampleType in names(cmps)) {
g1 <- grep(cmps[[selSampleType]][1],sampleNamesByType[[selSampleType]],value=TRUE)
g2 <- grep(cmps[[selSampleType]][2],sampleNamesByType[[selSampleType]],value=TRUE)
temp3 <- temp2[temp2$sampleName%in%c(g1,g2),]
temp3$simpleSampleName <- gsub(paste0("(",cmps[[selSampleType]][1],"|",cmps[[selSampleType]][2],")"),"",temp3$sampleName)
temp3g1 <- temp3[temp3$sampleName%in%g1,c("simpleSampleName","regionName","datasetName","methValue")]
temp3g2 <- temp3[temp3$sampleName%in%g2,c("simpleSampleName","regionName","datasetName","methValue")]
temp3mrg <- merge(temp3g1,temp3g2, by=c("simpleSampleName","regionName","datasetName"), all=FALSE)
pval <- t.test(beta2mval(temp3mrg$methValue.x/100), beta2mval(temp3mrg$methValue.y/100), alternative = cmps[[selSampleType]][3], paired = TRUE)
message("paired, two-sided t-test for ", selSampleType, ": ", cmps[[selSampleType]][1], " vs ", cmps[[selSampleType]][2], " (",cmps[[selSampleType]][3],")?\tp-value = ", pval$p.value)
pvals <- c(pvals,pval$p.value)
names(pvals)[length(pvals)] <- selSampleType
}
print(round(p.adjust(pvals),8))
# same with corrected titration data:
temp2 <- correctedTitrationData[order(correctedTitrationData$datasetName,correctedTitrationData$regionName,correctedTitrationData$titrationType,correctedTitrationData$titrationPercent),]
temp2 <- temp2[temp2$titrationPercent%in%c(0,100),]
cmps <- list(
"Titration1"=c(100,0,"two.sided"),
"Titration2"=c(100,0,"two.sided")
)
pvals <- c()
for(titrationType in names(cmps)) {
g1 <- temp2$titrationPercent == cmps[[titrationType]][1] & temp2$titrationType == titrationType
g2 <- temp2$titrationPercent == cmps[[titrationType]][2] & temp2$titrationType == titrationType
temp3g1 <- temp2[g1,c("regionName","datasetName","meth")]
temp3g2 <- temp2[g2,c("regionName","datasetName","meth")]
temp3mrg <- merge(temp3g1,temp3g2, by=c("regionName","datasetName"), all=FALSE)
pval <- t.test(beta2mval(temp3mrg$meth.x/100), beta2mval(temp3mrg$meth.y/100), alternative = cmps[[titrationType]][3], paired = TRUE)
message("paired, two-sided t-test for ", titrationType, ": ", cmps[[titrationType]][1], " vs ", cmps[[titrationType]][2], " (",cmps[[titrationType]][3],")?\tp-value = ", pval$p.value)
pvals <- c(pvals,pval$p.value)
names(pvals)[length(pvals)] <- titrationType
}
print(round(p.adjust(pvals),8))
save.image(paste("methBench","afterFig1",format(Sys.time(), "%Y%m%d_%H%M"),"session.Rdata",sep="_"))
|