content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Workshop 4: exponential smoothing (SES, Holt, Holt-Winters) with
# rolling-origin evaluation. Requires 'forecast', 'smooth' and 'TStools'.
library(forecast)
library(smooth)
library(TStools)

# Load the data
medium_noise <- read.csv("medium_noise.csv", header = FALSE)
# Convert to a monthly time series starting January 2012
medium_noise <- ts(medium_noise, frequency = 12, start = c(2012, 1))

# Set forecast horizon and number of rolling origins
h <- 12
origins <- 10
medium_noise_length <- length(medium_noise)
# The training set stops so that h + origins - 1 observations remain for testing
train_length <- medium_noise_length - h - origins + 1
test_length <- h + origins - 1
medium_noise_train <- ts(medium_noise[1:train_length],
                         frequency = frequency(medium_noise),
                         start = start(medium_noise))
medium_noise_test <- medium_noise[(train_length + 1):medium_noise_length]

# Matrices holding the forecasts and the matching holdout values:
# one row per origin, one column per forecast-horizon step
medium_noise_forecasts <- matrix(NA, nrow = origins, ncol = h)
medium_noise_holdout <- matrix(NA, nrow = origins, ncol = h)
colnames(medium_noise_forecasts) <- paste0("horizon", c(1:h))
rownames(medium_noise_forecasts) <- paste0("origin", c(1:origins))
dimnames(medium_noise_holdout) <- dimnames(medium_noise_forecasts)
View(medium_noise_holdout)

# Rolling-origin evaluation: grow the training window by one step per origin
for (i in 1:origins) {
  # Create a ts object out of the medium noise data
  our_train_set <- ts(medium_noise[1:(train_length + i - 1)],
                      frequency = frequency(medium_noise),
                      start = start(medium_noise))
  # Write down the holdout values from the test set
  medium_noise_holdout[i, ] <- medium_noise_test[i - 1 + (1:h)]
  # Produce forecasts and write them down
  medium_noise_forecasts[i, ] <- forecast(ets(our_train_set, "ANN"), h = h)$mean
}
# MAE for each horizon
colMeans(abs(medium_noise_holdout - medium_noise_forecasts))

### SES ###
# Fit SES with a fixed initial seed
es_ANN_initial_1 <- es(medium_noise, model = "ANN", initial = medium_noise[1],
                       h = h, holdout = TRUE)
es_ANN_initial_1$accuracy
# Fit SES with an optimised seed
es_ANN_opt <- es(medium_noise, model = "ANN", h = h, holdout = TRUE)
es_ANN_opt$accuracy
# Benchmarking: naive forecast = SES with the smoothing parameter fixed at 1
# (fixed: the argument is spelled 'persistence'; the misspelt 'persistance'
# was silently swallowed by es()'s ..., so the model was NOT a naive benchmark)
medium_noise_naive <- es(medium_noise, model = "ANN", persistence = 1,
                         h = h, holdout = TRUE)
medium_noise_naive$accuracy

## Other exponential smoothing methods: Holt's method
trend_data <- read.csv("trend_data.csv")
plot(trend_data$x, type = "l")
trend_data <- ts(trend_data, frequency = 12)
plot(trend_data)
trend_data_length <- length(trend_data)
# Split into training and testing
trend_data_train <- ts(trend_data[1:36], frequency = 12)
trend_data_test <- trend_data[37:trend_data_length]
# Calculate Holt's method (additive error, additive trend, no seasonality)
ets_ANN <- ets(trend_data_train, model = "AAN")
ets_ANN
coef(ets_ANN)
forecast(ets_ANN, h = h)$mean
plot(forecast(ets_ANN, h = h))
# Calculate a damped Holt method
ets_AAdn <- ets(trend_data_train, model = "AAN", damped = TRUE)
ets_AAdn
# Fit Holt's method, no damped trend
ets(trend_data_train, model = "AAN", damped = FALSE)
es_AAdn <- es(trend_data, model = "AAdN", h = h, holdout = TRUE)

## Holt-Winters
trend_seasonal_data <- read.csv("trend_seasonal_data.csv", header = FALSE)
trend_seasonal_data <- ts(trend_seasonal_data, frequency = 12)
plot(trend_seasonal_data)
trend_seasonal_data_train <- ts(trend_seasonal_data[1:36], frequency = 12)
# (fixed: the test split previously indexed with 'trend_data_length', the
# length of a DIFFERENT series)
trend_seasonal_data_test <- trend_seasonal_data[37:length(trend_seasonal_data)]
# Fit a model using ets()
ets_AAA <- ets(trend_seasonal_data_train, model = "AAA", damped = FALSE)
# Do the same thing using es():
es_AAA <- es(trend_seasonal_data_train, model = "AAA", h = h)
ets_AAA
es_AAA

# Selecting the best model based on optimization
# Calculate an optimized ETS method using ets()
# (fixed: model selection was run on the TEST set; the training set is used
# here, consistent with the es() call right below)
ets_ZZZ <- ets(trend_seasonal_data_train, model = "ZZZ")
# Do the same thing using es()
es_ZZZ <- es(trend_seasonal_data_train, model = "ZZZ")
# Select the most appropriate non-seasonal model with ets()
ets_ZZN <- ets(trend_data_train, model = "ZZN")
# Do the same thing with es()
es_ZZN <- es(trend_data_train, model = "ZZN", silent = "a")
| /Workshop 4.R | no_license | SaifRehman11/Forecasting-Holt-Winters-Exponential-Smoothing | R | false | false | 3,974 | r | library(forecast)
# Workshop 4 (duplicate copy): exponential smoothing (SES, Holt, Holt-Winters)
# with rolling-origin evaluation. Requires 'forecast', 'smooth' and 'TStools'.
library(smooth)
library(TStools)

# Load the data
medium_noise <- read.csv("medium_noise.csv", header = FALSE)
# Convert to a monthly time series starting January 2012
medium_noise <- ts(medium_noise, frequency = 12, start = c(2012, 1))

# Set forecast horizon and number of rolling origins
h <- 12
origins <- 10
medium_noise_length <- length(medium_noise)
# The training set stops so that h + origins - 1 observations remain for testing
train_length <- medium_noise_length - h - origins + 1
test_length <- h + origins - 1
medium_noise_train <- ts(medium_noise[1:train_length],
                         frequency = frequency(medium_noise),
                         start = start(medium_noise))
medium_noise_test <- medium_noise[(train_length + 1):medium_noise_length]

# Matrices holding the forecasts and the matching holdout values:
# one row per origin, one column per forecast-horizon step
medium_noise_forecasts <- matrix(NA, nrow = origins, ncol = h)
medium_noise_holdout <- matrix(NA, nrow = origins, ncol = h)
colnames(medium_noise_forecasts) <- paste0("horizon", c(1:h))
rownames(medium_noise_forecasts) <- paste0("origin", c(1:origins))
dimnames(medium_noise_holdout) <- dimnames(medium_noise_forecasts)
View(medium_noise_holdout)

# Rolling-origin evaluation: grow the training window by one step per origin
for (i in 1:origins) {
  # Create a ts object out of the medium noise data
  our_train_set <- ts(medium_noise[1:(train_length + i - 1)],
                      frequency = frequency(medium_noise),
                      start = start(medium_noise))
  # Write down the holdout values from the test set
  medium_noise_holdout[i, ] <- medium_noise_test[i - 1 + (1:h)]
  # Produce forecasts and write them down
  medium_noise_forecasts[i, ] <- forecast(ets(our_train_set, "ANN"), h = h)$mean
}
# MAE for each horizon
colMeans(abs(medium_noise_holdout - medium_noise_forecasts))

### SES ###
# Fit SES with a fixed initial seed
es_ANN_initial_1 <- es(medium_noise, model = "ANN", initial = medium_noise[1],
                       h = h, holdout = TRUE)
es_ANN_initial_1$accuracy
# Fit SES with an optimised seed
es_ANN_opt <- es(medium_noise, model = "ANN", h = h, holdout = TRUE)
es_ANN_opt$accuracy
# Benchmarking: naive forecast = SES with the smoothing parameter fixed at 1
# (fixed: the argument is spelled 'persistence'; the misspelt 'persistance'
# was silently swallowed by es()'s ..., so the model was NOT a naive benchmark)
medium_noise_naive <- es(medium_noise, model = "ANN", persistence = 1,
                         h = h, holdout = TRUE)
medium_noise_naive$accuracy

## Other exponential smoothing methods: Holt's method
trend_data <- read.csv("trend_data.csv")
plot(trend_data$x, type = "l")
trend_data <- ts(trend_data, frequency = 12)
plot(trend_data)
trend_data_length <- length(trend_data)
# Split into training and testing
trend_data_train <- ts(trend_data[1:36], frequency = 12)
trend_data_test <- trend_data[37:trend_data_length]
# Calculate Holt's method (additive error, additive trend, no seasonality)
ets_ANN <- ets(trend_data_train, model = "AAN")
ets_ANN
coef(ets_ANN)
forecast(ets_ANN, h = h)$mean
plot(forecast(ets_ANN, h = h))
# Calculate a damped Holt method
ets_AAdn <- ets(trend_data_train, model = "AAN", damped = TRUE)
ets_AAdn
# Fit Holt's method, no damped trend
ets(trend_data_train, model = "AAN", damped = FALSE)
es_AAdn <- es(trend_data, model = "AAdN", h = h, holdout = TRUE)

## Holt-Winters
trend_seasonal_data <- read.csv("trend_seasonal_data.csv", header = FALSE)
trend_seasonal_data <- ts(trend_seasonal_data, frequency = 12)
plot(trend_seasonal_data)
trend_seasonal_data_train <- ts(trend_seasonal_data[1:36], frequency = 12)
# (fixed: the test split previously indexed with 'trend_data_length', the
# length of a DIFFERENT series)
trend_seasonal_data_test <- trend_seasonal_data[37:length(trend_seasonal_data)]
# Fit a model using ets()
ets_AAA <- ets(trend_seasonal_data_train, model = "AAA", damped = FALSE)
# Do the same thing using es():
es_AAA <- es(trend_seasonal_data_train, model = "AAA", h = h)
ets_AAA
es_AAA

# Selecting the best model based on optimization
# Calculate an optimized ETS method using ets()
# (fixed: model selection was run on the TEST set; the training set is used
# here, consistent with the es() call right below)
ets_ZZZ <- ets(trend_seasonal_data_train, model = "ZZZ")
# Do the same thing using es()
es_ZZZ <- es(trend_seasonal_data_train, model = "ZZZ")
# Select the most appropriate non-seasonal model with ets()
ets_ZZN <- ets(trend_data_train, model = "ZZN")
# Do the same thing with es()
es_ZZN <- es(trend_data_train, model = "ZZN", silent = "a")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visual_spatial.R
\name{vis_rasterbrick}
\alias{vis_rasterbrick}
\title{Visualize rasterbrick}
\usage{
vis_rasterbrick(
rb = NULL,
bands = NULL,
cols = NULL,
ncol = 2,
outfn = "vis_bands.pdf",
save = FALSE,
width = 4,
height = 4
)
}
\arguments{
\item{rb}{raster brick object}
\item{bands}{numbers of the bands}
\item{cols}{palettes}
\item{ncol}{number of columns}
\item{outfn}{file name of the output}
\item{save}{indicate export of pdf}
\item{width}{(inches) of single band image files}
\item{height}{(inches) of single band image files}
}
\value{
}
\description{
Visualize rasterbrick
}
\examples{
fn <- "I:/projects/fire/victoria/input/GLDAS_bilinear/GLDAS_V21_8d_2000-01-09.tif"
rb <- brick(fn)
names(rb)
cols <- colorRampPalette(RColorBrewer::brewer.pal(11, "Spectral"))
vis_rasterbrick(rb)
vis_rasterbrick(rb, bands = c(1,3,5,7), outfn = "vis_bands2.pdf", cols = cols, save = TRUE)
}
| /man/vis_rasterbrick.Rd | no_license | xuzhenwu/xu | R | false | true | 991 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visual_spatial.R
\name{vis_rasterbrick}
\alias{vis_rasterbrick}
\title{Visualize rasterbrick}
\usage{
vis_rasterbrick(
rb = NULL,
bands = NULL,
cols = NULL,
ncol = 2,
outfn = "vis_bands.pdf",
save = FALSE,
width = 4,
height = 4
)
}
\arguments{
\item{rb}{raster brick object}
\item{bands}{numbers of the bands}
\item{cols}{pallettes}
\item{ncol}{number of columns}
\item{outfn}{file name of the output}
\item{save}{indicate export of pdf}
\item{width}{(inchs) of single band image files}
\item{height}{(inchs) of single band image files}
}
\value{
}
\description{
Visualize rasterbrick
}
\examples{
fn <- "I:/projects/fire/victoria/input/GLDAS_bilinear/GLDAS_V21_8d_2000-01-09.tif"
rb <- brick(fn)
names(rb)
cols <- colorRampPalette(RColorBrewer::brewer.pal(11, "Spectral"))
vis_rasterbrick(rb)
vis_rasterbrick(rb, bands = c(1,3,5,7), outfn = "vis_bands2.pdf", cols = cols, save = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpaTimeClus.R
\docType{package}
\name{SpaTimeClus-package}
\alias{SpaTimeClus}
\alias{SpaTimeClus-package}
\title{SpaTimeClus a package for clustering spatio-temporal data}
\description{
SpaTimeClus is a tool for clustering Spatio-Temporal data.
}
\details{
\tabular{ll}{
Package: \tab SpaTimeClus\cr
Type: \tab Package\cr
Version: \tab 1.0.0\cr
Date: \tab 2016-12-21\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
The main function of this package is \link{spatimeclus} that performs the clustering of spatio-temporal data.
}
\examples{
\dontrun{
data(airparif)
# Clustering of the data by considering the spatial dependencies
res.spa <- spatimeclus(airparif$obs, G=3, K=4, Q=4, map = airparif$map,
nbinitSmall=50, nbinitKept=5, nbiterSmall=5)
summary(res.spa)
# Clustering of the data without considering the spatial dependencies
res.nospa <- spatimeclus(airparif$obs, G=3, K=4, Q=4, nbinitSmall=50, nbinitKept=5, nbiterSmall=5)
summary(res.nospa)
}
}
\author{
Author: Cheam A., Marbac M., and McNicholas P.
}
\references{
Cheam A., Marbac M., and McNicholas P., Model-Based Clustering for Spatio-Temporal Data Applied for Air Quality.
}
\keyword{package}
| /fuzzedpackages/SpaTimeClus/man/SpaTimeClus-package.Rd | no_license | akhikolla/testpackages | R | false | true | 1,263 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpaTimeClus.R
\docType{package}
\name{SpaTimeClus-package}
\alias{SpaTimeClus}
\alias{SpaTimeClus-package}
\title{SpaTimeClus a package for clustering spatio-temporal data}
\description{
SpaTimeClus is a tool for clustering Spatio-Temporal data.
}
\details{
\tabular{ll}{
Package: \tab SpaTimeClus\cr
Type: \tab Package\cr
Version: \tab 1.0.0\cr
Date: \tab 2016-12-21\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
The main function of this package is \link{spatimeclus} that performs the clustering of spatio-temporal data.
}
\examples{
\dontrun{
data(airparif)
# Clustering of the data by considering the spatial dependencies
res.spa <- spatimeclus(airparif$obs, G=3, K=4, Q=4, map = airparif$map,
nbinitSmall=50, nbinitKept=5, nbiterSmall=5)
summary(res.spa)
# Clustering of the data without considering the spatial dependencies
res.nospa <- spatimeclus(airparif$obs, G=3, K=4, Q=4, nbinitSmall=50, nbinitKept=5, nbiterSmall=5)
summary(res.nospa)
}
}
\author{
Author: Cheam A., Marbac M., and McNicholas P.
}
\references{
Cheam A., Marbac M., and McNicholas P., Model-Based Clustering for Spatio-Temporal Data Applied for Air Quality.
}
\keyword{package}
|
#' Clustering row/column categories on the basis of Correspondence Analysis coordinates from a space of user-defined dimensionality.
#'
#' This function allows to plot the result of cluster analysis performed on the results of Correspondence Analysis, providing the facility to plot a dendrogram, a silhouette plot depicting the "quality" of the clustering solution, and a scatterplot with points coded according to the cluster membership.
#'
#' The function provides the facility to perform hierarchical cluster analysis of row and/or column categories on the basis of Correspondence Analysis result.
#' The clustering is based on the row and/or colum categories' coordinates from: \cr
#' (1) a high-dimensional space corresponding to the whole dimensionality of the input contingency table; \cr
#' (2) a high-dimensional space of dimensionality smaller than the full dimensionality of the input dataset; \cr
#' (3) a bi-dimensional space defined by a pair of user-defined dimensions. \cr
#' To obtain (1), the 'dim' parameter must be left in its default value (NULL); \cr
#' To obtain (2), the 'dim' parameter must be given an integer (needless to say, smaller than the full dimensionality of the input data); \cr
#' To obtain (3), the 'dim' parameter must be given a vector (e.g., c(1,3)) specifying the dimensions the user is interested in.
#'
#' The method by which the distance is calculated is specified using the 'dist.meth' parameter, while the agglomerative method is speficied using the 'aggl.meth' parameter. By default, they are set to "euclidean" and "ward.D2" respectively.
#'
#' The user may want to specify beforehand the desired number of clusters (i.e., the cluster solution). This is accomplished feeding an integer into the 'part' parameter.
#' A dendrogram (with rectangles indicating the clustering solution), a silhouette plot (indicating the "quality" of the cluster solution), and a CA scatterplot (with points given colours on the basis of their cluster membership) are returned. Please note that, when a high-dimensional space is selected, the scatterplot will use the first 2 CA dimensions; the user must keep in mind that the clustering based on a higher-dimensional space may not be well reflected on the subspace defined by the first two dimensions only.\cr
#' Also note: \cr
#' -if both row and column categories are subject to the clustering, the column categories will be flagged by an asterisk (*) in the dendrogram (and in the silhouette plot) just to make it easier to identify rows and columns; \cr
#' -the silhouette plot displays the average silhouette width as a dashed vertical line; the dimensionality of the CA space used is reported in the plot's title; if a pair of dimensions has been used, the individual dimensions are reported in the plot's title; \cr
#' -the silhouette plot's labels end with a number indicating the cluster to which each category is closer.
#'
#' An optimal clustering solution can be obtained setting the 'opt.part' parameter to TRUE. The optimal partition is selected by means of an iterative routine which locates at which cluster solution the highest average silhouette width is achieved.
#' If the 'opt.part' parameter is set to TRUE, an additional plot is returned along with the silhouette plot. It displays a scatterplot in which the cluster solution (x-axis) is plotted against the average silhouette width (y-axis). A vertical reference line indicate the cluster solution which maximize the silhouette width, corresponding to the suggested optimal partition.
#'
#' The function returns a list storing information about the cluster membership (i.e., which categories belong to which cluster).
#'
#' Further info and Disclaimer: \cr
#' The silhouette plot is obtained from the silhouette() function out from the 'cluster' package (https://cran.r-project.org/web/packages/cluster/index.html).
#' For a detailed description of the silhouette plot, its rationale, and its interpretation, see: \cr
#' -Rousseeuw P J. 1987. "Silhouettes: A graphical aid to the interpretation and validation of cluster analysis", Journal of Computational and Applied Mathematics 20, 53-65 (http://www.sciencedirect.com/science/article/pii/0377042787901257)
#'
#' For the idea of clustering categories on the basis of the CA coordinates from a full high-dimensional space (or from a subset thereof), see: \cr
#' -Ciampi et al. 2005. "Correspondence analysis and two-way clustering", SORT 29 (1), 27-4 \cr
#' -Beh et al. 2011. "A European perception of food using two methods of correspondence analysis", Food Quality and Preference 22(2), 226-231
#'
#' Please note that the interpretation of the clustering when both row AND column categories are used must procede with caution due to the issue of inter-class points' distance interpretation. For a full description of the issue (also with further references), see: \cr
#' -Greenacre M. 2007. "Correspondence Analysis in Practice", Boca Raton-London-New York, Chapman&Hall/CRC, 267-268.
#'
#' @param data: contingency table, in dataframe format.
#' @param which: "both" to cluster both row and column categories; "rows" or "columns" to cluster only row or column categories respectively
#' @param dim: sets the dimensionality of the space whose coordinates are used to cluster the CA categories; it can be an integer or a vector (e.g., c(2,3)) specifying the first and second selected dimension. NULL is the default; it will make the clustering to be based on the maximum dimensionality of the dataset.
#' @param dist.meth: sets the distance method used for the calculation of the distance between categories; "euclidean" is the default (see the help of the dist() function for more info and other methods available).
#' @param aggl.meth: sets the agglomerative method to be used in the dendrogram construction; "ward.D2" is the default (see the help of the hclust() function for more info and for other methods available).
#' @param opt.part: takes TRUE or FALSE (default) if the user wants or doesn't want an optimal partition to be suggested; the latter is based upon an iterative process that seeks the maximization of the average silhouette width.
#' @param opt.part.meth: sets whether the optimal partition method will try to maximize the average ("mean") or median ("median") silhouette width. The former is the default.
#' @param part: integer which sets the number of desired clusters (NULL is default); this will override the optimal cluster solution.
#' @param cex.dndr.lab: sets the size of the dendrogram's labels. 0.85 is the default.
#' @param cex.sil.lab: sets the size of the silhouette plot's s labels. 0.75 is the default.
#' @param cex.sctpl.lab: sets the size of the Correspondence Analysis scatterplot's labels. 3.5 is the default.
#' @keywords correspondence analysis clustering method chart silhouette
#' @export
#' @examples
#' #data(brand_coffee)
#' #caCluster(brand_coffee, opt.part=FALSE)
#' #displays a dendrogram of row AND column categories
#'
#' #res <- caCluster(brand_coffee, opt.part=TRUE)
#' #displays a dendrogram for row AND column categories; the clustering is based on the CA coordinates from a full high-dimensional space. Rectangles indicating the clusters defined by the optimal partition method (see Details). A silhouette plot, a scatterplot, and a CA scatterplot with indication of cluster membership are also produced (see Details). The cluster membership is stored in the object 'res'.
#'
#' #res <- caCluster(brand_coffee, which="rows", dim=4, opt.part=TRUE)
#' #displays a dendrogram for row categories, with rectangles indicating the clusters defined by the optimal partition method (see Details). The clustering is based on a space of dimensionality 4. A silhouette plot, a scatterplot, and a CA scatterplot with indication of cluster membership are also produced (see Details). The cluster membership is stored in the object 'res'.
#'
#' #res <- caCluster(brand_coffee, which="rows", dim=c(1,4), opt.part=TRUE)
#' #like the above example, but the clustering is based on the coordinates on the sub-space defined by a pair of dimensions (i.e., 1 and 4).
caCluster <- function(data, which="both", dim=NULL, dist.meth="euclidean", aggl.meth="ward.D2", opt.part=FALSE, opt.part.meth="mean", part=NULL, cex.dndr.lab=0.85, cex.sil.lab=0.75, cex.sctpl.lab=3.5){
  # Hierarchical clustering of row and/or column categories on the basis of
  # Correspondence Analysis coordinates; see the roxygen block above for the
  # full description of the parameters.
  # Depends on: FactoMineR (CA), cluster (silhouette), RcmdrMisc (assignCluster),
  # ggplot2 + ggrepel (scatterplot) -- assumed attached by the package.

  # Full dimensionality of the input contingency table
  dimensionality <- min(ncol(data), nrow(data)) - 1

  # Build the strings reporting which CA space the clustering is based on
  if (is.null(dim)) {
    dimens.to.report <- paste0("from a space of dimensionality: ", dimensionality)
    sil.plt.title <- paste0("Silhouette plot for CA (dimensionality: ", dimensionality, ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from a space of dimensionality: ", dimensionality)
  } else if (length(dim) == 1) {
    dimens.to.report <- paste0("from a space of dimensionality: ", dim)
    sil.plt.title <- paste0("Silhouette plot for CA (dimensionality: ", dim, ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from a space of dimensionality: ", dim)
  } else {
    dimens.to.report <- paste0("from the subspace defin. by the ", dim[1], " and ", dim[2], " dim.")
    sil.plt.title <- paste0("Silhouette plot for CA (dim. ", dim[1], " + ", dim[2], ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from the sub-space defined by dim. ", dim[1], " + ", dim[2])
  }

  # Correspondence Analysis (FactoMineR), keeping all the dimensions
  res.ca <- CA(data, ncp = dimensionality, graph = FALSE)

  # Coordinates to be clustered: rows, columns, or both stacked.
  # (fixed: 'which' is documented as taking "columns", but the code only
  # tested for "cols"; both spellings are now accepted)
  if (which == "rows") {
    binded.coord <- res.ca$row$coord
  } else if (which %in% c("cols", "columns")) {
    binded.coord <- res.ca$col$coord
  } else {
    binded.coord <- rbind(res.ca$col$coord, res.ca$row$coord)
  }
  binded.coord <- as.data.frame(binded.coord)

  if (which == "both") {
    # Flag the column categories with an asterisk so rows/columns can be told apart
    rownames(binded.coord)[1:nrow(res.ca$col$coord)] <- paste(rownames(binded.coord)[1:nrow(res.ca$col$coord)], "*", sep = "")
    dendr.title <- paste("Clusters of Row and Column (*) categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  } else if (which == "rows") {
    dendr.title <- paste("Clusters of Row categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  } else {
    dendr.title <- paste("Clusters of Column categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  }

  # Max number of clusters: one less than the number of clustered objects
  max.ncl <- nrow(binded.coord) - 1
  sil.width.val <- numeric(max.ncl - 1)  # silhouette-width summary per cluster solution
  sil.width.step <- c(2:max.ncl)         # cluster solutions to be screened

  # Distance matrix on the whole coordinate set, on the first 'dim' dimensions,
  # or on the user-selected pair of dimensions.
  # (fixed: the single-integer branch previously dropped 'method = dist.meth')
  if (is.null(dim)) {
    d <- dist(binded.coord, method = dist.meth)
  } else if (length(dim) == 1) {
    d <- dist(subset(binded.coord, select = 1:dim), method = dist.meth)
  } else {
    d <- dist(subset(binded.coord, select = dim), method = dist.meth)
  }

  # Coordinates (and axis labels) used later in the cluster-membership scatterplot
  if (is.null(dim) || length(dim) == 1) {
    first.setcoord <- 1
    second.setcoord <- 2
    dim.labelA <- "Dim. 1"
    dim.labelB <- "Dim. 2"
  } else {
    first.setcoord <- dim[1]
    second.setcoord <- dim[2]
    dim.labelA <- paste0("Dim. ", dim[1])
    dim.labelB <- paste0("Dim. ", dim[2])
  }

  # Hierarchical agglomerative clustering
  fit <- hclust(d, method = aggl.meth)

  if (is.null(part) && opt.part == TRUE) {
    # Screen every cluster solution and record the silhouette-width summary
    for (i in 2:max.ncl) {
      counter <- i - 1
      clust <- silhouette(cutree(fit, k = i), d)  # requires the 'cluster' package
      # (fixed: the original nested ifelse() lacked a 'no' argument and errored
      # for any opt.part.meth other than "mean"/"median"; "mean" is the
      # documented default and is used for any other value)
      sil.width.val[counter] <- if (opt.part.meth == "median") median(clust[, 3]) else mean(clust[, 3])
    }
    sil.res <- as.data.frame(cbind(sil.width.step, sil.width.val))
    # Optimal partition: the solution maximizing the mean/median silhouette width
    select.clst.num <- sil.res$sil.width.step[sil.res$sil.width.val == max(sil.res$sil.width.val)]
    plot(fit, main = dendr.title, sub = paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab = "", cex = cex.dndr.lab, cex.main = 0.9, cex.sub = 0.75)
    solution <- rect.hclust(fit, k = select.clst.num, border = 1:select.clst.num)
    # Store the cluster membership; requires 'RcmdrMisc'
    binded.coord$membership <- assignCluster(binded.coord, binded.coord, cutree(fit, k = select.clst.num))
    par(mfrow = c(1, 2))
    final.sil.data <- silhouette(cutree(fit, k = select.clst.num), d)
    row.names(final.sil.data) <- row.names(binded.coord)
    # Append the neighbour-cluster number to each object's label
    rownames(final.sil.data) <- paste(rownames(final.sil.data), final.sil.data[, 2], sep = "_")
    par(oma = c(0, 4, 0, 0))  # leave room for long objects' labels
    plot(final.sil.data, cex.names = cex.sil.lab, max.strlen = 30, nmax.lab = nrow(binded.coord) + 1, main = sil.plt.title)
    abline(v = mean(final.sil.data[, 3]), lty = 2)  # average silhouette width
    plot(sil.res, xlab = "number of clusters", ylab = "silhouette width", ylim = c(0, 1), xaxt = "n", type = "b", main = "Silhouette width vs. number of clusters", sub = paste("values on the y-axis represent the", opt.part.meth, "of the silhouettes' width distribution at each cluster solution"), cex.sub = 0.75)
    axis(1, at = 0:max.ncl, cex.axis = 0.70)
    text(x = sil.res$sil.width.step, y = sil.res$sil.width.val, labels = round(sil.res$sil.width.val, 3), cex = 0.65, pos = 3, offset = 1, srt = 90)
    abline(v = select.clst.num, lty = 2, col = "red")  # selected number of clusters
    par(mfrow = c(1, 1))  # reset the default plot layout
    p <- ggplot(binded.coord, aes(x = binded.coord[, first.setcoord], y = binded.coord[, second.setcoord], color = membership)) +
      labs(x = dim.labelA, y = dim.labelB, colour = "Clusters") +
      geom_point() +
      geom_vline(xintercept = 0, linetype = 2, color = "gray") +
      geom_hline(yintercept = 0, linetype = 2, color = "gray") +
      theme(panel.background = element_rect(fill = "white", colour = "black")) +
      geom_text_repel(aes(x = binded.coord[, first.setcoord], y = binded.coord[, second.setcoord], label = rownames(binded.coord)), size = cex.sctpl.lab) +
      coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE) +
      ggtitle(ca.plt.title)
    print(p)
    return(solution)
  } else if (is.null(part) && opt.part == FALSE) {
    # Neither a user-defined nor an optimal partition: just draw the dendrogram
    plot(fit, main = dendr.title, sub = paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab = "", cex = cex.dndr.lab, cex.main = 0.9, cex.sub = 0.75)
  } else {
    # User-defined partition into 'part' clusters
    plot(fit, main = dendr.title, sub = paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab = "", cex = cex.dndr.lab, cex.main = 0.9, cex.sub = 0.75)
    select.clst.num <- part
    solution <- rect.hclust(fit, k = select.clst.num, border = 1:select.clst.num)
    binded.coord$membership <- assignCluster(binded.coord, binded.coord, cutree(fit, k = select.clst.num))
    final.sil.data <- silhouette(cutree(fit, k = select.clst.num), d)
    row.names(final.sil.data) <- row.names(binded.coord)
    rownames(final.sil.data) <- paste(rownames(final.sil.data), final.sil.data[, 2], sep = "_")
    plot(final.sil.data, cex.names = cex.sil.lab, max.strlen = 30, nmax.lab = nrow(binded.coord) + 1, main = sil.plt.title)
    abline(v = mean(final.sil.data[, 3]), lty = 2)
    p <- ggplot(binded.coord, aes(x = binded.coord[, first.setcoord], y = binded.coord[, second.setcoord], color = membership)) +
      labs(x = dim.labelA, y = dim.labelB, colour = "Clusters") +
      geom_point() +
      geom_vline(xintercept = 0, linetype = 2, color = "gray") +
      geom_hline(yintercept = 0, linetype = 2, color = "gray") +
      theme(panel.background = element_rect(fill = "white", colour = "black")) +
      geom_text_repel(aes(x = binded.coord[, first.setcoord], y = binded.coord[, second.setcoord], label = rownames(binded.coord)), size = cex.sctpl.lab) +
      coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE) +
      ggtitle(ca.plt.title)
    print(p)
    return(solution)
  }
}
#'
#' This function allows to plot the result of cluster analysis performed on the results of Correspondence Analysis, providing the facility to plot a dendrogram, a silouette plot depicting the "quality" of the clustering solution, and a scatterplot with points coded according to the cluster membership.
#'
#' The function provides the facility to perform hierarchical cluster analysis of row and/or column categories on the basis of Correspondence Analysis result.
#' The clustering is based on the row and/or colum categories' coordinates from: \cr
#' (1) a high-dimensional space corresponding to the whole dimensionality of the input contingency table; \cr
#' (2) a high-dimensional space of dimensionality smaller than the full dimensionality of the input dataset; \cr
#' (3) a bi-dimensional space defined by a pair of user-defined dimensions. \cr
#' To obtain (1), the 'dim' parameter must be left in its default value (NULL); \cr
#' To obtain (2), the 'dim' parameter must be given an integer (needless to say, smaller than the full dimensionality of the input data); \cr
#' To obtain (3), the 'dim' parameter must be given a vector (e.g., c(1,3)) specifying the dimensions the user is interested in.
#'
#' The method by which the distance is calculated is specified using the 'dist.meth' parameter, while the agglomerative method is speficied using the 'aggl.meth' parameter. By default, they are set to "euclidean" and "ward.D2" respectively.
#'
#' The user may want to specify beforehand the desired number of clusters (i.e., the cluster solution). This is accomplished feeding an integer into the 'part' parameter.
#' A dendrogram (with rectangles indicating the clustering solution), a silhouette plot (indicating the "quality" of the cluster solution), and a CA scatterplot (with points given colours on the basis of their cluster membership) are returned. Please note that, when a high-dimensional space is selected, the scatterplot will use the first 2 CA dimensions; the user must keep in mind that the clustering based on a higher-dimensional space may not be well reflected on the subspace defined by the first two dimensions only.\cr
#' Also note: \cr
#' -if both row and column categories are subject to the clustering, the column categories will be flagged by an asterisk (*) in the dendrogram (and in the silhouette plot) just to make it easier to identify rows and columns; \cr
#' -the silhouette plot displays the average silhouette width as a dashed vertical line; the dimensionality of the CA space used is reported in the plot's title; if a pair of dimensions has been used, the individual dimensions are reported in the plot's title; \cr
#' -the silhouette plot's labels end with a number indicating the cluster to which each category is closer.
#'
#' An optimal clustering solution can be obtained setting the 'opt.part' parameter to TRUE. The optimal partition is selected by means of an iterative routine which locates at which cluster solution the highest average silhouette width is achieved.
#' If the 'opt.part' parameter is set to TRUE, an additional plot is returned along with the silhouette plot. It displays a scatterplot in which the cluster solution (x-axis) is plotted against the average silhouette width (y-axis). A vertical reference line indicate the cluster solution which maximize the silhouette width, corresponding to the suggested optimal partition.
#'
#' The function returns a list storing information about the cluster membership (i.e., which categories belong to which cluster).
#'
#' Further info and Disclaimer: \cr
#' The silhouette plot is obtained from the silhouette() function out from the 'cluster' package (https://cran.r-project.org/web/packages/cluster/index.html).
#' For a detailed description of the silhouette plot, its rationale, and its interpretation, see: \cr
#' -Rousseeuw P J. 1987. "Silhouettes: A graphical aid to the interpretation and validation of cluster analysis", Journal of Computational and Applied Mathematics 20, 53-65 (http://www.sciencedirect.com/science/article/pii/0377042787901257)
#'
#' For the idea of clustering categories on the basis of the CA coordinates from a full high-dimensional space (or from a subset thereof), see: \cr
#' -Ciampi et al. 2005. "Correspondence analysis and two-way clustering", SORT 29 (1), 27-4 \cr
#' -Beh et al. 2011. "A European perception of food using two methods of correspondence analysis", Food Quality and Preference 22(2), 226-231
#'
#' Please note that the interpretation of the clustering when both row AND column categories are used must proceed with caution due to the issue of inter-class points' distance interpretation. For a full description of the issue (also with further references), see: \cr
#' -Greenacre M. 2007. "Correspondence Analysis in Practice", Boca Raton-London-New York, Chapman&Hall/CRC, 267-268.
#'
#' @param data: contingency table, in dataframe format.
#' @param which: "both" to cluster both row and column categories; "rows" or "cols" to cluster only row or column categories respectively
#' @param dim: sets the dimensionality of the space whose coordinates are used to cluster the CA categories; it can be an integer or a vector (e.g., c(2,3)) specifying the first and second selected dimension. NULL is the default; it will make the clustering to be based on the maximum dimensionality of the dataset.
#' @param dist.meth: sets the distance method used for the calculation of the distance between categories; "euclidean" is the default (see the help of the dist() function for more info and other methods available).
#' @param aggl.meth: sets the agglomerative method to be used in the dendrogram construction; "ward.D2" is the default (see the help of the hclust() function for more info and for other methods available).
#' @param opt.part: takes TRUE or FALSE (default) if the user wants or doesn't want an optimal partition to be suggested; the latter is based upon an iterative process that seeks the maximization of the average silhouette width.
#' @param opt.part.meth: sets whether the optimal partition method will try to maximize the average ("mean") or median ("median") silhouette width. The former is the default.
#' @param part: integer which sets the number of desired clusters (NULL is default); this will override the optimal cluster solution.
#' @param cex.dndr.lab: sets the size of the dendrogram's labels. 0.85 is the default.
#' @param cex.sil.lab: sets the size of the silhouette plot's s labels. 0.75 is the default.
#' @param cex.sctpl.lab: sets the size of the Correspondence Analysis scatterplot's labels. 3.5 is the default.
#' @keywords correspondence analysis clustering method chart silhouette
#' @export
#' @examples
#' #data(brand_coffee)
#' #caCluster(brand_coffee, opt.part=FALSE)
#' #displays a dendrogram of row AND column categories
#'
#' #res <- caCluster(brand_coffee, opt.part=TRUE)
#' #displays a dendrogram for row AND column categories; the clustering is based on the CA coordinates from a full high-dimensional space. Rectangles indicating the clusters defined by the optimal partition method (see Details). A silhouette plot, a scatterplot, and a CA scatterplot with indication of cluster membership are also produced (see Details). The cluster membership is stored in the object 'res'.
#'
#' #res <- caCluster(brand_coffee, which="rows", dim=4, opt.part=TRUE)
#' #displays a dendrogram for row categories, with rectangles indicating the clusters defined by the optimal partition method (see Details). The clustering is based on a space of dimensionality 4. A silhouette plot, a scatterplot, and a CA scatterplot with indication of cluster membership are also produced (see Details). The cluster membership is stored in the object 'res'.
#'
#' #res <- caCluster(brand_coffee, which="rows", dim=c(1,4), opt.part=TRUE)
#' #like the above example, but the clustering is based on the coordinates on the sub-space defined by a pair of dimensions (i.e., 1 and 4).
caCluster <- function(data, which="both", dim=NULL, dist.meth="euclidean", aggl.meth="ward.D2", opt.part=FALSE, opt.part.meth="mean", part=NULL, cex.dndr.lab=0.85, cex.sil.lab=0.75, cex.sctpl.lab=3.5){
  # Hierarchical clustering of row/column categories on CA coordinates.
  # See the roxygen block above for the full contract; requires the
  # FactoMineR (CA), cluster (silhouette), RcmdrMisc (assignCluster),
  # ggplot2 and ggrepel packages.
  dimensionality <- min(ncol(data), nrow(data))-1 # maximum dimensionality of the CA solution for the input contingency table
  # Build the strings used in the plots' titles, according to the space
  # ('dim') on which the clustering is based.
  if (is.null(dim)) {
    dimens.to.report <- paste0("from a space of dimensionality: ", dimensionality)
    sil.plt.title <- paste0("Silhouette plot for CA (dimensionality: ", dimensionality, ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from a space of dimensionality: ", dimensionality)
  } else if (length(dim) == 1) {
    dimens.to.report <- paste0("from a space of dimensionality: ", dim)
    sil.plt.title <- paste0("Silhouette plot for CA (dimensionality: ", dim, ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from a space of dimensionality: ", dim)
  } else {
    dimens.to.report <- paste0("from the subspace defin. by the ", dim[1], " and ", dim[2], " dim.")
    sil.plt.title <- paste0("Silhouette plot for CA (dim. ", dim[1], " + ", dim[2], ")")
    ca.plt.title <- paste0("Clusters based on CA coordinates from the sub-space defined by dim. ", dim[1], " + ", dim[2])
  }
  res.ca <- CA(data, ncp = dimensionality, graph = FALSE) # CA results from the CA command of the FactoMineR package
  # Get the column and/or row coordinates for all the dimensions and store
  # them in one table.
  if (which == "rows") {
    binded.coord <- res.ca$row$coord
  } else if (which == "cols") {
    binded.coord <- res.ca$col$coord
  } else {
    binded.coord <- rbind(res.ca$col$coord, res.ca$row$coord)
  }
  binded.coord <- as.data.frame(binded.coord)
  if(which=="both"){
    # Flag the column categories with an asterisk so rows and columns can be
    # told apart in the dendrogram and silhouette plot.
    rownames(binded.coord)[1:nrow(res.ca$col$coord)] <- paste(rownames(binded.coord)[1:nrow(res.ca$col$coord)], "*", sep = "")
    dendr.title <- paste("Clusters of Row and Column (*) categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  } else if (which == "rows") {
    dendr.title <- paste("Clusters of Row categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  } else {
    dendr.title <- paste("Clusters of Column categories \nclustering based on Correspondence Analysis' coordinates", dimens.to.report)
  }
  max.ncl <- nrow(binded.coord)-1 # max number of clusters: 1 less than the number of objects
  sil.width.val <- numeric(max.ncl-1) # average silhouette width at each candidate cluster solution
  sil.width.step <- c(2:max.ncl) # candidate numbers of clusters for which silhouettes are calculated
  # Distance matrix: on the whole coordinate set if 'dim' is NULL, otherwise
  # on the requested subset of dimensions.
  # (fix: the single-integer 'dim' branch previously ignored 'dist.meth' and
  # always used the euclidean default)
  if (is.null(dim)) {
    d <- dist(binded.coord, method = dist.meth)
  } else if (length(dim) == 1) {
    d <- dist(subset(binded.coord, select = 1:dim), method = dist.meth)
  } else {
    d <- dist(subset(binded.coord, select = dim), method = dist.meth)
  }
  # Coordinates used later for the scatterplot with cluster membership: the
  # first two CA dimensions, unless a specific pair was requested.
  if (is.null(dim) || length(dim) == 1) {
    first.setcoord <- 1
    second.setcoord <- 2
    dim.labelA <- "Dim. 1"
    dim.labelB <- "Dim. 2"
  } else {
    first.setcoord <- dim[1]
    second.setcoord <- dim[2]
    dim.labelA <- paste0("Dim. ", dim[1])
    dim.labelB <- paste0("Dim. ", dim[2])
  }
  fit <- hclust(d, method=aggl.meth) # hierarchical agglomerative clustering
  if (is.null(part) && isTRUE(opt.part)) {
    # Optimal partition: scan all cluster solutions and keep the one that
    # maximizes the mean (or median) silhouette width.
    for (i in 2:max.ncl){
      counter <- i-1
      clust <- silhouette(cutree(fit, k=i),d) # requires the 'cluster' package
      # fix: the original nested ifelse() lacked its 'no' argument and crashed
      # with a cryptic error for any method other than "mean"/"median"
      sil.width.val[counter] <- switch(opt.part.meth,
                                       mean = mean(clust[,3]),
                                       median = median(clust[,3]),
                                       stop("opt.part.meth must be either 'mean' or 'median'", call. = FALSE))
    }
    sil.res <- as.data.frame(cbind(sil.width.step, sil.width.val)) # one row per candidate solution
    select.clst.num <- sil.res$sil.width.step[sil.res$sil.width.val==max(sil.res$sil.width.val)] # solution with the maximum mean/median silhouette width
    plot(fit, main=dendr.title, sub=paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab="", cex=cex.dndr.lab, cex.main=0.9, cex.sub=0.75) # dendrogram for the optimal partition
    solution <- rect.hclust(fit, k=select.clst.num, border=1:select.clst.num) # draw the optimal partition on the dendrogram
    binded.coord$membership <- assignCluster(binded.coord, binded.coord, cutree(fit, k=select.clst.num)) # cluster membership; requires the 'RcmdrMisc' package
    par(mfrow=c(1,2))
    final.sil.data <- silhouette(cutree(fit, k=select.clst.num),d) # silhouette data of the selected solution
    row.names(final.sil.data) <- row.names(binded.coord) # copy the category names onto the silhouette object
    rownames(final.sil.data) <- paste(rownames(final.sil.data), final.sil.data[,2], sep = "_") # append the neighbour cluster as a suffix
    par(oma=c(0,4,0,0)) # enlarge the left outer margin to leave room for long labels
    plot(final.sil.data, cex.names=cex.sil.lab, max.strlen=30, nmax.lab=nrow(binded.coord)+1, main=sil.plt.title) # silhouette chart
    abline(v=mean(final.sil.data[,3]), lty=2) # reference line: average silhouette width of the optimal partition
    plot(sil.res, xlab="number of clusters", ylab="silhouette width", ylim=c(0,1), xaxt="n", type="b", main="Silhouette width vs. number of clusters", sub=paste("values on the y-axis represent the", opt.part.meth, "of the silhouettes' width distribution at each cluster solution"), cex.sub=0.75) # width-vs-solution scatterplot
    axis(1, at = 0:max.ncl, cex.axis=0.70) # x-axis labels (cluster counts)
    text(x=sil.res$sil.width.step, y=sil.res$sil.width.val, labels = round(sil.res$sil.width.val, 3), cex = 0.65, pos = 3, offset = 1, srt=90) # width values on top of the dots
    abline(v=select.clst.num, lty=2, col="red") # red line: the selected number of clusters
    par(mfrow=c(1,1)) # reset the default plot layout
    p <- ggplot(binded.coord, aes(x=binded.coord[,first.setcoord], y=binded.coord[,second.setcoord], color=membership)) +
      labs(x=dim.labelA, y=dim.labelB, colour="Clusters") +
      geom_point() +
      geom_vline(xintercept = 0, linetype=2, color="gray") +
      geom_hline(yintercept = 0, linetype=2, color="gray") +
      theme(panel.background = element_rect(fill="white", colour="black")) +
      geom_text_repel(aes(x=binded.coord[,first.setcoord], y=binded.coord[,second.setcoord], label = rownames(binded.coord)), size=cex.sctpl.lab) +
      coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE) +
      ggtitle(ca.plt.title)
    print(p)
    return(solution)
  } else if (is.null(part)) {
    # Neither a user-defined nor an optimal partition: dendrogram only.
    plot(fit, main=dendr.title, sub=paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab="", cex=cex.dndr.lab, cex.main=0.9, cex.sub=0.75)
  } else {
    # User-defined partition in 'part' clusters.
    plot(fit, main=dendr.title, sub=paste("Distance method:", dist.meth, "\nAgglomeration method:", aggl.meth), xlab="", cex=cex.dndr.lab, cex.main=0.9, cex.sub=0.75)
    select.clst.num <- part
    solution <- rect.hclust(fit, k=select.clst.num, border=1:select.clst.num)
    binded.coord$membership <- assignCluster(binded.coord, binded.coord, cutree(fit, k=select.clst.num))
    final.sil.data <- silhouette(cutree(fit, k=select.clst.num),d)
    row.names(final.sil.data) <- row.names(binded.coord)
    rownames(final.sil.data) <- paste(rownames(final.sil.data), final.sil.data[,2], sep = "_")
    plot(final.sil.data, cex.names=cex.sil.lab, max.strlen=30, nmax.lab=nrow(binded.coord)+1, main=sil.plt.title) # silhouette chart
    abline(v=mean(final.sil.data[,3]), lty=2)
    p <- ggplot(binded.coord, aes(x=binded.coord[,first.setcoord], y=binded.coord[,second.setcoord], color=membership)) +
      labs(x=dim.labelA, y=dim.labelB, colour="Clusters") +
      geom_point() +
      geom_vline(xintercept = 0, linetype=2, color="gray") +
      geom_hline(yintercept = 0, linetype=2, color="gray") +
      theme(panel.background = element_rect(fill="white", colour="black")) +
      geom_text_repel(aes(x=binded.coord[,first.setcoord], y=binded.coord[,second.setcoord], label = rownames(binded.coord)), size=cex.sctpl.lab) +
      coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE) +
      ggtitle(ca.plt.title)
    print(p)
    return(solution)
  }
}
##CourseProject
##ui.R
# Shiny user interface for a diamond price estimator: the sidebar collects
# the diamond's characteristics (weight, clarity, color, cut), and the main
# panel echoes the chosen values and shows the estimated price produced by
# the companion server script (outputs 'carat_1', ..., 'price_1').
library(shiny);library(ggplot2);data(diamonds);library(BH)
shinyUI(pageWithSidebar(
# page title, styled with the 'Lobster' font used throughout the app
headerPanel(h1("Get an Estimated Price of Your Diamond",
style = "font-family:
'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"
)),
# input widgets: one per diamond characteristic
sidebarPanel(
img(src="diamond11.png",height=150,width=200),
h4('Weight (in carat)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
numericInput("carat",label="",value=0.5,min=0.5,max=5,step=0.3),
h4('Clarity',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("clarity",label="",c("SI2","SI1","VS2","VS1","VVS2",
"VVS1","IF")),
h4("Color",style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("color",label="",c("D","E","F","G","H")),
h4("cut",style="font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("cut",label="",c("Fair","Good","Very Good","Premium","Ideal"))
),
# output section: echo the inputs, then the estimated price
mainPanel(
h3('Your Diamond',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
br(),
h4('weight (in carat)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('carat_1'),
h4('clarity',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('clarity_1'),
h4('color',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('color_1'),
h4('cut',style="font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('cut_1'),
br(),
h3('Estimated Price (in USD)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('price_1')
)
))
| /ui.R | no_license | yonidahan/developing-data-products | R | false | false | 2,781 | r |
##CourseProject
##ui.R
# Shiny user interface for a diamond price estimator: the sidebar collects
# the diamond's characteristics (weight, clarity, color, cut), and the main
# panel echoes the chosen values and shows the estimated price produced by
# the companion server script (outputs 'carat_1', ..., 'price_1').
library(shiny);library(ggplot2);data(diamonds);library(BH)
shinyUI(pageWithSidebar(
# page title, styled with the 'Lobster' font used throughout the app
headerPanel(h1("Get an Estimated Price of Your Diamond",
style = "font-family:
'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"
)),
# input widgets: one per diamond characteristic
sidebarPanel(
img(src="diamond11.png",height=150,width=200),
h4('Weight (in carat)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
numericInput("carat",label="",value=0.5,min=0.5,max=5,step=0.3),
h4('Clarity',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("clarity",label="",c("SI2","SI1","VS2","VS1","VVS2",
"VVS1","IF")),
h4("Color",style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("color",label="",c("D","E","F","G","H")),
h4("cut",style="font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;color: #4d3a7d;"),
selectInput("cut",label="",c("Fair","Good","Very Good","Premium","Ideal"))
),
# output section: echo the inputs, then the estimated price
mainPanel(
h3('Your Diamond',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
br(),
h4('weight (in carat)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('carat_1'),
h4('clarity',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('clarity_1'),
h4('color',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('color_1'),
h4('cut',style="font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('cut_1'),
br(),
h3('Estimated Price (in USD)',style = "font-family: 'Lobster', cursive;
font-weight: 500; line-height: 1.1;
color: #4d3a7d;"),
verbatimTextOutput('price_1')
)
))
|
library(statmod) # provides rinvgauss() used in the inverse Gaussian section below
# Simulation settings shared by both frailty scenarios (positive stable and
# inverse Gaussian) of this illness-death simulation.
# NOTE(review): the positive stable section also uses a Kendall's tau value
# 'tau' (see frailty.ps calls) that is never defined in this script - it must
# be set before running that part.
n <- 5000 #desired sample size
nn <- 5000*5 #a bigger sample size out of which n observations will be randomly sampled.
r <- 0.61 #type-1 censoring - observations with a greater observed time (event or censoring) will be censored at r.
g12 <- c(2,0.2,0.05) #regression coefficients for the 1->2 transition (used with Z12 below)
g13 <- c(0.05,1) #regression coefficients for the 1->3 transition (used with Z13 below)
g23 <- c(1,0.5) #regression coefficients for the 2->3 transition (used with Z23 below)
theta <- 1 #frailty parameter. for inverse Gaussian: 1/theta = variance.
c12_1 <- 0.005 #constant hazard12 below recL
c12_2 <- 1 #constant hazard12 between recL and recU
c12_3 <- 1 #constant hazard12 above recU
c13_1 <- 0.5 #constant hazard13 below recL
c13_2 <- 1 #constant hazard13 between recL and recU
c13_3 <- 2 #constant hazard13 above recU
c23_1 <- 0 #constant hazard23 below lower23 (comment fix: the c23_* rates belong to the 2->3 hazard)
c23_2 <- 1 #constant hazard23 between lower23 and recU
c23_3 <- 1 #constant hazard23 above recU
recL <- 0.05 # first change point of the 1->2 and 1->3 hazards (also the lower end of the uniform entry time R drawn below)
lower23 <- 0.12 # first change point of the 2->3 hazard
recU <- 0.15 # second change point of all three hazards (also the upper end of the uniform entry time R)
#the "real" cumulative hazard functions: each hazard is piecewise constant, so
#its integral is piecewise linear; one shared helper replaces the three
#copy-pasted ifelse() expressions.
cumhaz_piecewise <- function(t, rate1, rate2, rate3, brk1, brk2)
{
  # Integral from 0 to t of a hazard equal to rate1 on [0, brk1),
  # rate2 on [brk1, brk2) and rate3 on [brk2, Inf). Vectorized in t.
  ifelse(t < brk1,
         rate1*t,
         ifelse(t < brk2,
                rate1*brk1 + rate2*(t - brk1),
                rate1*brk1 + rate2*(brk2 - brk1) + rate3*(t - brk2)))
}
H012_real <- function(t)
{
  # Cumulative baseline hazard of the 1->2 transition.
  cumhaz_piecewise(t, c12_1, c12_2, c12_3, recL, recU)
}
H013_real <- function(t)
{
  # Cumulative baseline hazard of the 1->3 transition.
  cumhaz_piecewise(t, c13_1, c13_2, c13_3, recL, recU)
}
H023_real <- function(t)
{
  # Cumulative baseline hazard of the 2->3 transition (change point at lower23).
  cumhaz_piecewise(t, c23_1, c23_2, c23_3, lower23, recU)
}
############sampling from the positive stable frailty ################
a.fun <- function(theta,alpha)
{
  # Helper for the positive stable generator (angle function):
  # sin((1-alpha)*theta) * sin(alpha*theta)^(alpha/(1-alpha)) / sin(theta)^(1/(1-alpha)).
  # Vectorized in 'theta'.
  e1 <- alpha/(1-alpha)
  e2 <- 1/(1-alpha)
  sin((1 - alpha) * theta) * sin(alpha * theta)^e1 / sin(theta)^e2
}
ps.gen <- function(nobs,alpha)
{
# Generate 'nobs' draws from a positive stable distribution with index
# 'alpha', combining a uniform(0, pi) angle (through a.fun) with an
# independent standard exponential.
w<-rexp(nobs)
theta<-runif(nobs)*pi
out<-( a.fun(theta,alpha)/w )^( (1-alpha)/alpha )
out
}
frailty.ps <- function(n, tau)
{
  # Draw n positive stable frailties parameterized by Kendall's tau
  # (stable index alpha = 1 - tau); tau = 1 gives the degenerate case,
  # a vector of ones.
  stable.idx <- 1 - tau
  if (stable.idx == 0) {
    omega <- rep(1, n)
  }
  if (stable.idx > 0) {
    omega <- ps.gen(n, stable.idx)
  }
  return(omega)
}
## creating reference data (positive stable frailty): a large auxiliary sample
## used only through T13_ref to build an empirical estimate of the 1->3
## hazard on a grid below recL.
nnn <- 100000
z1 <- runif(nnn) ; z2 <- runif(nnn) ; z3 <- runif(nnn);
z <- cbind(z1,z2,z3)
omega_ref <- frailty.ps(nnn,tau) # NOTE(review): 'tau' is not defined in this script - set it before running
# fix: u12_ref was never drawn but is used by s12_*_ref below (the inverse
# Gaussian section draws both u12_ref and u13_ref); without it this section
# fails with "object 'u12_ref' not found".
u12_ref <- runif(nnn)
u13_ref <- runif(nnn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12_ref <- exp(Z12 %*% g12)
egz13_ref <- exp(Z13 %*% g13)
q1_ref <- egz12_ref*c12_1 + egz13_ref*c13_1
q2_ref <- egz12_ref*c12_2 + egz13_ref*c13_2
q3_ref <- egz12_ref*c12_3 + egz13_ref*c13_3
A12_recL_ref <- c12_1*egz12_ref*q1_ref^((1-theta)/theta)*recL^(1/theta)
S12_recL_ref <- exp(-omega_ref * A12_recL_ref)
A12_recU_ref <- A12_recL_ref + c12_2*egz12_ref/q2_ref * ((q1_ref*recL + q2_ref*(recU-recL))^(1/theta) - (q1_ref*recL)^(1/theta))
S12_recU_ref <- exp(-omega_ref * A12_recU_ref)
tmp_ref <- q1_ref*recL + q2_ref*(recU-recL)
# closed-form inversion of the conditional survival, one expression per piece
# of the piecewise-constant hazard (below recL / between recL and recU / above recU);
# T12_ref is computed for symmetry but only T13_ref feeds the ecdf below
s12_1_ref <- (-log(u12_ref)/(c12_1*omega_ref))^theta * egz12_ref^(-theta) * q1_ref^(theta-1)
s12_2_ref <- ((q1_ref*recL)^(1/theta) - q2_ref/(c12_2*egz12_ref) * (log(u12_ref)/omega_ref + A12_recL_ref))^theta/q2_ref +recL*(1 - q1_ref/q2_ref)
s12_3_ref <- (tmp_ref^(1/theta) - q3_ref/(c12_3*egz12_ref) * (log(u12_ref)/omega_ref + A12_recU_ref))^theta/q3_ref - tmp_ref/q3_ref + recU
T12_ref <- ifelse(u12_ref > S12_recL_ref,s12_1_ref,ifelse(u12_ref > S12_recU_ref, s12_2_ref, s12_3_ref))
A13_recL_ref <- c13_1*egz13_ref*q1_ref^((1-theta)/theta)*recL^(1/theta)
S13_recL_ref <- exp(-omega_ref * A13_recL_ref)
A13_recU_ref <- A13_recL_ref + c13_2*egz13_ref/q2_ref * ((q1_ref*recL + q2_ref*(recU-recL))^(1/theta) - (q1_ref*recL)^(1/theta))
S13_recU_ref <- exp(-omega_ref * A13_recU_ref)
s13_1_ref <- (-log(u13_ref)/(c13_1*omega_ref))^theta * egz13_ref^(-theta) * q1_ref^(theta-1)
s13_2_ref <- ((q1_ref*recL)^(1/theta) - q2_ref/(c13_2*egz13_ref) * (log(u13_ref)/omega_ref + A13_recL_ref))^theta/q2_ref +recL*(1 - q1_ref/q2_ref)
s13_3_ref <- (tmp_ref^(1/theta) - q3_ref/(c13_3*egz13_ref) * (log(u13_ref)/omega_ref + A13_recU_ref))^theta/q3_ref - tmp_ref/q3_ref + recU
T13_ref <- ifelse(u13_ref > S13_recL_ref,s13_1_ref,ifelse(u13_ref > S13_recU_ref, s13_2_ref, s13_3_ref))
F13 <- ecdf(T13_ref) # empirical cdf of the 1->3 times
H13 <- function(x) {-log(1 - F13(x))} # empirical cumulative hazard
h13_apr <- function(x,D) {(H13(x + D) - H13(x))/D} # finite-difference hazard approximation
n_grid <- 50
D <- 0.01
h13_grid_times <- seq(0,recL,length.out = n_grid)
h13_grid <- h13_apr(h13_grid_times,D)
##creating the "main" sample:
# covariates: four independent uniforms; Z12 uses z1-z3, Z13 uses z1-z2,
# Z23 (defined below) uses z1 and z4
z1 <- runif(nn) ; z2 <- runif(nn) ; z3 <- runif(nn); z4 <- runif(nn)
z <- cbind(z1,z2,z3,z4)
omega <- frailty.ps(nn,tau) # shared frailty; NOTE(review): 'tau' is never defined in this script
# uniforms used to invert the conditional survival functions below
u12 <- runif(nn)
u13 <- runif(nn)
u23 <- runif(nn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12 <- exp(Z12 %*% g12)
egz13 <- exp(Z13 %*% g13)
q1 <- egz12*c12_1 + egz13*c13_1
q2 <- egz12*c12_2 + egz13*c13_2
q3 <- egz12*c12_3 + egz13*c13_3
A12_recL <- c12_1*egz12*q1^((1-theta)/theta)*recL^(1/theta)
S12_recL <- exp(-omega * A12_recL)
A12_recU <- A12_recL + c12_2*egz12/q2 * ((q1*recL + q2*(recU-recL))^(1/theta) - (q1*recL)^(1/theta))
S12_recU <- exp(-omega * A12_recU)
tmp <- q1*recL + q2*(recU-recL)
# closed-form inversion of the conditional survival of T12, one expression per
# piece of the piecewise-constant hazard (below recL / recL-recU / above recU)
s12_1 <- (-log(u12)/(c12_1*omega))^theta * egz12^(-theta) * q1^(theta-1)
s12_2 <- ((q1*recL)^(1/theta) - q2/(c12_2*egz12) * (log(u12)/omega + A12_recL))^theta/q2 +recL*(1 - q1/q2)
s12_3 <- (tmp^(1/theta) - q3/(c12_3*egz12) * (log(u12)/omega + A12_recU))^theta/q3 - tmp/q3 + recU
T12 <- ifelse(u12 > S12_recL,s12_1,ifelse(u12 > S12_recU, s12_2, s12_3))
A13_recL <- c13_1*egz13*q1^((1-theta)/theta)*recL^(1/theta)
S13_recL <- exp(-omega * A13_recL)
A13_recU <- A13_recL + c13_2*egz13/q2 * ((q1*recL + q2*(recU-recL))^(1/theta) - (q1*recL)^(1/theta))
S13_recU <- exp(-omega * A13_recU)
# same piecewise inversion for T13
s13_1 <- (-log(u13)/(c13_1*omega))^theta * egz13^(-theta) * q1^(theta-1)
s13_2 <- ((q1*recL)^(1/theta) - q2/(c13_2*egz13) * (log(u13)/omega + A13_recL))^theta/q2 +recL*(1 - q1/q2)
s13_3 <- (tmp^(1/theta) - q3/(c13_3*egz13) * (log(u13)/omega + A13_recU))^theta/q3 - tmp/q3 + recU
T13 <- ifelse(u13 > S13_recL,s13_1,ifelse(u13 > S13_recU, s13_2, s13_3))
Z23 <- z[,c(1,4)]
egz23 <- exp(Z23 %*% g23)
# first derivative of the positive-stable Laplace transform exp(-x^theta)
laplace_deriv <- function(x,theta) {-theta * exp(-x^theta) * x^(theta-1)}
A23_func_tosearch <- function(x,theta,egz,timepoint)
{
tmp <- laplace_deriv(x,theta=theta) + exp(-H023_real(timepoint)*egz)
tmp
}
# numerically solve laplace_deriv(x) = -exp(-H023(T12)*egz23) for each subject,
# giving the quantity used to draw T23 conditionally on T12
A23_T12 <- rep(NA,nn)
for(i in 1:nn)
{
A23_T12[i] <- uniroot(f=A23_func_tosearch,theta=theta,timepoint=T12[i],egz=egz23[i],interval = c(0,10000000),tol=10^(-20))$root
}
Q <- -log(-laplace_deriv(A23_T12 -log(u23)/omega,theta = theta)) / egz23
H023_recL <- H023_real(recL)
H023_recU <- H023_real(recU)
# invert the piecewise-linear H023 at Q, one expression per hazard piece
s23_1 <- Q/c23_1
s23_2 <- Q/c23_2 + (c23_2 - c23_1)*lower23/c23_2
s23_3 <- Q/c23_3 + (c23_2 - c23_1)*lower23/c23_3 + (c23_3-c23_2)*recU/c23_3
T23 <- ifelse(Q < H023_recL, s23_1,ifelse(Q < H023_recU, s23_2, s23_3))
R <- runif(nn,recL,recU)
# NOTE(review): R looks like a delayed-entry (left-truncation) time - a
# subject is kept only if the relevant terminal time exceeds R; confirm
# against the estimation code
inout <- ifelse(T12 < T13, R < T23, R < T13) #observed in the sample
n.obs <- sum(inout)
Z12 <- Z12[inout,]; Z13 <- Z13[inout,]; Z23 <- Z23[inout,]
T12 <- T12[inout]; T13 <- T13[inout]; T23 <- T23[inout]; R <- R[inout]
sample.indx <- sample(1:n.obs,n,replace=FALSE)
Z12 <- Z12[sample.indx,]; Z13 <- Z13[sample.indx,]; Z23 <- Z23[sample.indx,]
T12 <- T12[sample.indx]; T13 <- T13[sample.indx]; T23 <- T23[sample.indx]; R <- R[sample.indx]
# random censoring C on top of the administrative (type-1) censoring at r
C <- rexp(n,2)
V <- pmin(T12,T13,R+C,r) # observed time of the first transition
VpostR <- (V >= R)
delta1 <- (V == T12) # illness (1->2) observed
delta1postR <- (V == T12) & VpostR
delta2 <- V == T13 # death without illness (1->3) observed
W <- ifelse(!delta1,0,pmin(T23,R+C,r)) # observed 2->3 time (0 when illness never observed)
delta3 <- as.vector((W == T23) & delta1) # death after illness (2->3) observed
############sampling from the inverse Gaussian frailty ##############
# Larger root of a*x^2 + b*x + c = 0; warnings from a negative discriminant
# are silenced and the result is then NaN.
quad_eq_solver <- function(a,b,c) {
  suppressWarnings({
    disc <- b^2 - 4 * a * c
    (sqrt(disc) - b) / (2 * a)
  })
}
## creating a reference sample:
# reference sample under the inverse Gaussian frailty: only T13_ref is used,
# to build an empirical estimate of the 1->3 hazard on a grid below recL
nnn <- 100000
z1 <- runif(nnn) ; z2 <- runif(nnn) ; z3 <- runif(nnn);
z <- cbind(z1,z2,z3)
omega_ref <- rinvgauss(nnn,mean = 1,shape = theta) # inverse Gaussian frailties (statmod)
u12_ref <- runif(nnn) ; u13_ref <- runif(nnn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12_ref <- exp(Z12 %*% g12)
egz13_ref <- exp(Z13 %*% g13)
q1_ref <- egz12_ref*c12_1 + egz13_ref*c13_1
q2_ref <- egz12_ref*c12_2 + egz13_ref*c13_2
q3_ref <- egz12_ref*c12_3 + egz13_ref*c13_3
A13_recL_ref <- egz13_ref*c13_1*(q1_ref/(2*theta)*recL^2 + recL)
S13_recL_ref <- exp(-omega_ref * A13_recL_ref)
A13_recU_ref <- A13_recL_ref + egz13_ref*c13_2*((recL/theta * q1_ref + 1)*(recU-recL) + q2_ref/(2*theta)*(recU-recL)^2)
S13_recU_ref <- exp(-omega_ref * A13_recU_ref)
# under the IG frailty the inversion of the conditional cumulative hazard
# reduces to a quadratic equation; coef_*_a/b/c are its coefficients, one
# triple per piece of the piecewise-constant hazard
coef_13_1_a_ref <- egz13_ref*c13_1*q1_ref/(2*theta)
coef_13_1_b_ref <- egz13_ref*c13_1
coef_13_1_c_ref <- log(u13_ref)/omega_ref
coef_13_2_a_ref <- egz13_ref*c13_2*q2_ref/(2*theta)
coef_13_2_b_ref <- egz13_ref*c13_2*(recL/theta*q1_ref + 1)
coef_13_2_c_ref <- A13_recL_ref + log(u13_ref)/omega_ref
coef_13_3_a_ref <- egz13_ref*c13_3*q3_ref/(2*theta)
coef_13_3_b_ref <- egz13_ref*c13_3*(recL/theta*q1_ref + (recU-recL)/theta*q2_ref + 1)
coef_13_3_c_ref <- A13_recU_ref + log(u13_ref)/omega_ref
s13_1_ref <- quad_eq_solver(coef_13_1_a_ref,coef_13_1_b_ref,coef_13_1_c_ref)
s13_2_ref <- quad_eq_solver(coef_13_2_a_ref,coef_13_2_b_ref,coef_13_2_c_ref) + recL
s13_3_ref <- quad_eq_solver(coef_13_3_a_ref,coef_13_3_b_ref,coef_13_3_c_ref) + recU
T13_ref <- ifelse(u13_ref > S13_recL_ref,s13_1_ref,ifelse(u13_ref > S13_recU_ref, s13_2_ref, s13_3_ref))
F13 <- ecdf(T13_ref) # empirical cdf of the 1->3 times
H13 <- function(x) {-log(1 - F13(x))} # empirical cumulative hazard
h13_apr <- function(x,D) {(H13(x + D) - H13(x))/D} # finite-difference hazard approximation
n_grid <- 50
D <- 0.01
h13_grid_times <- seq(0,recL,length.out = n_grid)
h13_grid <- h13_apr(h13_grid_times,D)
## creating the "main" sample:
# covariates: four independent uniforms; Z12 uses z1-z3, Z13 uses z1-z2,
# Z23 (defined below) uses z1 and z4
z1 <- runif(nn) ; z2 <- runif(nn) ; z3 <- runif(nn); z4 <- runif(nn)
z <- cbind(z1,z2,z3,z4)
omega <- rinvgauss(nn,mean = 1,shape = theta) # inverse Gaussian frailties (statmod)
# uniforms used to invert the conditional survival functions below
u12 <- runif(nn)
u13 <- runif(nn)
u23 <- runif(nn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12 <- exp(Z12 %*% g12)
egz13 <- exp(Z13 %*% g13)
q1 <- egz12*c12_1 + egz13*c13_1
q2 <- egz12*c12_2 + egz13*c13_2
q3 <- egz12*c12_3 + egz13*c13_3
A12_recL <- egz12*c12_1*(q1/(2*theta)*recL^2 + recL)
S12_recL <- exp(-omega * A12_recL)
A12_recU <- A12_recL + egz12*c12_2*((recL/theta * q1 + 1)*(recU-recL) + q2/(2*theta)*(recU-recL)^2)
S12_recU <- exp(-omega * A12_recU)
# inversion of the conditional cumulative hazard of T12 reduces to a
# quadratic; one coefficient triple per piece of the piecewise hazard
coef_12_1_a <- egz12*c12_1*q1/(2*theta)
coef_12_1_b <- egz12*c12_1
coef_12_1_c <- log(u12)/omega
coef_12_2_a <- egz12*c12_2*q2/(2*theta)
coef_12_2_b <- egz12*c12_2*(recL/theta*q1 + 1)
coef_12_2_c <- A12_recL + log(u12)/omega
coef_12_3_a <- egz12*c12_3*q3/(2*theta)
coef_12_3_b <- egz12*c12_3*(recL/theta*q1 + (recU-recL)/theta*q2 + 1)
coef_12_3_c <- A12_recU + log(u12)/omega
s12_1 <- quad_eq_solver(coef_12_1_a,coef_12_1_b,coef_12_1_c)
s12_2 <- quad_eq_solver(coef_12_2_a,coef_12_2_b,coef_12_2_c) + recL
s12_3 <- quad_eq_solver(coef_12_3_a,coef_12_3_b,coef_12_3_c) + recU
T12 <- ifelse(u12 > S12_recL,s12_1,ifelse(u12 > S12_recU, s12_2, s12_3))
A13_recL <- egz13*c13_1*(q1/(2*theta)*recL^2 + recL)
S13_recL <- exp(-omega * A13_recL)
A13_recU <- A13_recL + egz13*c13_2*((recL/theta * q1 + 1)*(recU-recL) + q2/(2*theta)*(recU-recL)^2)
S13_recU <- exp(-omega * A13_recU)
# same quadratic inversion for T13
coef_13_1_a <- egz13*c13_1*q1/(2*theta)
coef_13_1_b <- egz13*c13_1
coef_13_1_c <- log(u13)/omega
coef_13_2_a <- egz13*c13_2*q2/(2*theta)
coef_13_2_b <- egz13*c13_2*(recL/theta*q1 + 1)
coef_13_2_c <- A13_recL + log(u13)/omega
coef_13_3_a <- egz13*c13_3*q3/(2*theta)
coef_13_3_b <- egz13*c13_3*(recL/theta*q1 + (recU-recL)/theta*q2 + 1)
coef_13_3_c <- A13_recU + log(u13)/omega
s13_1 <- quad_eq_solver(coef_13_1_a,coef_13_1_b,coef_13_1_c)
s13_2 <- quad_eq_solver(coef_13_2_a,coef_13_2_b,coef_13_2_c) + recL
s13_3 <- quad_eq_solver(coef_13_3_a,coef_13_3_b,coef_13_3_c) + recU
T13 <- ifelse(u13 > S13_recL,s13_1,ifelse(u13 > S13_recU, s13_2, s13_3))
Z23 <- z[,c(1,4)]
egz23 <- exp(Z23 %*% g23)
# first derivative of the inverse Gaussian Laplace transform
# exp(theta*(1 - sqrt(1 + 2*x/theta)))
laplace_deriv <- function(x,theta)
{
tmp <- sqrt(1 + 2/theta*x)
-exp(theta*(1-tmp))/tmp
}
A23_func_tosearch <- function(x,theta,egz,timepoint)
{
tmp <- laplace_deriv(x,theta = theta) + exp(-H023_real(timepoint)*egz)
tmp
}
# numerically solve laplace_deriv(x) = -exp(-H023(T12)*egz23) for each subject,
# giving the quantity used to draw T23 conditionally on T12
A23_T12 <- rep(NA,nn)
for(i in 1:nn)
{
A23_T12[i] <- uniroot(f=A23_func_tosearch,theta = theta,timepoint=T12[i],egz=egz23[i],interval = c(0,10000000),tol=10^(-20))$root
}
Q <- -log(-laplace_deriv(A23_T12 -log(u23)/omega,theta = theta)) / egz23
H023_recL <- H023_real(recL)
H023_recU <- H023_real(recU)
# invert the piecewise-linear H023 at Q, one expression per hazard piece
s23_1 <- Q/c23_1
s23_2 <- Q/c23_2 + (c23_2 - c23_1)*lower23/c23_2
s23_3 <- Q/c23_3 + (c23_2 - c23_1)*lower23/c23_3 + (c23_3-c23_2)*recU/c23_3
T23 <- ifelse(Q < H023_recL, s23_1,ifelse(Q < H023_recU, s23_2, s23_3))
R <- runif(nn,recL,recU)
# NOTE(review): R looks like a delayed-entry (left-truncation) time - a
# subject is kept only if the relevant terminal time exceeds R; confirm
# against the estimation code
inout <- ifelse(T12 < T13, R < T23, R < T13) #observed in the sample
n.obs <- sum(inout)
Z12 <- Z12[inout,]; Z13 <- Z13[inout,]; Z23 <- Z23[inout,]
T12 <- T12[inout]; T13 <- T13[inout]; T23 <- T23[inout]; R <- R[inout]
sample.indx <- sample(1:n.obs,n,replace=FALSE) #sampling n observations out of nn
Z12 <- Z12[sample.indx,]; Z13 <- Z13[sample.indx,]; Z23 <- Z23[sample.indx,]
T12 <- T12[sample.indx]; T13 <- T13[sample.indx]; T23 <- T23[sample.indx]; R <- R[sample.indx]
# random censoring C on top of the administrative (type-1) censoring at r
C <- rexp(n,2)
V <- pmin(T12,T13,R+C,r) # observed time of the first transition
VpostR <- (V >= R)
delta1 <- (V == T12) # illness (1->2) observed
delta1postR <- (V == T12) & VpostR
delta2 <- V == T13 # death without illness (1->3) observed
W <- ifelse(!delta1,0,pmin(T23,R+C,r)) # observed 2->3 time (0 when illness never observed)
delta3 <- as.vector((W == T23) & delta1) # death after illness (2->3) observed
| /Sampling from IG and PS.R | no_license | nirkeret/frailty-LTRC | R | false | false | 13,226 | r | library(statmod)
n <- 5000 #desired sample size
nn <- 5000*5 #a bigger sample size out of which n observation will be randomly sampled.
r <- 0.61 #type1 ceonsoring - observations with a greater observed time (event or censoring) will be censored at r.
g12 <- c(2,0.2,0.05) #regression coefficients
g13 <- c(0.05,1)
g23 <- c(1,0.5)
theta <- 1 #frailty parameter. for inverse Gaussian: 1/theta = variance.
c12_1 <- 0.005 #constant hazard12 below recL
c12_2 <- 1 #constant hazard12 between recL and recU
c12_3 <- 1 #constant hazard12 above recU
c13_1 <- 0.5 #constant hazard13 below recL
c13_2 <- 1 #constant hazard13 between recL and recU
c13_3 <- 2 #constant hazard13 above recU
c23_1 <- 0 #constant hazard13 below lower23
c23_2 <- 1 #constant hazard13 between recL and recU
c23_3 <- 1 #constant hazard13 above recU
recL <- 0.05
lower23 <- 0.12
recU <- 0.15
#the "real" cummulative hazard functions
# True cumulative hazard for the 1->2 transition: piecewise-constant hazard
# with rates c12_1 / c12_2 / c12_3 on [0, recL), [recL, recU), [recU, Inf).
# Relies on the globals c12_1, c12_2, c12_3, recL, recU defined above.
H012_real <- function(t) {
  below <- c12_1 * t
  middle <- c12_1 * recL + c12_2 * (t - recL)
  above <- c12_1 * recL + c12_2 * (recU - recL) + c12_3 * (t - recU)
  ifelse(t < recL, below, ifelse(t < recU, middle, above))
}
# True cumulative hazard for the 1->3 transition: piecewise-constant hazard
# with rates c13_1 / c13_2 / c13_3 on [0, recL), [recL, recU), [recU, Inf).
# Relies on the globals c13_1, c13_2, c13_3, recL, recU defined above.
H013_real <- function(t) {
  below <- c13_1 * t
  middle <- c13_1 * recL + c13_2 * (t - recL)
  above <- c13_1 * recL + c13_2 * (recU - recL) + c13_3 * (t - recU)
  ifelse(t < recL, below, ifelse(t < recU, middle, above))
}
# True cumulative hazard for the 2->3 transition: piecewise-constant hazard
# with rates c23_1 / c23_2 / c23_3, knots at lower23 and recU (note the first
# knot differs from the other two transitions). Uses globals defined above.
H023_real <- function(t) {
  below <- c23_1 * t
  middle <- c23_1 * lower23 + c23_2 * (t - lower23)
  above <- c23_1 * lower23 + c23_2 * (recU - lower23) + c23_3 * (t - recU)
  ifelse(t < lower23, below, ifelse(t < recU, middle, above))
}
############sampling from the positive stable frailty ################
# Auxiliary function A(theta) of the Chambers-Mallows-Stuck style
# positive-stable generator:
#   sin((1-alpha)*theta) * sin(alpha*theta)^(alpha/(1-alpha)) / sin(theta)^(1/(1-alpha)),
# evaluated elementwise for theta in (0, pi) and stability index alpha in (0, 1).
a.fun <- function(theta, alpha) {
  exponent <- 1 / (1 - alpha)
  numerator <- sin((1 - alpha) * theta) * sin(alpha * theta)^(alpha * exponent)
  numerator / sin(theta)^exponent
}
# Draw nobs positive alpha-stable variates via the classic construction
# S = (A(U*pi)/E)^((1-alpha)/alpha) with E ~ Exp(1) and U ~ Unif(0, 1).
# Depends on the sibling helper a.fun(); RNG draw order (rexp then runif)
# is kept so results are reproducible under set.seed().
ps.gen <- function(nobs, alpha) {
  expo <- rexp(nobs)
  angle <- runif(nobs) * pi
  (a.fun(angle, alpha) / expo)^((1 - alpha) / alpha)
}
# Generate n positive-stable frailties with stability index alpha = 1 - tau.
# alpha == 0 (tau == 1) is the degenerate no-frailty case, returning all ones;
# alpha > 0 delegates to ps.gen(). For alpha < 0 neither branch assigns omega,
# so the final return() errors, as in the original.
frailty.ps <- function(n, tau) {
  stability <- 1 - tau
  if (stability > 0) {
    omega <- ps.gen(n, stability)
  }
  if (stability == 0) {
    omega <- rep(1, n)
  }
  return(omega)
}
## creating reference data
nnn <- 100000
z1 <- runif(nnn) ; z2 <- runif(nnn) ; z3 <- runif(nnn);
z <- cbind(z1,z2,z3)
omega_ref <- frailty.ps(nnn,tau)
u13_ref <- runif(nnn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12_ref <- exp(Z12 %*% g12)
egz13_ref <- exp(Z13 %*% g13)
q1_ref <- egz12_ref*c12_1 + egz13_ref*c13_1
q2_ref <- egz12_ref*c12_2 + egz13_ref*c13_2
q3_ref <- egz12_ref*c12_3 + egz13_ref*c13_3
A12_recL_ref <- c12_1*egz12_ref*q1_ref^((1-theta)/theta)*recL^(1/theta)
S12_recL_ref <- exp(-omega_ref * A12_recL_ref)
A12_recU_ref <- A12_recL_ref + c12_2*egz12_ref/q2_ref * ((q1_ref*recL + q2_ref*(recU-recL))^(1/theta) - (q1_ref*recL)^(1/theta))
S12_recU_ref <- exp(-omega_ref * A12_recU_ref)
tmp_ref <- q1_ref*recL + q2_ref*(recU-recL)
s12_1_ref <- (-log(u12_ref)/(c12_1*omega_ref))^theta * egz12_ref^(-theta) * q1_ref^(theta-1)
s12_2_ref <- ((q1_ref*recL)^(1/theta) - q2_ref/(c12_2*egz12_ref) * (log(u12_ref)/omega_ref + A12_recL_ref))^theta/q2_ref +recL*(1 - q1_ref/q2_ref)
s12_3_ref <- (tmp_ref^(1/theta) - q3_ref/(c12_3*egz12_ref) * (log(u12_ref)/omega_ref + A12_recU_ref))^theta/q3_ref - tmp_ref/q3_ref + recU
T12_ref <- ifelse(u12_ref > S12_recL_ref,s12_1_ref,ifelse(u12_ref > S12_recU_ref, s12_2_ref, s12_3_ref))
A13_recL_ref <- c13_1*egz13_ref*q1_ref^((1-theta)/theta)*recL^(1/theta)
S13_recL_ref <- exp(-omega_ref * A13_recL_ref)
A13_recU_ref <- A13_recL_ref + c13_2*egz13_ref/q2_ref * ((q1_ref*recL + q2_ref*(recU-recL))^(1/theta) - (q1_ref*recL)^(1/theta))
S13_recU_ref <- exp(-omega_ref * A13_recU_ref)
s13_1_ref <- (-log(u13_ref)/(c13_1*omega_ref))^theta * egz13_ref^(-theta) * q1_ref^(theta-1)
s13_2_ref <- ((q1_ref*recL)^(1/theta) - q2_ref/(c13_2*egz13_ref) * (log(u13_ref)/omega_ref + A13_recL_ref))^theta/q2_ref +recL*(1 - q1_ref/q2_ref)
s13_3_ref <- (tmp_ref^(1/theta) - q3_ref/(c13_3*egz13_ref) * (log(u13_ref)/omega_ref + A13_recU_ref))^theta/q3_ref - tmp_ref/q3_ref + recU
T13_ref <- ifelse(u13_ref > S13_recL_ref,s13_1_ref,ifelse(u13_ref > S13_recU_ref, s13_2_ref, s13_3_ref))
F13 <- ecdf(T13_ref)
H13 <- function(x) {-log(1 - F13(x))}
h13_apr <- function(x,D) {(H13(x + D) - H13(x))/D}
n_grid <- 50
D <- 0.01
h13_grid_times <- seq(0,recL,length.out = n_grid)
h13_grid <- h13_apr(h13_grid_times,D)
##creating the "main" sample:
z1 <- runif(nn) ; z2 <- runif(nn) ; z3 <- runif(nn); z4 <- runif(nn)
z <- cbind(z1,z2,z3,z4)
omega <- frailty.ps(nn,tau)
u12 <- runif(nn)
u13 <- runif(nn)
u23 <- runif(nn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12 <- exp(Z12 %*% g12)
egz13 <- exp(Z13 %*% g13)
q1 <- egz12*c12_1 + egz13*c13_1
q2 <- egz12*c12_2 + egz13*c13_2
q3 <- egz12*c12_3 + egz13*c13_3
A12_recL <- c12_1*egz12*q1^((1-theta)/theta)*recL^(1/theta)
S12_recL <- exp(-omega * A12_recL)
A12_recU <- A12_recL + c12_2*egz12/q2 * ((q1*recL + q2*(recU-recL))^(1/theta) - (q1*recL)^(1/theta))
S12_recU <- exp(-omega * A12_recU)
tmp <- q1*recL + q2*(recU-recL)
s12_1 <- (-log(u12)/(c12_1*omega))^theta * egz12^(-theta) * q1^(theta-1)
s12_2 <- ((q1*recL)^(1/theta) - q2/(c12_2*egz12) * (log(u12)/omega + A12_recL))^theta/q2 +recL*(1 - q1/q2)
s12_3 <- (tmp^(1/theta) - q3/(c12_3*egz12) * (log(u12)/omega + A12_recU))^theta/q3 - tmp/q3 + recU
T12 <- ifelse(u12 > S12_recL,s12_1,ifelse(u12 > S12_recU, s12_2, s12_3))
A13_recL <- c13_1*egz13*q1^((1-theta)/theta)*recL^(1/theta)
S13_recL <- exp(-omega * A13_recL)
A13_recU <- A13_recL + c13_2*egz13/q2 * ((q1*recL + q2*(recU-recL))^(1/theta) - (q1*recL)^(1/theta))
S13_recU <- exp(-omega * A13_recU)
s13_1 <- (-log(u13)/(c13_1*omega))^theta * egz13^(-theta) * q1^(theta-1)
s13_2 <- ((q1*recL)^(1/theta) - q2/(c13_2*egz13) * (log(u13)/omega + A13_recL))^theta/q2 +recL*(1 - q1/q2)
s13_3 <- (tmp^(1/theta) - q3/(c13_3*egz13) * (log(u13)/omega + A13_recU))^theta/q3 - tmp/q3 + recU
T13 <- ifelse(u13 > S13_recL,s13_1,ifelse(u13 > S13_recU, s13_2, s13_3))
Z23 <- z[,c(1,4)]
egz23 <- exp(Z23 %*% g23)
laplace_deriv <- function(x,theta) {-theta * exp(-x^theta) * x^(theta-1)}
# Root-finding target used by uniroot() below: the zero of
# laplace_deriv(x) + exp(-H023_real(timepoint) * egz) inverts the Laplace
# transform derivative at the observed 1->2 transition time.
A23_func_tosearch <- function(x, theta, egz, timepoint) {
  laplace_deriv(x, theta = theta) + exp(-H023_real(timepoint) * egz)
}
A23_T12 <- rep(NA,nn)
for(i in 1:nn)
{
A23_T12[i] <- uniroot(f=A23_func_tosearch,theta=theta,timepoint=T12[i],egz=egz23[i],interval = c(0,10000000),tol=10^(-20))$root
}
Q <- -log(-laplace_deriv(A23_T12 -log(u23)/omega,theta = theta)) / egz23
H023_recL <- H023_real(recL)
H023_recU <- H023_real(recU)
s23_1 <- Q/c23_1
s23_2 <- Q/c23_2 + (c23_2 - c23_1)*lower23/c23_2
s23_3 <- Q/c23_3 + (c23_2 - c23_1)*lower23/c23_3 + (c23_3-c23_2)*recU/c23_3
T23 <- ifelse(Q < H023_recL, s23_1,ifelse(Q < H023_recU, s23_2, s23_3))
R <- runif(nn,recL,recU)
inout <- ifelse(T12 < T13, R < T23, R < T13) #observed in the sample
n.obs <- sum(inout)
Z12 <- Z12[inout,]; Z13 <- Z13[inout,]; Z23 <- Z23[inout,]
T12 <- T12[inout]; T13 <- T13[inout]; T23 <- T23[inout]; R <- R[inout]
sample.indx <- sample(1:n.obs,n,replace=FALSE)
Z12 <- Z12[sample.indx,]; Z13 <- Z13[sample.indx,]; Z23 <- Z23[sample.indx,]
T12 <- T12[sample.indx]; T13 <- T13[sample.indx]; T23 <- T23[sample.indx]; R <- R[sample.indx]
C <- rexp(n,2)
V <- pmin(T12,T13,R+C,r)
VpostR <- (V >= R)
delta1 <- (V == T12)
delta1postR <- (V == T12) & VpostR
delta2 <- V == T13
W <- ifelse(!delta1,0,pmin(T23,R+C,r))
delta3 <- as.vector((W == T23) & delta1)
############sampling from the inverse Gaussian frailty ##############
quad_eq_solver <- function(a,b,c) {suppressWarnings((-b + sqrt(b^2 - 4*a*c))/(2*a))}
## creating a reference sample:
nnn <- 100000
z1 <- runif(nnn) ; z2 <- runif(nnn) ; z3 <- runif(nnn);
z <- cbind(z1,z2,z3)
omega_ref <- rinvgauss(nnn,mean = 1,shape = theta)
u12_ref <- runif(nnn) ; u13_ref <- runif(nnn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12_ref <- exp(Z12 %*% g12)
egz13_ref <- exp(Z13 %*% g13)
q1_ref <- egz12_ref*c12_1 + egz13_ref*c13_1
q2_ref <- egz12_ref*c12_2 + egz13_ref*c13_2
q3_ref <- egz12_ref*c12_3 + egz13_ref*c13_3
A13_recL_ref <- egz13_ref*c13_1*(q1_ref/(2*theta)*recL^2 + recL)
S13_recL_ref <- exp(-omega_ref * A13_recL_ref)
A13_recU_ref <- A13_recL_ref + egz13_ref*c13_2*((recL/theta * q1_ref + 1)*(recU-recL) + q2_ref/(2*theta)*(recU-recL)^2)
S13_recU_ref <- exp(-omega_ref * A13_recU_ref)
coef_13_1_a_ref <- egz13_ref*c13_1*q1_ref/(2*theta)
coef_13_1_b_ref <- egz13_ref*c13_1
coef_13_1_c_ref <- log(u13_ref)/omega_ref
coef_13_2_a_ref <- egz13_ref*c13_2*q2_ref/(2*theta)
coef_13_2_b_ref <- egz13_ref*c13_2*(recL/theta*q1_ref + 1)
coef_13_2_c_ref <- A13_recL_ref + log(u13_ref)/omega_ref
coef_13_3_a_ref <- egz13_ref*c13_3*q3_ref/(2*theta)
coef_13_3_b_ref <- egz13_ref*c13_3*(recL/theta*q1_ref + (recU-recL)/theta*q2_ref + 1)
coef_13_3_c_ref <- A13_recU_ref + log(u13_ref)/omega_ref
s13_1_ref <- quad_eq_solver(coef_13_1_a_ref,coef_13_1_b_ref,coef_13_1_c_ref)
s13_2_ref <- quad_eq_solver(coef_13_2_a_ref,coef_13_2_b_ref,coef_13_2_c_ref) + recL
s13_3_ref <- quad_eq_solver(coef_13_3_a_ref,coef_13_3_b_ref,coef_13_3_c_ref) + recU
T13_ref <- ifelse(u13_ref > S13_recL_ref,s13_1_ref,ifelse(u13_ref > S13_recU_ref, s13_2_ref, s13_3_ref))
F13 <- ecdf(T13_ref)
H13 <- function(x) {-log(1 - F13(x))}
h13_apr <- function(x,D) {(H13(x + D) - H13(x))/D}
n_grid <- 50
D <- 0.01
h13_grid_times <- seq(0,recL,length.out = n_grid)
h13_grid <- h13_apr(h13_grid_times,D)
## creating the "main" sample:
z1 <- runif(nn) ; z2 <- runif(nn) ; z3 <- runif(nn); z4 <- runif(nn)
z <- cbind(z1,z2,z3,z4)
omega <- rinvgauss(nn,mean = 1,shape = theta)
u12 <- runif(nn)
u13 <- runif(nn)
u23 <- runif(nn)
Z12 <- z[,1:3] ; Z13 <- z[,1:2]
egz12 <- exp(Z12 %*% g12)
egz13 <- exp(Z13 %*% g13)
q1 <- egz12*c12_1 + egz13*c13_1
q2 <- egz12*c12_2 + egz13*c13_2
q3 <- egz12*c12_3 + egz13*c13_3
A12_recL <- egz12*c12_1*(q1/(2*theta)*recL^2 + recL)
S12_recL <- exp(-omega * A12_recL)
A12_recU <- A12_recL + egz12*c12_2*((recL/theta * q1 + 1)*(recU-recL) + q2/(2*theta)*(recU-recL)^2)
S12_recU <- exp(-omega * A12_recU)
coef_12_1_a <- egz12*c12_1*q1/(2*theta)
coef_12_1_b <- egz12*c12_1
coef_12_1_c <- log(u12)/omega
coef_12_2_a <- egz12*c12_2*q2/(2*theta)
coef_12_2_b <- egz12*c12_2*(recL/theta*q1 + 1)
coef_12_2_c <- A12_recL + log(u12)/omega
coef_12_3_a <- egz12*c12_3*q3/(2*theta)
coef_12_3_b <- egz12*c12_3*(recL/theta*q1 + (recU-recL)/theta*q2 + 1)
coef_12_3_c <- A12_recU + log(u12)/omega
s12_1 <- quad_eq_solver(coef_12_1_a,coef_12_1_b,coef_12_1_c)
s12_2 <- quad_eq_solver(coef_12_2_a,coef_12_2_b,coef_12_2_c) + recL
s12_3 <- quad_eq_solver(coef_12_3_a,coef_12_3_b,coef_12_3_c) + recU
T12 <- ifelse(u12 > S12_recL,s12_1,ifelse(u12 > S12_recU, s12_2, s12_3))
A13_recL <- egz13*c13_1*(q1/(2*theta)*recL^2 + recL)
S13_recL <- exp(-omega * A13_recL)
A13_recU <- A13_recL + egz13*c13_2*((recL/theta * q1 + 1)*(recU-recL) + q2/(2*theta)*(recU-recL)^2)
S13_recU <- exp(-omega * A13_recU)
coef_13_1_a <- egz13*c13_1*q1/(2*theta)
coef_13_1_b <- egz13*c13_1
coef_13_1_c <- log(u13)/omega
coef_13_2_a <- egz13*c13_2*q2/(2*theta)
coef_13_2_b <- egz13*c13_2*(recL/theta*q1 + 1)
coef_13_2_c <- A13_recL + log(u13)/omega
coef_13_3_a <- egz13*c13_3*q3/(2*theta)
coef_13_3_b <- egz13*c13_3*(recL/theta*q1 + (recU-recL)/theta*q2 + 1)
coef_13_3_c <- A13_recU + log(u13)/omega
s13_1 <- quad_eq_solver(coef_13_1_a,coef_13_1_b,coef_13_1_c)
s13_2 <- quad_eq_solver(coef_13_2_a,coef_13_2_b,coef_13_2_c) + recL
s13_3 <- quad_eq_solver(coef_13_3_a,coef_13_3_b,coef_13_3_c) + recU
T13 <- ifelse(u13 > S13_recL,s13_1,ifelse(u13 > S13_recU, s13_2, s13_3))
Z23 <- z[,c(1,4)]
egz23 <- exp(Z23 %*% g23)
# First derivative of the inverse-Gaussian frailty Laplace transform
# exp(theta*(1 - sqrt(1 + 2x/theta))):
# d/dx = -exp(theta*(1 - r))/r with r = sqrt(1 + 2x/theta), elementwise.
laplace_deriv <- function(x, theta) {
  root <- sqrt(1 + 2 * x / theta)
  -exp(theta * (1 - root)) / root
}
# Root-finding target used by uniroot() below (inverse-Gaussian variant):
# zero of laplace_deriv(x) + exp(-H023_real(timepoint) * egz).
A23_func_tosearch <- function(x, theta, egz, timepoint) {
  laplace_deriv(x, theta = theta) + exp(-H023_real(timepoint) * egz)
}
A23_T12 <- rep(NA,nn)
for(i in 1:nn)
{
A23_T12[i] <- uniroot(f=A23_func_tosearch,theta = theta,timepoint=T12[i],egz=egz23[i],interval = c(0,10000000),tol=10^(-20))$root
}
Q <- -log(-laplace_deriv(A23_T12 -log(u23)/omega,theta = theta)) / egz23
H023_recL <- H023_real(recL)
H023_recU <- H023_real(recU)
s23_1 <- Q/c23_1
s23_2 <- Q/c23_2 + (c23_2 - c23_1)*lower23/c23_2
s23_3 <- Q/c23_3 + (c23_2 - c23_1)*lower23/c23_3 + (c23_3-c23_2)*recU/c23_3
T23 <- ifelse(Q < H023_recL, s23_1,ifelse(Q < H023_recU, s23_2, s23_3))
R <- runif(nn,recL,recU)
inout <- ifelse(T12 < T13, R < T23, R < T13) #observed in the sample
n.obs <- sum(inout)
Z12 <- Z12[inout,]; Z13 <- Z13[inout,]; Z23 <- Z23[inout,]
T12 <- T12[inout]; T13 <- T13[inout]; T23 <- T23[inout]; R <- R[inout]
sample.indx <- sample(1:n.obs,n,replace=FALSE) #sampling n observations out of nn
Z12 <- Z12[sample.indx,]; Z13 <- Z13[sample.indx,]; Z23 <- Z23[sample.indx,]
T12 <- T12[sample.indx]; T13 <- T13[sample.indx]; T23 <- T23[sample.indx]; R <- R[sample.indx]
C <- rexp(n,2)
V <- pmin(T12,T13,R+C,r)
VpostR <- (V >= R)
delta1 <- (V == T12)
delta1postR <- (V == T12) & VpostR
delta2 <- V == T13
W <- ifelse(!delta1,0,pmin(T23,R+C,r))
delta3 <- as.vector((W == T23) & delta1)
|
#' Dictionary of the popler metadata variables
#'
#' @description Describes the metadata variables contained
#' in the popler database, and shows their content.
#'
#' @param ... A sequence of (unquoted) variables specifying one
#' or more variables of popler's main table for which dictionary
#' information is needed
#' @param full_tbl logical; If \code{TRUE}, the function
#' returns a table describing the variables of the full main table.
#' If \code{FALSE}, the function returns a table describing the standard
#' variables. Default is \code{FALSE}.
#'
#' @export
#' @examples
#' \dontrun{
#' # Column names
#' column_names <- pplr_dictionary(full_tbl = FALSE)
#'
#' # Dictionary information
#' dictionary_lter <- pplr_dictionary(lterid, full_tbl = FALSE)
#'
#' # multiple columns
#' dictionary_lter_lat <- pplr_dictionary(lterid,lat_lter, full_tbl = FALSE)
#' }
pplr_dictionary <- function(..., full_tbl = FALSE) {
  # Main popler summary table from which all dictionary output is derived.
  summary_table <- pplr_summary_table_import()
  # Variables the user asked about; NULL when no columns were supplied.
  vars <- vars_dict(...)
  if (!is.null(vars)) {
    # Specific columns requested: return their unique values / summaries.
    return(dict_list(summary_table, vars))
  }
  # No columns requested: describe either the full main table or the
  # default subset of its variables.
  tbl <- if (full_tbl) {
    summary_table
  } else {
    summary_table[, default_vars()]
  }
  dictionary_explain(tbl)
}
# lazy evaluation in dictionary
#' @importFrom lazyeval lazy_dots
#' @noRd
# Capture the unquoted variable names passed in `...` as a character vector
# via lazyeval; returns NULL when no variables were supplied.
vars_dict <- function(...) {
  captured <- lazyeval::lazy_dots(...)
  nms <- sapply(captured, function(d) as.character(d$expr))
  if (length(nms) == 0) {
    return(NULL)
  }
  nms
}
# verify whether provided variables match one of the potential variables
#' @noRd
# Verify that every requested column name matches either a variable documented
# in the internal explanations table or one of the three "special" composite
# variables; stop with an informative error otherwise.
verify_vars <- function(sel_col){
  valid <- c(int.data$explanations$variable,
             "structure", "treatment", "species")
  i <- which(sel_col %in% valid)
  if( length(i) < length(sel_col) ){
    unmatched <- setdiff(seq_len(length(sel_col)), i)
    # collapse so several bad names yield one readable, newline-separated
    # message instead of stop() fusing a character vector with no separator
    stop(paste0("variable '", sel_col[unmatched],
                "' does not match any of the variables contained in popler",
                collapse = "\n"))
  }
}
# Return summary() for numeric columns and unique() for everything else.
# is.numeric() is already TRUE for both double and integer vectors, so the
# original's extra `| is.integer(col)` (a vector `|` inside a scalar `if`)
# was redundant and non-idiomatic.
unique_or_summary <- function(col) {
  if (is.numeric(col)) {
    summary(col)
  } else {
    unique(col)
  }
}
# produce the lists of unique dictionary values
#' @importFrom stats setNames
#' @noRd
# Build a named list with the unique values (or numeric summaries) of each
# requested popler variable; "species", "structure", "treatment" and
# "proj_metadata_key" get bespoke handling.
dict_list <- function(x, select_columns){
  # verify user input matches variables contained in popler before any work
  verify_vars(select_columns)
  # index "special" (composite) and "normal" variables
  i_spec <- which(select_columns %in% c("structure",
                                        "treatment",
                                        "species",
                                        "proj_metadata_key"))
  i_norm <- setdiff(seq_along(select_columns), i_spec)
  norm_cols <- select_columns[i_norm]
  # unique values (or summaries) of the "normal" variables --------------------
  out_norm <- lapply(x[, norm_cols, drop = FALSE], unique_or_summary)
  # unique values of the "special" variables ----------------------------------
  out_spc <- list()
  if(any("species" == select_columns)){
    out_spc$species <- unique(x[ ,c("genus", "species")])
  }
  if(any("proj_metadata_key" == select_columns)) {
    out_spc$proj_metadata_key <- unique(x[ ,'proj_metadata_key'])
  }
  if( any("structure" == select_columns) ){
    # stash all structure data in a single vector
    str_vec <- unlist(c(x[ ,paste0("structured_type_", 1:4)]))
    out_spc$structure <- unique(str_vec)
  }
  if(any("treatment" == select_columns)){
    # stash all treatment data in a single vector
    tr_vec <- unlist(c(x[ ,paste0("treatment_type_", 1:3)]))
    out_spc$treatment <- unique(tr_vec)
  }
  # variable descriptions ------------------------------------------------------
  # special variables
  descr_spec <- c("species (species name)",
                  "structure (types of indidivual structure)",
                  "treatment (type of treatment)",
                  "proj_metadata_key")
  if(length(out_spc) > 0){
    d_s_ind <- sapply(names(out_spc), function(x) grep(x, descr_spec))
    descr_spc <- descr_spec[d_s_ind]
  } else {
    descr_spc <- NULL
  }
  # normal variables: look the variable NAME up in the `variable` column.
  # (matching names against the `description` column never found anything,
  # leaving every label as "name (NA)")
  description <- int.data$explanations$description[match(names(out_norm),
                                                         int.data$explanations$variable)]
  descr_norm <- paste0(names(out_norm), " (", description, ")")
  # final descriptions, in the order the columns were requested
  names_out <- rep(NA, length(select_columns))
  names_out[i_norm] <- descr_norm
  names_out[i_spec] <- descr_spc
  # assemble output -------------------------------------------------------------
  out <- rep(list(NULL), length(select_columns))
  out[i_norm] <- out_norm
  out[i_spec] <- out_spc
  out <- setNames(out, names_out)
  # remove NA entries and literal "NA" strings from each element
  out <- lapply(out, function(x) x[!is.na(x)])
  out <- lapply(out, function(x) x[x != "NA"])
  return(out)
}
#' @noRd
# explain meaning of dictionary variables
# Explain the meaning of the dictionary variables: tables narrower than 60
# columns are the default subset and get the short explanations; wider tables
# (the full main table) get the complete explanations.
dictionary_explain <- function(x) {
  if (ncol(x) < 60) {
    return(int.data$explain_short)
  }
  int.data$explanations
}
| /R/dictionary.R | permissive | alegent/popler | R | false | false | 5,631 | r | #' Dictionary of the popler metadata variables
#'
#' @description Describes the metadata variables contained
#' in the popler database, and shows their content.
#'
#' @param ... A sequence of (unquoted) variables specifying one
#' or more variables of popler's main table for which dictionary
#' information is needed
#' @param full_tbl logical; If \code{TRUE}, the function
#' returns a table describing the variables of the full main table.
#' If \code{FALSE}, the function returns a table describing the standard
#' variables. Default is \code{FALSE}.
#'
#' @export
#' @examples
#' \dontrun{
#' # Column names
#' column_names <- pplr_dictionary(full_tbl = FALSE)
#'
#' # Dictionary information
#' dictionary_lter <- pplr_dictionary(lterid, full_tbl = FALSE)
#'
#' # multiple columns
#' dictionary_lter_lat <- pplr_dictionary(lterid,lat_lter, full_tbl = FALSE)
#' }
pplr_dictionary <- function(..., full_tbl = FALSE){
# summary table ------------------------------------------------------------
# load summary table
summary_table <- pplr_summary_table_import()
# variables ------------------------------------------------
# variables of which user defined wishes to know the content
vars <- vars_dict(...)
# produce output -------------------------------------------
# if no column specified, return ALL column names
if(is.null(vars)){
# select data based on
tmp <- if(full_tbl){
summary_table
} else {
# variables of default (full_tbl=FALSE) main table
summary_table[ ,default_vars()]
}
out <- dictionary_explain(tmp)
# if colums specified.
} else {
out <- dict_list(summary_table, vars)
}
return(out)
}
# lazy evaluation in dictionary
#' @importFrom lazyeval lazy_dots
#' @noRd
vars_dict <- function(...){
eval_that <- lazyeval::lazy_dots(...)
out <- sapply(eval_that, function(x) as.character(x$expr))
if(length(out) > 0) {
return(out)
} else {
return(NULL)
}
}
# verify whether provided variables match one of the potential variables
#' @noRd
# Verify that every requested column name matches either a variable documented
# in the internal explanations table or one of the three "special" composite
# variables; stop with an informative error otherwise.
verify_vars <- function(sel_col){
  valid <- c(int.data$explanations$variable,
             "structure", "treatment", "species")
  i <- which(sel_col %in% valid)
  if( length(i) < length(sel_col) ){
    unmatched <- setdiff(seq_len(length(sel_col)), i)
    # collapse so several bad names yield one readable, newline-separated
    # message instead of stop() fusing a character vector with no separator
    stop(paste0("variable '", sel_col[unmatched],
                "' does not match any of the variables contained in popler",
                collapse = "\n"))
  }
}
# Return summary() for numeric columns and unique() for everything else.
# is.numeric() is already TRUE for both double and integer vectors, so the
# original's extra `| is.integer(col)` (a vector `|` inside a scalar `if`)
# was redundant and non-idiomatic.
unique_or_summary <- function(col) {
  if (is.numeric(col)) {
    summary(col)
  } else {
    unique(col)
  }
}
# produce the lists of unique dictionary values
#' @importFrom stats setNames
#' @noRd
# Build a named list with the unique values (or numeric summaries) of each
# requested popler variable; "species", "structure", "treatment" and
# "proj_metadata_key" get bespoke handling.
dict_list <- function(x, select_columns){
  # verify user input matches variables contained in popler before any work
  verify_vars(select_columns)
  # index "special" (composite) and "normal" variables
  i_spec <- which(select_columns %in% c("structure",
                                        "treatment",
                                        "species",
                                        "proj_metadata_key"))
  i_norm <- setdiff(seq_along(select_columns), i_spec)
  norm_cols <- select_columns[i_norm]
  # unique values (or summaries) of the "normal" variables --------------------
  out_norm <- lapply(x[, norm_cols, drop = FALSE], unique_or_summary)
  # unique values of the "special" variables ----------------------------------
  out_spc <- list()
  if(any("species" == select_columns)){
    out_spc$species <- unique(x[ ,c("genus", "species")])
  }
  if(any("proj_metadata_key" == select_columns)) {
    out_spc$proj_metadata_key <- unique(x[ ,'proj_metadata_key'])
  }
  if( any("structure" == select_columns) ){
    # stash all structure data in a single vector
    str_vec <- unlist(c(x[ ,paste0("structured_type_", 1:4)]))
    out_spc$structure <- unique(str_vec)
  }
  if(any("treatment" == select_columns)){
    # stash all treatment data in a single vector
    tr_vec <- unlist(c(x[ ,paste0("treatment_type_", 1:3)]))
    out_spc$treatment <- unique(tr_vec)
  }
  # variable descriptions ------------------------------------------------------
  # special variables
  descr_spec <- c("species (species name)",
                  "structure (types of indidivual structure)",
                  "treatment (type of treatment)",
                  "proj_metadata_key")
  if(length(out_spc) > 0){
    d_s_ind <- sapply(names(out_spc), function(x) grep(x, descr_spec))
    descr_spc <- descr_spec[d_s_ind]
  } else {
    descr_spc <- NULL
  }
  # normal variables: look the variable NAME up in the `variable` column.
  # (matching names against the `description` column never found anything,
  # leaving every label as "name (NA)")
  description <- int.data$explanations$description[match(names(out_norm),
                                                         int.data$explanations$variable)]
  descr_norm <- paste0(names(out_norm), " (", description, ")")
  # final descriptions, in the order the columns were requested
  names_out <- rep(NA, length(select_columns))
  names_out[i_norm] <- descr_norm
  names_out[i_spec] <- descr_spc
  # assemble output -------------------------------------------------------------
  out <- rep(list(NULL), length(select_columns))
  out[i_norm] <- out_norm
  out[i_spec] <- out_spc
  out <- setNames(out, names_out)
  # remove NA entries and literal "NA" strings from each element
  out <- lapply(out, function(x) x[!is.na(x)])
  out <- lapply(out, function(x) x[x != "NA"])
  return(out)
}
#' @noRd
# explain meaning of dictionary variables
dictionary_explain <- function(x){
if(ncol(x) < 60){
out <- int.data$explain_short
} else {
out <- int.data$explanations
}
return(out)
}
|
#' 277 measurements of the cross sections for
#' \eqn{\pi^{-}p} collision (nuclear
#' physics).
#'
#' 277 measurements of the cross sections for
#' \eqn{\pi^{-}p} collision (nuclear
#' physics).
#'
#' @format A numeric vector with 277 elements.
#' @source \url{https://link.springer.com/article/10.1007/BF02683433}
"CERN"
# Trivial accessor that always yields the constant 1; kept so the package
# exports a function alongside the CERN data object.
CERN_DF <- function() {
  1
}
#assign("CERN_DF", .cern_df(), envir = .GlobalEnv)
| /R/CERN.R | no_license | vildibald/ICSsmoothing | R | false | false | 413 | r | #' 277 measurements of the cross sections for
#' \eqn{\pi^{-}p} collision (nuclear
#' physics).
#'
#' 277 measurements of the cross sections for
#' \eqn{\pi^{-}p} collision (nuclear
#' physics).
#'
#' @format A numeric vector with 277 elements.
#' @source \url{https://link.springer.com/article/10.1007/BF02683433}
"CERN"
# Trivial accessor that always yields the constant 1; kept so the package
# exports a function alongside the CERN data object.
CERN_DF <- function() {
  1
}
#assign("CERN_DF", .cern_df(), envir = .GlobalEnv)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-gamma.R
\docType{class}
\name{Zelig-gamma-class}
\alias{Zelig-gamma-class}
\alias{zgamma}
\title{Gamma Regression for Continuous, Positive Dependent Variables}
\description{
Vignette: \url{http://docs.zeligproject.org/articles/zelig_gamma.html}
}
| /man/Zelig-gamma-class.Rd | no_license | mbsabath/Zelig | R | false | true | 332 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-gamma.R
\docType{class}
\name{Zelig-gamma-class}
\alias{Zelig-gamma-class}
\alias{zgamma}
\title{Gamma Regression for Continuous, Positive Dependent Variables}
\description{
Vignette: \url{http://docs.zeligproject.org/articles/zelig_gamma.html}
}
|
remove(list = ls())
#' Load Libraries
library(lavaan)
library(sem)
library(lavaanPlot)
library(modelsummary)
#' Load Data
calth.palus <- read.csv( file = "Data/calth.palus.csv",
header = T, stringsAsFactors = F)
str(calth.palus)
#' ### Path analysis model specification
# lavaan model syntax for the path analysis (TSNOW, HI -> SP, DOBG -> FFD).
# The syntax string below is passed verbatim to lavaan(); `~` lines are
# regressions, `~~` lines are (co)variances, `:=` lines define derived
# parameters from the labelled path coefficients.
# NOTE(review): every `:=` definition adds a literal "1+" (e.g.
# `TSNOWie1:= 1+ a*b`); on regression lines `1 +` requests an intercept, but
# inside `:=` it literally adds 1 to the computed indirect effect, whereas
# the product-of-paths indirect effect is usually just a*b -- confirm the
# "1+" terms in the defined parameters are intentional.
model<-'
# DOBG is predicted by TSNOW, HI, and SP
DOBG ~ 1+ a*TSNOW + A*HI + e*SP
SP ~ 1+ C*HI + c*TSNOW
FFD ~ 1+ b*DOBG + d*SP + f*HI
#estimtating the variances of the exogenous variables
TSNOW ~~ TSNOW
HI ~~ HI
#estimtating the covariances of the exogenous variables (ses, mastery,performance)
TSNOW ~~ HI
#estimating the residual variances for endogenous variables (interest, anxiety, achieve)
DOBG ~~ DOBG
SP ~~ SP
FFD ~~ FFD
#Indirect effects of TSNOW on FFD
TSNOWie1:= 1+ a*b
TSNOWie2:= 1+ c*d
TSNOWiet:= 1+ TSNOWie1 + TSNOWie2
#Indirect effects of HI on FFD
HIie1:= 1+ A*b
HIie2:= 1+ C*d
HIiet:= 1+ HIie1 + HIie2 + f
#Indirect effect of SP on FFD
SPie1:= 1+ e*b
TSNOW ~ 1
HI ~ 1'
#' ### Lavaan function
# Fit the path model; missing = "fiml" requests full-information maximum
# likelihood so rows with missing values are retained.
fit<-lavaan(model,data=calth.palus, missing = "fiml")
summary(fit,fit.measures=TRUE)
modelsummary(fit)
#' ### Standardized Measurements
summary(fit,fit.measures=TRUE,standardized=TRUE,rsquare=TRUE)
standardizedSolution(fit)
#' ### Confidence Intervals
parameterEstimates(fit)
#' ### Comprehensive set of fit measures
fitMeasures(fit)
#' ### Modification indices
modificationIndices(fit)
#' ### Example path plots
# Draw the fitted path diagram with standardized coefficients, covariances,
# and significance stars on the regression paths.
lavaanPlot(model = fit, node_options = list(shape = "box", fontname =
                                              "serif"), edge_options = list(color = "grey"),
           coefs = TRUE, stand = TRUE,covs=
             TRUE,stars = c("regress"))
# ezknitr::ezspin(file = "Program/Path_all/SEM_calthpalus.R", out_dir = "Output", keep_rmd = F, keep_md = F)
#https://nmmichalak.github.io/nicholas_michalak/blog_entries/2018/nrg01/nrg01.html
#' Load Libraries
library(lavaan)
library(sem)
library(lavaanPlot)
library(modelsummary)
#' Load Data
calth.palus <- read.csv( file = "Data/calth.palus.csv",
header = T, stringsAsFactors = F)
str(calth.palus)
#' ### Path analysis model specification
model<-'
# DOBG is predicted by TSNOW, HI, and SP
DOBG ~ 1+ a*TSNOW + A*HI + e*SP
SP ~ 1+ C*HI + c*TSNOW
FFD ~ 1+ b*DOBG + d*SP + f*HI
#estimtating the variances of the exogenous variables
TSNOW ~~ TSNOW
HI ~~ HI
#estimtating the covariances of the exogenous variables (ses, mastery,performance)
TSNOW ~~ HI
#estimating the residual variances for endogenous variables (interest, anxiety, achieve)
DOBG ~~ DOBG
SP ~~ SP
FFD ~~ FFD
#Indirect effects of TSNOW on FFD
TSNOWie1:= 1+ a*b
TSNOWie2:= 1+ c*d
TSNOWiet:= 1+ TSNOWie1 + TSNOWie2
#Indirect effects of HI on FFD
HIie1:= 1+ A*b
HIie2:= 1+ C*d
HIiet:= 1+ HIie1 + HIie2 + f
#Indirect effect of SP on FFD
SPie1:= 1+ e*b
TSNOW ~ 1
HI ~ 1'
#' ### Lavaan function
fit<-lavaan(model,data=calth.palus, missing = "fiml")
summary(fit,fit.measures=TRUE)
modelsummary(fit)
#' ### Standardized Measurements
summary(fit,fit.measures=TRUE,standardized=TRUE,rsquare=TRUE)
standardizedSolution(fit)
#' ### Confidence Intervals
parameterEstimates(fit)
#' ### Comprehensive set of fit measures
fitMeasures(fit)
#' ### Modification indicies
modificationIndices(fit)
#' ### Example path plots
lavaanPlot(model = fit, node_options = list(shape = "box", fontname =
"serif"), edge_options = list(color = "grey"),
coefs = TRUE, stand = TRUE,covs=
TRUE,stars = c("regress"))
# ezknitr::ezspin(file = "Program/Path_all/SEM_calthpalus.R", out_dir = "Output", keep_rmd = F, keep_md = F)
#https://nmmichalak.github.io/nicholas_michalak/blog_entries/2018/nrg01/nrg01.html |
# do cross-validation and keep images whole.
# refer to lr_analysis (for format to feed to python) and lr_analysis3.R in version_final
# Split the images into train and test sets and built the lr model on the training set of images
# tested on the remaining. 5-fold cross validation.
library(tidyr)
library(reshape2)
library(caTools)
library(nnet)
library(fields)
source("/Volumes/DATA/Project 3/Code_Smoke Detection/version_final/utils.R")
##
##
## logistic regression:
## Yt ~ b1_t + b2_t + b3_t + b4_t + b5_t + temp_t + frp_t
##
##
# read in data
load("/Volumes/DATA/Project 3/Code_Smoke Detection/version_g/data_preprocessed.RData")
# save data dimemsions
m <- length(unique(data$TIMEPOINT)) # 1079 timepoints
n <- length(unique(data$AHI_ID)) # 16905 pixels = 161*105
# create image IDs
data$IMAGE_ID <- rep(1:m, each=n)
# create binary ground truth variable
y <- matrix(0, nrow = n*m, ncol = 1)
xx <- data$CLOUD_MASK_TYPE %in% as.integer(c(101, 111, 23, 27, 33, 37, 100, 110))
y[xx] <- 1
data$y <- as.factor(y)
# create folds for the cross-validation
set.seed(1234)
im_shuffle <- sample(m, m, replace = F) # shuffle image indices
n_folds <- 5 # number of folds
folds <- split(im_shuffle, as.factor(1:n_folds)) # split evenly into n_folds groups
# empty variables for the loop
conf_mat <- list()
iou_mn <- NULL
iou_tp <- NULL
iou_tn <- NULL
pixel_acc <- NULL
roc_data <- NULL
# logistic regression variables
lr_vrbs <- c("y", "B1", "B2", "B3", "B4", "B5", "TMPR_B14", "FRP")
# loop through each fold; k-fold cross-validation, LR
# Each iteration holds out the images of fold k, fits a logistic regression
# (via nnet::multinom on the binary factor y), and records per-fold metrics.
for (k in 1:n_folds) {
  # keep track of place in loop
  print(k)
  # split data into train and test sets (whole images, by IMAGE_ID)
  test_ind <- data$IMAGE_ID %in% folds[[k]]
  df_train <- data[!test_ind, lr_vrbs]
  df_test <- data[test_ind, lr_vrbs]
  # run lr on train set
  reg_model <- multinom(y ~ ., data = df_train, trace = F)
  # make predictions on test data
  predicted_vals <- predict(reg_model, newdata = df_test)
  # compute and save performance metrics
  # NOTE(review): iou() is defined in the sourced utils.R, not visible here;
  # its return is indexed as [1]=mean IoU, [2]=TN IoU, [3]=TP IoU below.
  tab <- table(predicted_vals, df_test$y)
  conf_mat[[k]] <- tab
  iou_out <- iou(tab)
  iou_mn[k] <- iou_out[1]
  iou_tn[k] <- iou_out[2]
  iou_tp[k] <- iou_out[3]
  # NOTE(review): na.omit() is applied to predictions and truth separately;
  # if they drop different rows the two vectors misalign -- confirm.
  pixel_acc[k] <- mean(as.character(na.omit(predicted_vals)) == as.character(na.omit(df_test)$y))
  # save roc data
  # NOTE(review): growing roc_data with rbind() inside the loop is O(n^2);
  # preallocating or collecting per-fold matrices would be faster.
  roc_data <- rbind(roc_data, cbind(as.numeric(as.character(predicted_vals)),
                                    as.numeric(as.character(df_test$y))))
}
res <- c(mean(pixel_acc), mean(iou_mn), mean(iou_tp), mean(iou_tn))
round(res, 3)
# [1] 0.57 0.31 0.06 0.56
# numbers in thesis:
# 0.976708 (pixel acc.)
# 0.489237 (IoU)
# 0.001766 (IoU TP)
# 0.976707 (IoU TN)
# 0.954551 (f.w. IoU)
# 0.000040 (f.w. IoU TP)
# 0.954511 (f.w. IoU TN)
write.csv(roc_data, "version_g/roc_data.csv")
#write.csv(roc_data, "output/roc_data.csv")
| /analysis_LR.R | no_license | aelarsen/code_smoke_detection_github | R | false | false | 2,965 | r | # do cross-validation and keep images whole.
# refer to lr_analysis (for format to feed to python) and lr_analysis3.R in version_final
# Split the images into train and test sets and built the lr model on the training set of images
# tested on the remaining. 5-fold cross validation.
library(tidyr)
library(reshape2)
library(caTools)
library(nnet)
library(fields)
source("/Volumes/DATA/Project 3/Code_Smoke Detection/version_final/utils.R")
##
##
## logistic regression:
## Yt ~ b1_t + b2_t + b3_t + b4_t + b5_t + temp_t + frp_t
##
##
# read in data
load("/Volumes/DATA/Project 3/Code_Smoke Detection/version_g/data_preprocessed.RData")
# save data dimemsions
# -----------------------------------------------------------------------------
# Image-wise k-fold cross-validation of a (multinomial) logistic regression
# for a binary cloud/fire-mask classification task.
# Relies on objects created earlier in the file: `data` (pixel-level data
# frame with TIMEPOINT, AHI_ID, CLOUD_MASK_TYPE and the band columns) and
# `iou()` (apparently returns c(mean IoU, IoU TN, IoU TP) for a confusion
# table) -- TODO confirm both are in scope; multinom() comes from nnet.
# -----------------------------------------------------------------------------
m <- length(unique(data$TIMEPOINT)) # 1079 timepoints
n <- length(unique(data$AHI_ID)) # 16905 pixels = 161*105
# create image IDs
# (assumes rows are ordered image-by-image: all n pixels of timepoint 1 first,
# then timepoint 2, ... -- TODO confirm against how `data` was assembled)
data$IMAGE_ID <- rep(1:m, each=n)
# create binary ground truth variable
y <- matrix(0, nrow = n*m, ncol = 1)
# positive class: pixels whose CLOUD_MASK_TYPE is one of the listed codes
xx <- data$CLOUD_MASK_TYPE %in% as.integer(c(101, 111, 23, 27, 33, 37, 100, 110))
y[xx] <- 1
data$y <- as.factor(y)
# create folds for the cross-validation
set.seed(1234)
im_shuffle <- sample(m, m, replace = F) # shuffle image indices
n_folds <- 5 # number of folds
# round-robin assignment of shuffled image indices to folds; split() recycles
# the factor and warns when m %% n_folds != 0 (here 1079 %% 5 == 4)
folds <- split(im_shuffle, as.factor(1:n_folds)) # split evenly into n_folds groups
# empty variables for the loop
conf_mat <- list() # per-fold confusion tables
iou_mn <- NULL # per-fold mean IoU
iou_tp <- NULL # per-fold IoU of the positive class
iou_tn <- NULL # per-fold IoU of the negative class
pixel_acc <- NULL # per-fold pixel accuracy
roc_data <- NULL # pooled (prediction, truth) pairs for later ROC analysis
# logistic regression variables
lr_vrbs <- c("y", "B1", "B2", "B3", "B4", "B5", "TMPR_B14", "FRP")
# loop through each fold; k-fold cross-validation, LR
for (k in 1:n_folds) {
  # keep track of place in loop
  print(k)
  # split data into train and test sets
  test_ind <- data$IMAGE_ID %in% folds[[k]]
  df_train <- data[!test_ind, lr_vrbs]
  df_test <- data[test_ind, lr_vrbs]
  # run lr on train set (multinom on a 2-level factor = logistic regression)
  reg_model <- multinom(y ~ ., data = df_train, trace = F)
  # make predictions on test data (hard class labels, not probabilities)
  predicted_vals <- predict(reg_model, newdata = df_test)
  # compute and save performance metrics
  tab <- table(predicted_vals, df_test$y)
  conf_mat[[k]] <- tab
  iou_out <- iou(tab)
  iou_mn[k] <- iou_out[1]
  iou_tn[k] <- iou_out[2]
  iou_tp[k] <- iou_out[3]
  # NOTE(review): na.omit() is applied to predicted_vals and df_test
  # independently; if they drop different rows the two vectors misalign and
  # recycle silently. Safer would be mean(pred == truth, na.rm = TRUE).
  pixel_acc[k] <- mean(as.character(na.omit(predicted_vals)) == as.character(na.omit(df_test)$y))
  # save roc data
  # NOTE(review): these are hard 0/1 class labels rather than scores, so an
  # ROC curve built from roc_data has a single non-trivial operating point.
  roc_data <- rbind(roc_data, cbind(as.numeric(as.character(predicted_vals)),
                                    as.numeric(as.character(df_test$y))))
}
# fold-averaged metrics: pixel accuracy, mean IoU, IoU(TP), IoU(TN)
res <- c(mean(pixel_acc), mean(iou_mn), mean(iou_tp), mean(iou_tn))
round(res, 3)
# [1] 0.57 0.31 0.06 0.56
# numbers in thesis:
# 0.976708 (pixel acc.)
# 0.489237 (IoU)
# 0.001766 (IoU TP)
# 0.976707 (IoU TN)
# 0.954551 (f.w. IoU)
# 0.000040 (f.w. IoU TP)
# 0.954511 (f.w. IoU TN)
write.csv(roc_data, "version_g/roc_data.csv")
#write.csv(roc_data, "output/roc_data.csv")
|
# AFL-generated fuzz case for the CNull-internal sampling routine: an 8x3
# numeric matrix whose first four entries are extreme doubles, the rest zero.
fuzz_matrix <- matrix(
  c(2.31584307392677e+77, 1.26037932371487e+296,
    1.22810536108214e+146, 4.12396251261199e-221,
    rep(0, 20)),
  nrow = 8L, ncol = 3L
)
testlist <- list(m = NULL, repetitions = 0L, in_m = fuzz_matrix)
# Invoke the sampler with the fuzz arguments (named-list dispatch).
result <- do.call(CNull:::communities_individual_based_sampling_alpha, testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615782534-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 1.26037932371487e+296, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Re-run of the same AFL fuzz case: call the CNull-internal sampler with the
# argument list `testlist` defined immediately above.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#=========================================================================================================================================
# -----------------------------------------------------------------------------
# R Commander plugin dialog: plot an ROCR performance curve.
#
# Builds a tabbed Tcl/Tk dialog (data tab: prediction/label variables plus the
# y and optional x performance measures; options tab: plot options, measure
# parameters, cutoffs and axis labels). On OK it validates the selections,
# persists them with putDialog(), and emits -- via doItAndPrint() -- the ROCR
# calls:
#     pred <- prediction(<data>$<prediction>, <data>$<label>)
#     perf <- performance(pred, '<y>'[, '<x>'][, measure-specific arguments])
#     plot(perf, colorize=..., add=..., ...)
# followed by remove(perf); remove(pred). Called for its side effects only.
#
# Requires Rcmdr (dialog helpers such as initializeDialog, variableListBox,
# doItAndPrint), ROCR (prediction/performance) and tcltk -- assumed attached
# by the plugin; see the commented-out require(ROCR) below.
# -----------------------------------------------------------------------------
fncvROCR <- function(){
    # Map the selected long measure names to 0-based Tk listbox indices;
    # returns NULL when var is NULL or names any entry missing from vars.
    varPosListn <- function(vars, var){
        if (is.null(var)) return(NULL)
        if (any(!var %in% vars)) NULL
        else apply(outer(var, vars, "=="), 1, which) - 1
    }
    #require(ROCR)
    #Daniel
    # Parallel vectors: ROCR's short measure codes and the human-readable
    # labels shown in the listboxes (matched by position).
    performancelist <- c("acc", "err", "fpr", "fall", "tpr", "rec", "sens", "fnr", "miss", "tnr", "spec", "ppv",
                "prec", "npv", "pcfall", "pcmiss", "rpp", "rnp", "phi", "mat", "mi", "chisq", "odds",
                "lift", "f", "rch", "auc", "prbe", "cal", "mxe", "rmse", "sar", "ecost", "cost")
    performancelistlong <- c("Accuracy", "Error rate", "False positive rate", "Fallout (fpr)", "True positive rate", "Recall (tpr)", "Sensitivity", "False negative rate", "Miss (fnr)", "True negative rate", "Specificity", "Positive predictive value",
                "Precision (ppv)", "Negative predictive value", "Prediction-conditioned fallout", "Prediction-conditioned miss", "Rate of positive predictions", "Rate of negative predictions", "Phi correlation coefficient", "Mattheus correlation coefficient (phi)", "Mutual information", "Chi square test statistic", "Odds ratio",
                "Lift value", "Precision-recall F measure", "ROC convex hull", "AUC", "Precision-recall break-even point", "Callibration error", "Mean cross-entropy", "Root-mean-squared error", "Sar", "Expected cost", "Cost")
    # Default widget state; the previous invocation's choices are restored
    # over these via getDialog("ROCR", defaults).
    defaults <- list(initial.prediction = NULL, initial.label = NULL, initial.ymeasure = performancelistlong[5], initial.xmeasure = performancelistlong[3],
                     initial.colorize = 0, initial.add = 0,
                     initial.printcutoffs = 0, initial.cutoffs = "seq(0,1,by=0.1)",
                     initial.printroc = 0,
                     initial.costfp = 1, initial.costfn = 1,
                     initial.calwindowsize = 100,
                     initial.partialfprstop = 1,
                     initial.xlab=gettextRcmdr("<auto>"), initial.ylab=gettextRcmdr("<auto>"),
                     initial.main=gettextRcmdr("<auto>"),
                     initial.tab=0) # tab
    dialog.values <- getDialog("ROCR", defaults)
    initializeDialog(title=gettext("Plot ROC curve", domain="R-RcmdrPlugin.ROC"), use.tabs=TRUE) # tab
    #Daniel
    .factors <- Factors() # NOTE(review): assigned but never used below
    .numeric <- Numeric()
    # --- data tab: prediction/label variable pickers -------------------------
    predictionBox <- variableListBox(dataTab, .numeric, title=gettext("Predictions variable (pick one)", domain="R-RcmdrPlugin.ROC"),# tab
                                     initialSelection=varPosn(dialog.values$initial.prediction, "numeric"))
    labelBox <- variableListBox(dataTab, .numeric, title=gettext("Labels variable (pick one)", domain="R-RcmdrPlugin.ROC"),
                                initialSelection=varPosn(dialog.values$initial.label, "numeric"))
    # --- options tab: frames, entries and checkboxes --------------------------
    optionsParFrame <- tkframe(optionsTab)# tab
    parFrame <- ttklabelframe(optionsParFrame, text=gettext("Plot Labels and Points", domain="R-RcmdrPlugin.ROC"))# tab
    performanceFrame <- ttklabelframe(optionsParFrame, text=gettext("Performance measures", domain="R-RcmdrPlugin.ROC"))# tab
    #performanceoptFrame <- ttklabelframe(optionsParFrame, text=gettext("Performance options", domain="R-RcmdrPlugin.ROC"))# tab
    # Numeric entries for measure-specific parameters (cost, cal, partial auc).
    costfpVar <- tclVar(dialog.values$initial.costfp) # tab
    costfpEntry <- ttkentry(performanceFrame, width = "25", textvariable = costfpVar)# tab
    costfnVar <- tclVar(dialog.values$initial.costfn) # tab
    costfnEntry <- ttkentry(performanceFrame, width = "25", textvariable = costfnVar)# tab
    calwindowsizeVar <- tclVar(dialog.values$initial.calwindowsize) # tab
    calwindowsizeEntry <- ttkentry(performanceFrame, width = "25", textvariable = calwindowsizeVar)# tab
    fprstopVar <- tclVar(dialog.values$initial.partialfprstop) # tab
    fprstopEntry <- ttkentry(performanceFrame, width = "25", textvariable = fprstopVar)# tab
    checkBoxes(window = optionsParFrame, frame = "optionsFrame",# tab
               boxes = c("printroc", "colorize", "add", "printcutoffs"), initialValues = c(
                 dialog.values$initial.printroc, dialog.values$initial.colorize, dialog.values$initial.add, dialog.values$initial.printcutoffs),labels = gettextRcmdr(c(
                 "Print performance object", "Colorize according to cutoff", "Add curve to existing plot","Print cutoffs")), title = gettext("Plot Options", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
    ymeasureBox <- variableListBox(performanceFrame, performancelistlong, title=gettext("Performance measure (y) (pick one)", domain="R-RcmdrPlugin.ROC"),# tab
                                   initialSelection=varPosListn(performancelistlong, dialog.values$initial.ymeasure))
    xmeasureBox <- variableListBox(performanceFrame, performancelistlong, title=gettext("Performance measure (x) (pick one or none)", domain="R-RcmdrPlugin.ROC"),
                                   initialSelection=varPosListn(performancelistlong, dialog.values$initial.xmeasure))
    # Cutoff positions entry plus a horizontal scrollbar bound to it.
    cutoffsVar <- tclVar(dialog.values$initial.cutoffs) # tab
    cutoffsEntry <- ttkentry(optionsFrame, width = "25", textvariable = cutoffsVar)# tab
    cutoffsScroll <- ttkscrollbar(optionsFrame, orient = "horizontal",
                                  command = function(...) tkxview(cutoffsEntry, ...))
    tkconfigure(cutoffsEntry, xscrollcommand = function(...) tkset(cutoffsScroll,
                                                                   ...))
    tkbind(cutoffsEntry, "<FocusIn>", function() tkselection.clear(cutoffsEntry))
    tkgrid(labelRcmdr(optionsFrame, text = gettext("Print cutoffs at", domain="R-RcmdrPlugin.ROC")), cutoffsEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(optionsFrame, text =""), cutoffsScroll, sticky = "ew", padx=6)
    # xlab/ylab/main entries, each with its own horizontal scrollbar.
    xlabVar <- tclVar(dialog.values$initial.xlab) # tab
    ylabVar <- tclVar(dialog.values$initial.ylab)
    mainVar <- tclVar(dialog.values$initial.main)
    xlabEntry <- ttkentry(parFrame, width = "25", textvariable = xlabVar)
    xlabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(xlabEntry, ...))
    tkconfigure(xlabEntry, xscrollcommand = function(...) tkset(xlabScroll,
                                                                ...))
    tkbind(xlabEntry, "<FocusIn>", function() tkselection.clear(xlabEntry))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("x-axis label")), xlabEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text =""), xlabScroll, sticky = "ew", padx=6)
    ylabEntry <- ttkentry(parFrame, width = "25", textvariable = ylabVar)
    ylabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(ylabEntry, ...))
    tkconfigure(ylabEntry, xscrollcommand = function(...) tkset(ylabScroll,
                                                                ...))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("y-axis label")), ylabEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text=""), ylabScroll, sticky = "ew", padx=6)
    mainEntry <- ttkentry(parFrame, width = "25", textvariable = mainVar)
    mainScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(mainEntry, ...))
    tkconfigure(mainEntry, xscrollcommand = function(...) tkset(mainScroll,
                                                                ...))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("Graph title")), mainEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text=""), mainScroll, sticky = "ew", padx=6)
    # OK handler: read the widget state, persist it with putDialog(), validate
    # the selections, then build and run the prediction/performance/plot
    # commands through doItAndPrint().
    onOK <- function(){
        tab <- if (as.character(tkselect(notebook)) == dataTab$ID) 0 else 1 # tab
        #Daniel
        prediction <- getSelection(predictionBox)
        label <- getSelection(labelBox)
        ymeasure <- getSelection(ymeasureBox)
        xmeasure <- getSelection(xmeasureBox)
        # checkbox "1"/"0" becomes the string "TRUE"/"FALSE", pasted verbatim
        # into the generated plot() command below
        colorize <- as.character("1" == tclvalue(colorizeVariable))
        add <- as.character("1" == tclvalue(addVariable))
        printroc <- as.character("1" == tclvalue(printrocVariable))
        costfp = as.numeric(as.character(tclvalue(costfpVar)))
        costfn = as.numeric(as.character(tclvalue(costfnVar)))
        calwindowsize = as.numeric(as.character(tclvalue(calwindowsizeVar)))
        fprstop = as.numeric(as.character(tclvalue(fprstopVar)))
        printcutoffsat <- if ("0" == tclvalue(printcutoffsVariable))
            ""
        else paste(", print.cutoffs.at=", tclvalue(cutoffsVar), sep = "")
        # "<auto>" means: let plot() choose; otherwise inject the user's text
        xlab <- trim.blanks(tclvalue(xlabVar))
        xlab <- if (xlab == gettextRcmdr("<auto>"))
            ""
        else paste(", xlab=\"", xlab, "\"", sep = "")
        ylab <- trim.blanks(tclvalue(ylabVar))
        ylab <- if (ylab == gettextRcmdr("<auto>"))
            ""
        else paste(", ylab=\"", ylab, "\"", sep = "")
        main <- trim.blanks(tclvalue(mainVar))
        main <- if (main == gettextRcmdr("<auto>"))
            ""
        else paste(", main=\"", main, "\"", sep = "")
        # remember the current selections for the next time the dialog opens
        putDialog ("ROCR", list(initial.prediction = prediction, initial.label = label, initial.ymeasure = ymeasure, initial.xmeasure = xmeasure,
                                initial.colorize = tclvalue(colorizeVariable), initial.add = tclvalue(addVariable),
                                initial.printcutoffs = tclvalue(printcutoffsVariable), initial.cutoffs = tclvalue(cutoffsVar),
                                initial.printroc = tclvalue(printrocVariable),
                                initial.costfp = as.numeric(as.character(tclvalue(costfpVar))),
                                initial.costfn = as.numeric(as.character(tclvalue(costfnVar))),
                                initial.calwindowsize = as.numeric(as.character(tclvalue(calwindowsizeVar))),
                                initial.partialfprstop = as.numeric(as.character(tclvalue(fprstopVar))),
                                initial.xlab=tclvalue(xlabVar), initial.ylab=tclvalue(ylabVar),
                                initial.main=tclvalue(mainVar),
                                initial.tab=tab)) # tab
        closeDialog()
        # validation: required selections and y != x
        if (0 == length(prediction)) {
            errorCondition(recall=fncvROCR, message=gettext("You must select a prediction variable.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 == length(label)) {
            errorCondition(recall=fncvROCR, message=gettext("No labels variables selected.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 == length(ymeasure)) {
            errorCondition(recall=fncvROCR, message=gettext("You must select a performance measure (y) variable.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 != length(xmeasure)) {
            if (ymeasure == xmeasure) {
                errorCondition(recall=fncvROCR, message=gettext("The performance measures, x and y should be different.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
        }
        .activeDataSet <- ActiveDataSet()
        #Daniel
        command <- paste("pred <- prediction(", .activeDataSet, "$", prediction, ", ",
                         .activeDataSet, "$", label, ")", sep = "")
        doItAndPrint(command)
        # translate the long label back to ROCR's short measure code
        ymeasure <- performancelist[which(performancelistlong == ymeasure)]
        # measure-specific extra arguments for performance()
        if (ymeasure == "auc") {
            .partialfprstop <- paste(", fpr.stop=", fprstop, sep = "")
        } else {
            .partialfprstop <- ""
        }
        if (ymeasure == "cal") {
            .calwindowsize <- paste(", window.size=", calwindowsize, sep = "")
        } else {
            .calwindowsize <- ""
        }
        if (ymeasure == "cost") {
            .cost <- paste(", cost.fp=", costfp, ", cost.fn=", costfn, sep = "")
        } else {
            .cost <- ""
        }
        if (0 == length(xmeasure)) {
            command <- paste("perf <- performance(pred, '", ymeasure, "'", .partialfprstop, .calwindowsize, .cost, ")", sep = "")
            doItAndPrint(command)
        } else {
            command <- paste("perf <- performance(pred, '", ymeasure, "', '",
                             performancelist[which(performancelistlong == xmeasure)], "'", .partialfprstop, .calwindowsize, .cost, ")", sep = "")
            doItAndPrint(command)
        }
        if (printroc == "TRUE") {
            command <- paste("perf", sep = "")
            doItAndPrint(command)
        }
        command <- paste("plot(perf, colorize=", colorize, ", add=", add, printcutoffsat, xlab, ylab, main, ")", sep = "")
        doItAndPrint(command)
        # clean up the temporaries created in the user's workspace
        command <- paste("remove(perf)", sep = "")
        doItAndPrint(command)
        command <- paste("remove(pred)", sep = "")
        doItAndPrint(command)
        tkfocus(CommanderWindow())
    }
    OKCancelHelp(helpSubject="performance", reset = "fncvROCR", apply="fncvROCR")
    # --- layout ---------------------------------------------------------------
    tkgrid(getFrame(predictionBox), getFrame(labelBox), sticky = "nw", padx=6, pady=c(6, 0))
    tkgrid(getFrame(ymeasureBox), getFrame(xmeasureBox), sticky="nw", padx=6, pady=c(6, 0))
    tkgrid(performanceFrame, sticky = "we", padx=6, pady=c(6, 6))
    # NOTE(review): "upt to" is a typo in the UI label text (left unchanged).
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Partial AUC upt to fpr of", domain="R-RcmdrPlugin.ROC")), fprstopEntry, sticky = "ew", padx=6, pady=c(6, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Calibration error window size", domain="R-RcmdrPlugin.ROC")), calwindowsizeEntry, sticky = "ew", padx=6, pady=c(0, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Cost fp adjustment", domain="R-RcmdrPlugin.ROC")), costfpEntry, sticky = "ew", padx=6, pady=c(0, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Cost fn adjustment", domain="R-RcmdrPlugin.ROC")), costfnEntry, sticky = "ew", padx=6, pady=c(0, 6))
    #tkgrid(performanceoptFrame, sticky = "we", padx=6, pady=c(6, 6))
    #tkgrid(getFrame(performanceFrame), getFrame(performanceoptFrame), sticky="nw", padx=6, pady=c(6, 6))
    tkgrid(optionsParFrame, sticky = "we", padx=6, pady=c(6, 0))
    tkgrid(optionsFrame, parFrame, sticky = "nswe", padx=6, pady=6)
    tkgrid(ttklabel(dataTab, text=""))
    tkgrid(ttklabel(dataTab, text=""))
    tkgrid(labelRcmdr(top, text = " "), padx=6)
    dialogSuffix(use.tabs=TRUE, grid.buttons=TRUE)
}
#=========================================================================================================================================
| /RcmdrPlugin.ROC/R/vROCR.R | no_license | ingted/R-Examples | R | false | false | 13,803 | r |
#=========================================================================================================================================
# -----------------------------------------------------------------------------
# R Commander plugin dialog: plot an ROCR performance curve.
#
# Builds a tabbed Tcl/Tk dialog (data tab: prediction/label variables plus the
# y and optional x performance measures; options tab: plot options, measure
# parameters, cutoffs and axis labels). On OK it validates the selections,
# persists them with putDialog(), and emits -- via doItAndPrint() -- the ROCR
# calls:
#     pred <- prediction(<data>$<prediction>, <data>$<label>)
#     perf <- performance(pred, '<y>'[, '<x>'][, measure-specific arguments])
#     plot(perf, colorize=..., add=..., ...)
# followed by remove(perf); remove(pred). Called for its side effects only.
#
# Requires Rcmdr (dialog helpers such as initializeDialog, variableListBox,
# doItAndPrint), ROCR (prediction/performance) and tcltk -- assumed attached
# by the plugin; see the commented-out require(ROCR) below.
# -----------------------------------------------------------------------------
fncvROCR <- function(){
    # Map the selected long measure names to 0-based Tk listbox indices;
    # returns NULL when var is NULL or names any entry missing from vars.
    varPosListn <- function(vars, var){
        if (is.null(var)) return(NULL)
        if (any(!var %in% vars)) NULL
        else apply(outer(var, vars, "=="), 1, which) - 1
    }
    #require(ROCR)
    #Daniel
    # Parallel vectors: ROCR's short measure codes and the human-readable
    # labels shown in the listboxes (matched by position).
    performancelist <- c("acc", "err", "fpr", "fall", "tpr", "rec", "sens", "fnr", "miss", "tnr", "spec", "ppv",
                "prec", "npv", "pcfall", "pcmiss", "rpp", "rnp", "phi", "mat", "mi", "chisq", "odds",
                "lift", "f", "rch", "auc", "prbe", "cal", "mxe", "rmse", "sar", "ecost", "cost")
    performancelistlong <- c("Accuracy", "Error rate", "False positive rate", "Fallout (fpr)", "True positive rate", "Recall (tpr)", "Sensitivity", "False negative rate", "Miss (fnr)", "True negative rate", "Specificity", "Positive predictive value",
                "Precision (ppv)", "Negative predictive value", "Prediction-conditioned fallout", "Prediction-conditioned miss", "Rate of positive predictions", "Rate of negative predictions", "Phi correlation coefficient", "Mattheus correlation coefficient (phi)", "Mutual information", "Chi square test statistic", "Odds ratio",
                "Lift value", "Precision-recall F measure", "ROC convex hull", "AUC", "Precision-recall break-even point", "Callibration error", "Mean cross-entropy", "Root-mean-squared error", "Sar", "Expected cost", "Cost")
    # Default widget state; the previous invocation's choices are restored
    # over these via getDialog("ROCR", defaults).
    defaults <- list(initial.prediction = NULL, initial.label = NULL, initial.ymeasure = performancelistlong[5], initial.xmeasure = performancelistlong[3],
                     initial.colorize = 0, initial.add = 0,
                     initial.printcutoffs = 0, initial.cutoffs = "seq(0,1,by=0.1)",
                     initial.printroc = 0,
                     initial.costfp = 1, initial.costfn = 1,
                     initial.calwindowsize = 100,
                     initial.partialfprstop = 1,
                     initial.xlab=gettextRcmdr("<auto>"), initial.ylab=gettextRcmdr("<auto>"),
                     initial.main=gettextRcmdr("<auto>"),
                     initial.tab=0) # tab
    dialog.values <- getDialog("ROCR", defaults)
    initializeDialog(title=gettext("Plot ROC curve", domain="R-RcmdrPlugin.ROC"), use.tabs=TRUE) # tab
    #Daniel
    .factors <- Factors() # NOTE(review): assigned but never used below
    .numeric <- Numeric()
    # --- data tab: prediction/label variable pickers -------------------------
    predictionBox <- variableListBox(dataTab, .numeric, title=gettext("Predictions variable (pick one)", domain="R-RcmdrPlugin.ROC"),# tab
                                     initialSelection=varPosn(dialog.values$initial.prediction, "numeric"))
    labelBox <- variableListBox(dataTab, .numeric, title=gettext("Labels variable (pick one)", domain="R-RcmdrPlugin.ROC"),
                                initialSelection=varPosn(dialog.values$initial.label, "numeric"))
    # --- options tab: frames, entries and checkboxes --------------------------
    optionsParFrame <- tkframe(optionsTab)# tab
    parFrame <- ttklabelframe(optionsParFrame, text=gettext("Plot Labels and Points", domain="R-RcmdrPlugin.ROC"))# tab
    performanceFrame <- ttklabelframe(optionsParFrame, text=gettext("Performance measures", domain="R-RcmdrPlugin.ROC"))# tab
    #performanceoptFrame <- ttklabelframe(optionsParFrame, text=gettext("Performance options", domain="R-RcmdrPlugin.ROC"))# tab
    # Numeric entries for measure-specific parameters (cost, cal, partial auc).
    costfpVar <- tclVar(dialog.values$initial.costfp) # tab
    costfpEntry <- ttkentry(performanceFrame, width = "25", textvariable = costfpVar)# tab
    costfnVar <- tclVar(dialog.values$initial.costfn) # tab
    costfnEntry <- ttkentry(performanceFrame, width = "25", textvariable = costfnVar)# tab
    calwindowsizeVar <- tclVar(dialog.values$initial.calwindowsize) # tab
    calwindowsizeEntry <- ttkentry(performanceFrame, width = "25", textvariable = calwindowsizeVar)# tab
    fprstopVar <- tclVar(dialog.values$initial.partialfprstop) # tab
    fprstopEntry <- ttkentry(performanceFrame, width = "25", textvariable = fprstopVar)# tab
    checkBoxes(window = optionsParFrame, frame = "optionsFrame",# tab
               boxes = c("printroc", "colorize", "add", "printcutoffs"), initialValues = c(
                 dialog.values$initial.printroc, dialog.values$initial.colorize, dialog.values$initial.add, dialog.values$initial.printcutoffs),labels = gettextRcmdr(c(
                 "Print performance object", "Colorize according to cutoff", "Add curve to existing plot","Print cutoffs")), title = gettext("Plot Options", domain="R-RcmdrPlugin.ROC"), ttk=TRUE)
    ymeasureBox <- variableListBox(performanceFrame, performancelistlong, title=gettext("Performance measure (y) (pick one)", domain="R-RcmdrPlugin.ROC"),# tab
                                   initialSelection=varPosListn(performancelistlong, dialog.values$initial.ymeasure))
    xmeasureBox <- variableListBox(performanceFrame, performancelistlong, title=gettext("Performance measure (x) (pick one or none)", domain="R-RcmdrPlugin.ROC"),
                                   initialSelection=varPosListn(performancelistlong, dialog.values$initial.xmeasure))
    # Cutoff positions entry plus a horizontal scrollbar bound to it.
    cutoffsVar <- tclVar(dialog.values$initial.cutoffs) # tab
    cutoffsEntry <- ttkentry(optionsFrame, width = "25", textvariable = cutoffsVar)# tab
    cutoffsScroll <- ttkscrollbar(optionsFrame, orient = "horizontal",
                                  command = function(...) tkxview(cutoffsEntry, ...))
    tkconfigure(cutoffsEntry, xscrollcommand = function(...) tkset(cutoffsScroll,
                                                                   ...))
    tkbind(cutoffsEntry, "<FocusIn>", function() tkselection.clear(cutoffsEntry))
    tkgrid(labelRcmdr(optionsFrame, text = gettext("Print cutoffs at", domain="R-RcmdrPlugin.ROC")), cutoffsEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(optionsFrame, text =""), cutoffsScroll, sticky = "ew", padx=6)
    # xlab/ylab/main entries, each with its own horizontal scrollbar.
    xlabVar <- tclVar(dialog.values$initial.xlab) # tab
    ylabVar <- tclVar(dialog.values$initial.ylab)
    mainVar <- tclVar(dialog.values$initial.main)
    xlabEntry <- ttkentry(parFrame, width = "25", textvariable = xlabVar)
    xlabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(xlabEntry, ...))
    tkconfigure(xlabEntry, xscrollcommand = function(...) tkset(xlabScroll,
                                                                ...))
    tkbind(xlabEntry, "<FocusIn>", function() tkselection.clear(xlabEntry))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("x-axis label")), xlabEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text =""), xlabScroll, sticky = "ew", padx=6)
    ylabEntry <- ttkentry(parFrame, width = "25", textvariable = ylabVar)
    ylabScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(ylabEntry, ...))
    tkconfigure(ylabEntry, xscrollcommand = function(...) tkset(ylabScroll,
                                                                ...))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("y-axis label")), ylabEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text=""), ylabScroll, sticky = "ew", padx=6)
    mainEntry <- ttkentry(parFrame, width = "25", textvariable = mainVar)
    mainScroll <- ttkscrollbar(parFrame, orient = "horizontal",
                               command = function(...) tkxview(mainEntry, ...))
    tkconfigure(mainEntry, xscrollcommand = function(...) tkset(mainScroll,
                                                                ...))
    tkgrid(labelRcmdr(parFrame, text = gettextRcmdr("Graph title")), mainEntry, sticky = "ew", padx=6)
    tkgrid(labelRcmdr(parFrame, text=""), mainScroll, sticky = "ew", padx=6)
    # OK handler: read the widget state, persist it with putDialog(), validate
    # the selections, then build and run the prediction/performance/plot
    # commands through doItAndPrint().
    onOK <- function(){
        tab <- if (as.character(tkselect(notebook)) == dataTab$ID) 0 else 1 # tab
        #Daniel
        prediction <- getSelection(predictionBox)
        label <- getSelection(labelBox)
        ymeasure <- getSelection(ymeasureBox)
        xmeasure <- getSelection(xmeasureBox)
        # checkbox "1"/"0" becomes the string "TRUE"/"FALSE", pasted verbatim
        # into the generated plot() command below
        colorize <- as.character("1" == tclvalue(colorizeVariable))
        add <- as.character("1" == tclvalue(addVariable))
        printroc <- as.character("1" == tclvalue(printrocVariable))
        costfp = as.numeric(as.character(tclvalue(costfpVar)))
        costfn = as.numeric(as.character(tclvalue(costfnVar)))
        calwindowsize = as.numeric(as.character(tclvalue(calwindowsizeVar)))
        fprstop = as.numeric(as.character(tclvalue(fprstopVar)))
        printcutoffsat <- if ("0" == tclvalue(printcutoffsVariable))
            ""
        else paste(", print.cutoffs.at=", tclvalue(cutoffsVar), sep = "")
        # "<auto>" means: let plot() choose; otherwise inject the user's text
        xlab <- trim.blanks(tclvalue(xlabVar))
        xlab <- if (xlab == gettextRcmdr("<auto>"))
            ""
        else paste(", xlab=\"", xlab, "\"", sep = "")
        ylab <- trim.blanks(tclvalue(ylabVar))
        ylab <- if (ylab == gettextRcmdr("<auto>"))
            ""
        else paste(", ylab=\"", ylab, "\"", sep = "")
        main <- trim.blanks(tclvalue(mainVar))
        main <- if (main == gettextRcmdr("<auto>"))
            ""
        else paste(", main=\"", main, "\"", sep = "")
        # remember the current selections for the next time the dialog opens
        putDialog ("ROCR", list(initial.prediction = prediction, initial.label = label, initial.ymeasure = ymeasure, initial.xmeasure = xmeasure,
                                initial.colorize = tclvalue(colorizeVariable), initial.add = tclvalue(addVariable),
                                initial.printcutoffs = tclvalue(printcutoffsVariable), initial.cutoffs = tclvalue(cutoffsVar),
                                initial.printroc = tclvalue(printrocVariable),
                                initial.costfp = as.numeric(as.character(tclvalue(costfpVar))),
                                initial.costfn = as.numeric(as.character(tclvalue(costfnVar))),
                                initial.calwindowsize = as.numeric(as.character(tclvalue(calwindowsizeVar))),
                                initial.partialfprstop = as.numeric(as.character(tclvalue(fprstopVar))),
                                initial.xlab=tclvalue(xlabVar), initial.ylab=tclvalue(ylabVar),
                                initial.main=tclvalue(mainVar),
                                initial.tab=tab)) # tab
        closeDialog()
        # validation: required selections and y != x
        if (0 == length(prediction)) {
            errorCondition(recall=fncvROCR, message=gettext("You must select a prediction variable.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 == length(label)) {
            errorCondition(recall=fncvROCR, message=gettext("No labels variables selected.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 == length(ymeasure)) {
            errorCondition(recall=fncvROCR, message=gettext("You must select a performance measure (y) variable.", domain="R-RcmdrPlugin.ROC"))
            return()
        }
        if (0 != length(xmeasure)) {
            if (ymeasure == xmeasure) {
                errorCondition(recall=fncvROCR, message=gettext("The performance measures, x and y should be different.", domain="R-RcmdrPlugin.ROC"))
                return()
            }
        }
        .activeDataSet <- ActiveDataSet()
        #Daniel
        command <- paste("pred <- prediction(", .activeDataSet, "$", prediction, ", ",
                         .activeDataSet, "$", label, ")", sep = "")
        doItAndPrint(command)
        # translate the long label back to ROCR's short measure code
        ymeasure <- performancelist[which(performancelistlong == ymeasure)]
        # measure-specific extra arguments for performance()
        if (ymeasure == "auc") {
            .partialfprstop <- paste(", fpr.stop=", fprstop, sep = "")
        } else {
            .partialfprstop <- ""
        }
        if (ymeasure == "cal") {
            .calwindowsize <- paste(", window.size=", calwindowsize, sep = "")
        } else {
            .calwindowsize <- ""
        }
        if (ymeasure == "cost") {
            .cost <- paste(", cost.fp=", costfp, ", cost.fn=", costfn, sep = "")
        } else {
            .cost <- ""
        }
        if (0 == length(xmeasure)) {
            command <- paste("perf <- performance(pred, '", ymeasure, "'", .partialfprstop, .calwindowsize, .cost, ")", sep = "")
            doItAndPrint(command)
        } else {
            command <- paste("perf <- performance(pred, '", ymeasure, "', '",
                             performancelist[which(performancelistlong == xmeasure)], "'", .partialfprstop, .calwindowsize, .cost, ")", sep = "")
            doItAndPrint(command)
        }
        if (printroc == "TRUE") {
            command <- paste("perf", sep = "")
            doItAndPrint(command)
        }
        command <- paste("plot(perf, colorize=", colorize, ", add=", add, printcutoffsat, xlab, ylab, main, ")", sep = "")
        doItAndPrint(command)
        # clean up the temporaries created in the user's workspace
        command <- paste("remove(perf)", sep = "")
        doItAndPrint(command)
        command <- paste("remove(pred)", sep = "")
        doItAndPrint(command)
        tkfocus(CommanderWindow())
    }
    OKCancelHelp(helpSubject="performance", reset = "fncvROCR", apply="fncvROCR")
    # --- layout ---------------------------------------------------------------
    tkgrid(getFrame(predictionBox), getFrame(labelBox), sticky = "nw", padx=6, pady=c(6, 0))
    tkgrid(getFrame(ymeasureBox), getFrame(xmeasureBox), sticky="nw", padx=6, pady=c(6, 0))
    tkgrid(performanceFrame, sticky = "we", padx=6, pady=c(6, 6))
    # NOTE(review): "upt to" is a typo in the UI label text (left unchanged).
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Partial AUC upt to fpr of", domain="R-RcmdrPlugin.ROC")), fprstopEntry, sticky = "ew", padx=6, pady=c(6, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Calibration error window size", domain="R-RcmdrPlugin.ROC")), calwindowsizeEntry, sticky = "ew", padx=6, pady=c(0, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Cost fp adjustment", domain="R-RcmdrPlugin.ROC")), costfpEntry, sticky = "ew", padx=6, pady=c(0, 0))
    tkgrid(labelRcmdr(performanceFrame, text = gettext("Cost fn adjustment", domain="R-RcmdrPlugin.ROC")), costfnEntry, sticky = "ew", padx=6, pady=c(0, 6))
    #tkgrid(performanceoptFrame, sticky = "we", padx=6, pady=c(6, 6))
    #tkgrid(getFrame(performanceFrame), getFrame(performanceoptFrame), sticky="nw", padx=6, pady=c(6, 6))
    tkgrid(optionsParFrame, sticky = "we", padx=6, pady=c(6, 0))
    tkgrid(optionsFrame, parFrame, sticky = "nswe", padx=6, pady=6)
    tkgrid(ttklabel(dataTab, text=""))
    tkgrid(ttklabel(dataTab, text=""))
    tkgrid(labelRcmdr(top, text = " "), padx=6)
    dialogSuffix(use.tabs=TRUE, grid.buttons=TRUE)
}
#=========================================================================================================================================
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers.embeddings.R
\name{Embedding}
\alias{Embedding}
\title{Embedding layer}
\usage{
Embedding(input_dim, output_dim, embeddings_initializer = "uniform",
embeddings_regularizer = NULL, embeddings_constraint = NULL,
mask_zero = FALSE, input_length = NULL, input_shape = NULL)
}
\arguments{
\item{input_dim}{int > 0. Size of the vocabulary, ie. 1 + maximum integer
index occurring in the input data.}
\item{output_dim}{int >= 0. Dimension of the dense embedding.}
\item{embeddings_initializer}{Initializer for the embeddings matrix}
\item{embeddings_regularizer}{Regularizer function applied to the embeddings matrix}
\item{embeddings_constraint}{Constraint function applied to the embeddings matrix}
\item{mask_zero}{Whether or not the input value 0 is a special "padding"
value that should be masked out.}
\item{input_length}{Length of input sequences, when it is constant.}
\item{input_shape}{only needed when this is the first layer of a model; sets
the input shape of the data}
}
\description{
Turns positive integers (indexes) into dense vectors of fixed size.
}
\examples{
if(keras_available()) {
X_train <- matrix(sample(0:19, 100 * 100, TRUE), ncol = 100)
Y_train <- rnorm(100)
mod <- Sequential()
mod$add(Embedding(input_dim = 20, output_dim = 10,
input_length = 100))
mod$add(Dropout(0.5))
mod$add(GRU(16))
mod$add(Dense(1))
mod$add(Activation("sigmoid"))
keras_compile(mod, loss = "mse", optimizer = RMSprop())
keras_fit(mod, X_train, Y_train, epochs = 3, verbose = 0)
}
}
\references{
Chollet, Francois. 2015. \href{https://keras.io/}{Keras: Deep Learning library for Theano and TensorFlow}.
}
\seealso{
Other layers: \code{\link{Activation}},
\code{\link{ActivityRegularization}},
\code{\link{AdvancedActivation}},
\code{\link{BatchNormalization}}, \code{\link{Conv}},
\code{\link{Dense}}, \code{\link{Dropout}},
\code{\link{Flatten}}, \code{\link{GaussianNoise}},
\code{\link{LayerWrapper}},
\code{\link{LocallyConnected}}, \code{\link{Masking}},
\code{\link{MaxPooling}}, \code{\link{Permute}},
\code{\link{RNN}}, \code{\link{RepeatVector}},
\code{\link{Reshape}}, \code{\link{Sequential}}
}
\author{
Taylor B. Arnold, \email{taylor.arnold@acm.org}
}
| /man/Embedding.Rd | no_license | Yannael/kerasR | R | false | true | 2,311 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers.embeddings.R
\name{Embedding}
\alias{Embedding}
\title{Embedding layer}
\usage{
Embedding(input_dim, output_dim, embeddings_initializer = "uniform",
embeddings_regularizer = NULL, embeddings_constraint = NULL,
mask_zero = FALSE, input_length = NULL, input_shape = NULL)
}
\arguments{
\item{input_dim}{int > 0. Size of the vocabulary, ie. 1 + maximum integer
index occurring in the input data.}
\item{output_dim}{int >= 0. Dimension of the dense embedding.}
\item{embeddings_initializer}{Initializer for the embeddings matrix}
\item{embeddings_regularizer}{Regularizer function applied to the embeddings matrix}
\item{embeddings_constraint}{Constraint function applied to the embeddings matrix}
\item{mask_zero}{Whether or not the input value 0 is a special "padding"
value that should be masked out.}
\item{input_length}{Length of input sequences, when it is constant.}
\item{input_shape}{only needed when this is the first layer of a model; sets
the input shape of the data}
}
\description{
Turns positive integers (indexes) into dense vectors of fixed size.
}
\examples{
if(keras_available()) {
X_train <- matrix(sample(0:19, 100 * 100, TRUE), ncol = 100)
Y_train <- rnorm(100)
mod <- Sequential()
mod$add(Embedding(input_dim = 20, output_dim = 10,
input_length = 100))
mod$add(Dropout(0.5))
mod$add(GRU(16))
mod$add(Dense(1))
mod$add(Activation("sigmoid"))
keras_compile(mod, loss = "mse", optimizer = RMSprop())
keras_fit(mod, X_train, Y_train, epochs = 3, verbose = 0)
}
}
\references{
Chollet, Francois. 2015. \href{https://keras.io/}{Keras: Deep Learning library for Theano and TensorFlow}.
}
\seealso{
Other layers: \code{\link{Activation}},
\code{\link{ActivityRegularization}},
\code{\link{AdvancedActivation}},
\code{\link{BatchNormalization}}, \code{\link{Conv}},
\code{\link{Dense}}, \code{\link{Dropout}},
\code{\link{Flatten}}, \code{\link{GaussianNoise}},
\code{\link{LayerWrapper}},
\code{\link{LocallyConnected}}, \code{\link{Masking}},
\code{\link{MaxPooling}}, \code{\link{Permute}},
\code{\link{RNN}}, \code{\link{RepeatVector}},
\code{\link{Reshape}}, \code{\link{Sequential}}
}
\author{
Taylor B. Arnold, \email{taylor.arnold@acm.org}
}
|
testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.8003352013777e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) | /meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615827375-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 733 | r | testlist <- list(doy = -1.72131968218895e+83, latitude = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.8003352013777e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result) |
#' @title Download analysis records as a CASTEML file
#'
#' @description Download analysis records as a CASTEML file. This
#' function returns path to the file. The file is stored in a
#' temporary directory unless specified. Note with the same
#' arguments, this function downloads only once per R session.
#'
#' @param stone Unique identification number of stones in Medusa.
#' Really, those will pass to `casteml download' and thus you can
#' include options.
#' @param file File path to save downloaded CASTEML file
#' @param force Force download CASTEML file
#' @return Path to CASTEML file that was downloaded in temporary
#' directory.
#' @export
#' @seealso \code{casteml download},
#' \url{https://github.com/misasa/casteml},
#' \code{\link{cbk.convert.casteml}}
#' @examples
#' stone <- c("20080616170000.hk","20080616170056.hk","20080616170054.hk")
#' pmlfile <- cbk.download.casteml(stone)
#'
#' pmlfile <- cbk.download.casteml("20081202172326.hkitagawa")
cbk.download.casteml <- function(stone, file = NULL, force = FALSE) {
  # Assemble the shell command: `casteml download <stone...>`; any extra
  # options embedded in `stone` are passed through verbatim.
  cmd <- paste(c("casteml download", stone), collapse = " ")
  # Default destination: a cache file in tempdir(), named by the MD5 of the
  # command so identical requests within one session map to the same file.
  if (is.null(file)) {
    cache_name <- paste0(digest::digest(cmd, algo = "md5"), ".pml")
    file <- file.path(tempdir(), cache_name)
  }
  # Only invoke `casteml` when forced or when the cache file is absent.
  if (force || !file.exists(file)) {
    output <- system(cmd, intern = TRUE)
    cat(output, file = file, sep = "\n")
  }
  file
}
| /R/cbk.download.casteml.R | no_license | MasaYamanaka/chelyabinsk | R | false | false | 1,553 | r | #' @title Download analysis records as a CASTEML file
#'
#' @description Download analysis records as a CASTEML file. This
#' function returns path to the file. The file is stored in a
#' temporary directory unless specified. Note with the same
#' arguments, this function downloads only once per R session.
#'
#' @param stone Unique identification number of stones in Medusa.
#' Really, those will pass to `casteml download' and thus you can
#' include options.
#' @param file File path to save downloaded CASTEML file
#' @param force Force download CASTEML file
#' @return Path to CASTEML file that was downloaded in temporary
#' directory.
#' @export
#' @seealso \code{casteml download},
#' \url{https://github.com/misasa/casteml},
#' \code{\link{cbk.convert.casteml}}
#' @examples
#' stone <- c("20080616170000.hk","20080616170056.hk","20080616170054.hk")
#' pmlfile <- cbk.download.casteml(stone)
#'
#' pmlfile <- cbk.download.casteml("20081202172326.hkitagawa")
cbk.download.casteml <- function(stone,file=NULL,force=FALSE) {
# Build the `casteml download <stone...>` shell command; extra options
# inside `stone` are passed through verbatim.
cmd <- paste(c("casteml download",stone),collapse=" ")
## file <- tempfile(pattern = paste(stone[1],"@",sep=""), fileext=".pml")
## system(paste("casteml download",stone[ii],">",file))
if(is.null(file)){
## file <- tempfile(fileext=".pml")
# Cache path keyed on the MD5 of the command, so the same request within
# one R session reuses the same temporary file.
file <- file.path(tempdir(),paste0(digest::digest(cmd,algo='md5'),".pml"))
}
## Download file only when it does not exist
if (force || !file.exists(file)) {
# Run casteml and write its captured stdout to `file`, one line each.
cat(system(cmd, intern = TRUE),file=file,sep="\n")
}
return(file)
}
|
\name{model2hyperdraw}
\encoding{latin1}
\Rdversion{1.1}
\alias{model2hyperdraw}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Draws a hypergraph representation from a model file}
\description{Convert a model file to a \code{hypergraph} representation}
\usage{model2hyperdraw(modelFile,uptake,minimal,levels,layout)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{modelFile}{is a file created from \code{createModel} or \code{pruneModel}}
\item{uptake}{is a character vector representing the substrate uptake in a metabolic process}
\item{minimal}{is a logical value TRUE or FALSE to visualize externals on a graph}
\item{levels}{is a numeric value to determine the levels of thickness of edges}
\item{layout}{is a character string representing the layout engine to be used for visualization for example "dot", "twopi","neato","fdp","sfdp" and "circo"}
}
\value{
\item{graphNEL object}{returns an \code{graphNEL} object representation. }
}
\author{Anand K. Gavai <anand.gavai@bioinformatics.nl>}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link[hyperdraw:graphBPH]{graphBPH}
}
\examples{
\dontrun{
data("Glycolysis")
uptake<-"glcD"
minimal<-"TRUE"
levels<-7
layout<-"neato"
gnel<-model2hyperdraw(Glycolysis,"glcD",TRUE,levels,layout)
gnel
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{hypergraph}
\keyword{hyperdraw}
| /man/model2hyperdraw.Rd | no_license | cran/BiGGR | R | false | false | 1,558 | rd | \name{model2hyperdraw}
\encoding{latin1}
\Rdversion{1.1}
\alias{model2hyperdraw}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Draws a hypergraph representation from a model file}
\description{Convert a model file to a \code{hypergraph} representation}
\usage{model2hyperdraw(modelFile,uptake,minimal,levels,layout)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{modelFile}{is a file created from \code{createModel} or \code{pruneModel}}
\item{uptake}{is a character vector representing the substrate uptake in a metabolic process}
\item{minimal}{is a logical value TRUE or FALSE to visualize externals on a graph}
\item{levels}{is a numeric value to determine the levels of thickness of edges}
\item{layout}{is a character string representing the layout engine to be used for visualization for example "dot", "twopi","neato","fdp","sfdp" and "circo"}
}
\value{
\item{graphNEL object}{returns an \code{graphNEL} object representation. }
}
\author{Anand K. Gavai <anand.gavai@bioinformatics.nl>}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link[hyperdraw:graphBPH]{graphBPH}
}
\examples{
\dontrun{
data("Glycolysis")
uptake<-"glcD"
minimal<-"TRUE"
levels<-7
layout<-"neato"
gnel<-model2hyperdraw(Glycolysis,"glcD",TRUE,levels,layout)
gnel
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{hypergraph}
\keyword{hyperdraw}
|
# K-means clustering demo on a chemoinformatics descriptor table (CABANA
# workshop data): builds a feature-correlation heatmap, then clusters the
# compounds on the same descriptors.
library(tidyverse)
library(vroom)
#load data
my_data = vroom::vroom("https://raw.githubusercontent.com/BarbaraDiazE/CABANA_CHEMOINFORMATICS/master/Day_3/UnsupervisedLearning_Clustering/K_Means/Data_cluster.csv")
#make feature set for quant features
features = c('HBA',
'HBD',
'RB',
'LogP',
'TPSA',
'MW',
'Heavy Atom',
'Ring Count',
'Fraction CSP3')
#make a hierarchical clustering object for heatmap reordering
# Cluster the rows of the feature-correlation matrix so that correlated
# descriptors end up adjacent in the heatmap below.
my_clust_order <-
my_data[features] %>%
cor() %>%
as.data.frame() %>%
rownames_to_column(var = "feature_1") %>%
dist() %>%
hclust()
# Keep only the leaf ordering produced by hclust().
my_clust_order <- my_clust_order$order
#reorder features
features <- features[my_clust_order]
#make heatmap
my_data[features] %>%
cor() %>% #calculate pearson cor
as.data.frame() %>%
rownames_to_column(var = "feature_1") %>%
tidyr::pivot_longer(cols = -feature_1, #pivot to long format
names_to = "feature_2",
values_to = "value") %>% #consider features as factors, for plotting
mutate(feature_1 = as_factor(feature_1),
feature_2 = as_factor(feature_2)) %>%
ggplot(aes(x = feature_1,
y = feature_2,
fill = value)) +
geom_tile() + #heatmap style
scale_fill_distiller(palette = "Spectral") #define palette
#kmeans clustering
# NOTE(review): kmeans() starts from random centers; add set.seed() before
# this call if reproducible cluster assignments are needed.
my_clusters <-
my_data[features] %>%
kmeans(centers = 3)
#plot, colored by cluster
my_data[features] %>%
mutate(cluster = my_clusters$cluster) %>%
ggplot(aes(x = TPSA,
y = MW,
color = as_factor(cluster))) +
geom_point()
| /kmeans_chemo.R | permissive | guillermodeandajauregui/cabana_chemoinformatics_2019 | R | false | false | 1,663 | r | library(tidyverse)
# K-means clustering demo on a chemoinformatics descriptor table: builds a
# feature-correlation heatmap, then clusters the compounds on the same
# descriptors (tidyverse is attached just above this block).
library(vroom)
#load data
my_data = vroom::vroom("https://raw.githubusercontent.com/BarbaraDiazE/CABANA_CHEMOINFORMATICS/master/Day_3/UnsupervisedLearning_Clustering/K_Means/Data_cluster.csv")
#make feature set for quant features
features = c('HBA',
'HBD',
'RB',
'LogP',
'TPSA',
'MW',
'Heavy Atom',
'Ring Count',
'Fraction CSP3')
#make a hierarchical clustering object for heatmap reordering
# Cluster the feature-correlation matrix so correlated descriptors end up
# adjacent in the heatmap below; keep only the hclust leaf ordering.
my_clust_order <-
my_data[features] %>%
cor() %>%
as.data.frame() %>%
rownames_to_column(var = "feature_1") %>%
dist() %>%
hclust()
my_clust_order <- my_clust_order$order
#reorder features
features <- features[my_clust_order]
#make heatmap
my_data[features] %>%
cor() %>% #calculate pearson cor
as.data.frame() %>%
rownames_to_column(var = "feature_1") %>%
tidyr::pivot_longer(cols = -feature_1, #pivot to long format
names_to = "feature_2",
values_to = "value") %>% #consider features as factors, for plotting
mutate(feature_1 = as_factor(feature_1),
feature_2 = as_factor(feature_2)) %>%
ggplot(aes(x = feature_1,
y = feature_2,
fill = value)) +
geom_tile() + #heatmap style
scale_fill_distiller(palette = "Spectral") #define palette
#kmeans clustering
# NOTE(review): kmeans() uses random initial centers; add set.seed() before
# this call if reproducible cluster assignments are needed.
my_clusters <-
my_data[features] %>%
kmeans(centers = 3)
#plot, colored by cluster
my_data[features] %>%
mutate(cluster = my_clusters$cluster) %>%
ggplot(aes(x = TPSA,
y = MW,
color = as_factor(cluster))) +
geom_point()
|
# Show the current working directory, then switch to the Specdata folder.
getwd()
# NOTE(review): setwd() returns the *previous* working directory invisibly,
# so `directorio` holds the old path, not Specdata -- confirm that is intended
# (completos() below ignores its `directorio` argument anyway).
directorio <- setwd("C:/Users/Ma.Fernanda/Desktop/Programaci-n_Actuarial_III/Specdata")
#' Count complete observations per monitor file.
#'
#' Reads the CSV files "001.csv" ... "332.csv" (zero-padded ids) from the
#' current working directory and, for each requested monitor, counts the rows
#' with no missing value in any column (the same rows the original kept via
#' complete.cases() before subsetting columns 2:3, so the count is unchanged).
#'
#' @param directorio Path of the data directory. NOTE(review): kept for
#'   backward compatibility but unused -- files are read from the current
#'   working directory, exactly as in the original version.
#' @param id Integer vector of monitor ids to process (default 1:332).
#' @return (printed, and returned invisibly) a data.frame with columns
#'   ID and NOBS.
completos <- function(directorio, id = 1:332) {
  nobs <- vapply(id, function(i) {
    # sprintf zero-pads to three digits, replacing the manual if/else chain.
    archivo <- sprintf("%03d.csv", i)
    leer <- read.csv(archivo, header = TRUE)
    # Number of rows where every column is observed.
    sum(complete.cases(leer))
  }, numeric(1))
  # vapply preallocates the result, avoiding the original O(n^2) c() growth.
  dafra <- data.frame(ID = id, NOBS = nobs)
  print(dafra)
}
# Sanity check on a single monitor file: read 001.csv, keep only the complete
# rows, and correlate the two measurement columns (columns 2 and 3).
ler <- read.csv("001.csv")
dat <- complete.cases(ler)
real <- ler[dat, 2:3]
relaci <- cor(real[,1],real[,2])
relaci
completos(directorio,1:10) | /Specdata/completos.R | no_license | Fers16/Programaci-n_Actuarial_III | R | false | false | 773 | r | getwd()
# Point the session at the Specdata folder.
# NOTE(review): setwd() returns the *previous* working directory invisibly,
# so `directorio` holds the old path, not Specdata (completos() below ignores
# its `directorio` argument anyway).
directorio <- setwd("C:/Users/Ma.Fernanda/Desktop/Programaci-n_Actuarial_III/Specdata")
# Count, for each requested monitor id, the number of fully observed rows in
# the corresponding zero-padded CSV file ("001.csv" ... "332.csv") read from
# the current working directory. Prints and returns (invisibly) a data.frame
# with columns ID and NOBS.
completos <- function(directorio,id=1:332){
a <- vector("numeric")
for (i in id) {
# Zero-pad the id to three digits to build the file name.
if(i<10){
i = paste("00",i,sep="")
} else if(i>=10 && i<100){
i = paste("0",i,sep="")
} else{
i = paste(i,sep="")
}
leer <- read.csv(paste(i,".csv",sep=""),header = TRUE)
# Rows with no missing value in any column.
datos <- complete.cases(leer)
reales <- leer[datos,2:3]
numeda <- nrow(reales)
# NOTE(review): growing `a` with c() inside the loop is O(n^2); preallocate
# or use vapply() if this ever becomes slow.
a <- c(a,numeda)
}
dafra <- data.frame(ID = id,NOBS = a)
print(dafra)
}
# Sanity check on one file: correlation between the two measurement columns
# over the complete rows only.
ler <- read.csv("001.csv")
dat <- complete.cases(ler)
real <- ler[dat, 2:3]
relaci <- cor(real[,1],real[,2])
relaci
completos(directorio,1:10)
# Moneyball analysis (MIT Analytics Edge): predict wins from run differential.
baseball <- read.csv("dati/baseball.csv")
# Keep only seasons before the 2002 "Moneyball" year.
moneyball <- subset(baseball, Year < 2002)
str(moneyball)
# Run differential: runs scored minus runs allowed.
moneyball$RD <- moneyball$RS - moneyball$RA
plot(moneyball$RD, moneyball$W, xlab = 'Difference between Runs allowed and Runs', ylab = 'Wins')
# Linear model: wins explained by run differential.
WinsReg <- lm(W ~ RD, data = moneyball)
summary(WinsReg)
# Predicted wins for a given run differential, using the fitted model
# (reads WinsReg from the enclosing environment).
numero_runs <- function(valore) {
  WinsReg$coefficients[1] + WinsReg$coefficients[2] * valore
}
# BUG FIX: the original called the undefined calc_val(); numero_runs(),
# defined just above and otherwise never used, is clearly the intended name.
numero_wins <- numero_runs(135)
# Run differential needed for 95 wins, inverting the regression line.
necessary_runs <- (95.0 - WinsReg$coefficients[1]) / WinsReg$coefficients[2]
rd_ra <- 713 - 614
# Same prediction using hard-coded rounded coefficients (the variable name
# suggests they are the values quoted in the course video).
equazione_video <- 80.8814 + 0.1058 * rd_ra
# Runs scored explained by on-base and slugging percentage.
RunsReg <- lm(RS ~ OBP + SLG, data = moneyball)
summary(RunsReg)
# Predicted runs for two hypothetical OBP/SLG cases, with hard-coded fits.
runs_case1 <- -804.63 + (2737.77 * 0.311) + (1584.91 * 0.405)
runs_case2 <- -837.38 + (2913.60 * 0.297) + (1514.29 * 0.370)
# Rank vs. wins correlation for ten teams across two seasons.
teamRank <- c(1, 2, 3, 3, 4, 4, 4, 4, 5, 5)
wins2012 <- c(94, 88, 95, 88, 93, 94, 98, 97, 93, 94)
wins2013 <- c(97, 97, 92, 93, 92, 96, 94, 96, 92, 90)
cor(teamRank, wins2012)
cor(teamRank,wins2013) | /OperazioniComuni/moneyBall.R | no_license | geosconsulting/analyticEdge-MIT | R | false | false | 930 | r | baseball <- read.csv("dati/baseball.csv")
# Moneyball (Analytics Edge): keep pre-2002 seasons only.
moneyball <- subset(baseball, Year<2002)
str(moneyball)
# Run differential = runs scored - runs allowed.
moneyball$RD <- moneyball$RS - moneyball$RA
plot(moneyball$RD,moneyball$W, xlab = 'Difference between Runs allowed and Runs', ylab = 'Wins')
# Wins regressed on run differential.
WinsReg <- lm(W ~ RD, data = moneyball)
summary(WinsReg)
# Predicted wins for a given run differential (reads the global WinsReg fit).
numero_runs <- function(valore){
return(WinsReg$coefficients[1] + (WinsReg$coefficients[2]*valore))
}
# NOTE(review): calc_val() is not defined anywhere in this file; numero_runs()
# defined just above (and otherwise never called) looks like the intended
# callee -- confirm and rename.
numero_wins <- calc_val(135)
# Run differential needed to reach 95 wins, inverting the fitted line.
necessary_runs = (95.0 - WinsReg$coefficients[1]) / WinsReg$coefficients[2]
rd_ra <- 713-614
# Same prediction with hard-coded rounded coefficients (variable name suggests
# they come from the course video).
equazione_video <- 80.8814 + 0.1058 * rd_ra
# Runs scored regressed on on-base and slugging percentages.
RunsReg <- lm(RS ~ OBP + SLG , data = moneyball)
summary(RunsReg)
# Predicted runs for two hypothetical OBP/SLG cases, with hard-coded fits.
runs_case1 <- -804.63 + (2737.77*0.311) + (1584.91 * 0.405)
runs_case2 <- -837.38 + (2913.60*0.297) + (1514.29 * 0.370)
# Rank vs. wins correlation for ten teams across two seasons.
teamRank = c(1,2,3,3,4,4,4,4,5,5)
wins2012 = c(94,88,95,88,93,94,98,97,93,94)
wins2013 = c(97,97,92,93,92,96,94,96,92,90)
cor(teamRank,wins2012)
cor(teamRank,wins2013)
# Build the "Download Modules" modal dialog (a shiny UI fragment).
# Returns the modalDialog tag; presumably displayed via showModal() at the
# call site -- confirm there.
# NOTE(review): the commented-out NS(id) lines suggest this was once meant to
# be a shiny module; the input ids below are hard-coded, not namespaced.
export_results_modal <- function(){
# library(shiny)
# ns <- NS(id)
modalDialog(
style = "background-color: #ecf0f5",
title = "Download Modules",
size = "s",
# Checkboxes selecting which result tables to export; both ticked by default.
checkboxGroupInput(
inputId = "modulesToExport",
label = "",
choices = c("Event Table 1" = "eventTable1",
"Event Table 2" = "eventTable2"),
selected = c("eventTable1", "eventTable2")
),
# Footer: a plain cancel button plus a download link styled as a button.
footer = tagList(
actionButton(
"cancelExport",
"Cancel"
),
downloadLink(class = 'btn btn-default',
"exportModules",
"Export",
icon = icon("download"))
)
)
} | /app/functions/export-results-modal.R | no_license | annacnev/shinyModuleStorybook | R | false | false | 678 | r | export_results_modal <- function(){
# library(shiny)
# ns <- NS(id)
modalDialog(
style = "background-color: #ecf0f5",
title = "Download Modules",
size = "s",
checkboxGroupInput(
inputId = "modulesToExport",
label = "",
choices = c("Event Table 1" = "eventTable1",
"Event Table 2" = "eventTable2"),
selected = c("eventTable1", "eventTable2")
),
footer = tagList(
actionButton(
"cancelExport",
"Cancel"
),
downloadLink(class = 'btn btn-default',
"exportModules",
"Export",
icon = icon("download"))
)
)
} |
#' SfnData custom get generics
#'
#' Generics for getting the info in the slots of SfnData
#'
#' Each generic below only dispatches (via \code{standardGeneric}) to the
#' method implemented for the \code{SfnData} class; see SfnData_class.R for
#' what each slot holds.
#'
#' @param object An SfnData object.
#' @param ... Further arguments passed on to methods.
#'
#' @name sfn_get_generics
#' @include SfnData_class.R
NULL
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_sapf",
function(object, ...) {
standardGeneric("get_sapf")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env",
function(object, ...) {
standardGeneric("get_env")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_sapf_flags",
function(object, ...) {
standardGeneric("get_sapf_flags")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env_flags",
function(object, ...) {
standardGeneric("get_env_flags")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_timestamp",
function(object, ...) {
standardGeneric("get_timestamp")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_solar_timestamp",
function(object, ...) {
standardGeneric("get_solar_timestamp")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_si_code",
function(object, ...) {
standardGeneric("get_si_code")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_site_md",
function(object, ...) {
standardGeneric("get_site_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_stand_md",
function(object, ...) {
standardGeneric("get_stand_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_species_md",
function(object, ...) {
standardGeneric("get_species_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_plant_md",
function(object, ...) {
standardGeneric("get_plant_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env_md",
function(object, ...) {
standardGeneric("get_env_md")
}
)
#' Replacement generics
#'
#' Generic functions for replacement functions for SfnData
#'
#' These back the replacement forms (e.g. \code{get_sapf(x) <- value}), each
#' dispatching via \code{standardGeneric} to the SfnData method.
#'
#' @param object An SfnData object.
#' @param value The new value to assign.
#'
#' @name sfn_replacement_generics
NULL
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_sapf<-",
function(object, value) {
standardGeneric("get_sapf<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env<-",
function(object, value) {
standardGeneric("get_env<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_sapf_flags<-",
function(object, value) {
standardGeneric("get_sapf_flags<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env_flags<-",
function(object, value) {
standardGeneric("get_env_flags<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_timestamp<-",
function(object, value) {
standardGeneric("get_timestamp<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_solar_timestamp<-",
function(object, value) {
standardGeneric("get_solar_timestamp<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_si_code<-",
function(object, value) {
standardGeneric("get_si_code<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_site_md<-",
function(object, value) {
standardGeneric("get_site_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_stand_md<-",
function(object, value) {
standardGeneric("get_stand_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_species_md<-",
function(object, value) {
standardGeneric("get_species_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_plant_md<-",
function(object, value) {
standardGeneric("get_plant_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env_md<-",
function(object, value) {
standardGeneric("get_env_md<-")
}
)
| /R/SfnData_generics.R | no_license | sapfluxnet/sapfluxnetQC1 | R | false | false | 3,762 | r | #' SfnData custom get generics
#'
#' Generics for getting the info in the slots of SfnData
#'
#' Each generic below only dispatches (via \code{standardGeneric}) to the
#' method implemented for the \code{SfnData} class; see SfnData_class.R for
#' what each slot holds.
#'
#' @param object An SfnData object.
#' @param ... Further arguments passed on to methods.
#'
#' @name sfn_get_generics
#' @include SfnData_class.R
NULL
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_sapf",
function(object, ...) {
standardGeneric("get_sapf")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env",
function(object, ...) {
standardGeneric("get_env")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_sapf_flags",
function(object, ...) {
standardGeneric("get_sapf_flags")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env_flags",
function(object, ...) {
standardGeneric("get_env_flags")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_timestamp",
function(object, ...) {
standardGeneric("get_timestamp")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_solar_timestamp",
function(object, ...) {
standardGeneric("get_solar_timestamp")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_si_code",
function(object, ...) {
standardGeneric("get_si_code")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_site_md",
function(object, ...) {
standardGeneric("get_site_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_stand_md",
function(object, ...) {
standardGeneric("get_stand_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_species_md",
function(object, ...) {
standardGeneric("get_species_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_plant_md",
function(object, ...) {
standardGeneric("get_plant_md")
}
)
#' @rdname sfn_get_generics
#' @export
setGeneric(
"get_env_md",
function(object, ...) {
standardGeneric("get_env_md")
}
)
#' Replacement generics
#'
#' Generic functions for replacement functions for SfnData
#'
#' These back the replacement forms (e.g. \code{get_sapf(x) <- value}), each
#' dispatching via \code{standardGeneric} to the SfnData method.
#'
#' @param object An SfnData object.
#' @param value The new value to assign.
#'
#' @name sfn_replacement_generics
NULL
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_sapf<-",
function(object, value) {
standardGeneric("get_sapf<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env<-",
function(object, value) {
standardGeneric("get_env<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_sapf_flags<-",
function(object, value) {
standardGeneric("get_sapf_flags<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env_flags<-",
function(object, value) {
standardGeneric("get_env_flags<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_timestamp<-",
function(object, value) {
standardGeneric("get_timestamp<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_solar_timestamp<-",
function(object, value) {
standardGeneric("get_solar_timestamp<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_si_code<-",
function(object, value) {
standardGeneric("get_si_code<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_site_md<-",
function(object, value) {
standardGeneric("get_site_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_stand_md<-",
function(object, value) {
standardGeneric("get_stand_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_species_md<-",
function(object, value) {
standardGeneric("get_species_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_plant_md<-",
function(object, value) {
standardGeneric("get_plant_md<-")
}
)
#' @rdname sfn_replacement_generics
#' @export
setGeneric(
"get_env_md<-",
function(object, value) {
standardGeneric("get_env_md<-")
}
)
|
# Lecture 4 demo plots: PDF and CDF of the Gamma and Normal distributions for
# several parameter settings, drawn with base graphics.
# NOTE(review): the "## ---- Name" markers look like knitr/read_chunk chunk
# labels used by the slide sources -- confirm before renaming them.
library(ggplot2)
source('~/Dropbox/Teaching/ProbStatUProg/Lectures/MVutils.R')
## ---- GammaPlot
# Two stacked panels: Gamma PDF on top, CDF below; rate fixed at 2 and
# shape alpha in {1, 3, 5, 10} (encoded in the pdf1/pdf3/... names).
par(mfrow = c(2,1))
x = seq(0,10, by = 0.01)
lambda = 2
# PDF
pdf1 = dgamma(x, shape = 1, rate = lambda)
pdf3 = dgamma(x, shape = 3, rate = lambda)
pdf5 = dgamma(x, shape = 5, rate = lambda)
pdf10 = dgamma(x, shape = 10, rate = lambda)
plot(x,pdf1, type = "l", lwd = 3, ylab = "f(X)")
lines(x,pdf3, col = "red", lwd = 3)
lines(x,pdf5, col = "blue", lwd = 3)
lines(x,pdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(alpha,"=1")),expression(paste(alpha,"=3")),
expression(paste(alpha,"=5")),expression(paste(alpha,"=10"))),inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
# CDF
cdf1 = pgamma(x, shape = 1, rate = lambda)
cdf3 = pgamma(x, shape = 3, rate = lambda)
cdf5 = pgamma(x, shape = 5, rate = lambda)
cdf10 = pgamma(x, shape = 10, rate = lambda)
plot(x,cdf1, type = "l", lwd = 3, ylab = "F(X)")
lines(x,cdf3, col = "red", lwd = 3)
lines(x,cdf5, col = "blue", lwd = 3)
lines(x,cdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(alpha,"=1")),expression(paste(alpha,"=3")),
expression(paste(alpha,"=5")),expression(paste(alpha,"=10"))),inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
## ---- NormalPlot
# Same layout for the Normal distribution at four (mu, sigma) settings.
par(mfrow = c(2,1))
x = seq(-10,10, by = 0.01)
# PDF
pdf1 = dnorm(x, mean = 0, sd = 1)
pdf3 = dnorm(x, mean = 0, sd = 3)
pdf5 = dnorm(x, mean = 2, sd = 1)
pdf10 = dnorm(x, mean = -2, sd = 2)
plot(x,pdf1, type = "l", lwd = 3, ylab = "f(X)")
lines(x,pdf3, col = "red", lwd = 3)
lines(x,pdf5, col = "blue", lwd = 3)
lines(x,pdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(mu,"=0, ",sigma,"=1")),expression(paste(mu,"=0, ",sigma,"=3")),
expression(paste(mu,"=2, ",sigma,"=1")),expression(paste(mu,"=-2, ",sigma,"=2"))),
inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
# CDF
cdf1 = pnorm(x, mean = 0, sd = 1)
cdf3 = pnorm(x, mean = 0, sd = 3)
cdf5 = pnorm(x, mean = 2, sd = 1)
cdf10 = pnorm(x, mean = -2, sd = 2)
plot(x,cdf1, type = "l", lwd = 3, ylab = "F(X)")
lines(x,cdf3, col = "red", lwd = 3)
lines(x,cdf5, col = "blue", lwd = 3)
lines(x,cdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(mu,"=0, ",sigma,"=1")),expression(paste(mu,"=0, ",sigma,"=3")),
expression(paste(mu,"=2, ",sigma,"=1")),expression(paste(mu,"=-2, ",sigma,"=2"))),
inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green")) | /Lectures/Lecture4SlideCode.R | no_license | STIMALiU/IntroStatsForCSCourse | R | false | false | 2,692 | r | library(ggplot2)
# Lecture 4 demo plots: PDF and CDF of the Gamma and Normal distributions for
# several parameter settings, drawn with base graphics (ggplot2 is attached
# just above this block).
source('~/Dropbox/Teaching/ProbStatUProg/Lectures/MVutils.R')
## ---- GammaPlot
# Two stacked panels: Gamma PDF on top, CDF below; rate fixed at 2 and
# shape alpha in {1, 3, 5, 10} (encoded in the pdf1/pdf3/... names).
par(mfrow = c(2,1))
x = seq(0,10, by = 0.01)
lambda = 2
# PDF
pdf1 = dgamma(x, shape = 1, rate = lambda)
pdf3 = dgamma(x, shape = 3, rate = lambda)
pdf5 = dgamma(x, shape = 5, rate = lambda)
pdf10 = dgamma(x, shape = 10, rate = lambda)
plot(x,pdf1, type = "l", lwd = 3, ylab = "f(X)")
lines(x,pdf3, col = "red", lwd = 3)
lines(x,pdf5, col = "blue", lwd = 3)
lines(x,pdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(alpha,"=1")),expression(paste(alpha,"=3")),
expression(paste(alpha,"=5")),expression(paste(alpha,"=10"))),inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
# CDF
cdf1 = pgamma(x, shape = 1, rate = lambda)
cdf3 = pgamma(x, shape = 3, rate = lambda)
cdf5 = pgamma(x, shape = 5, rate = lambda)
cdf10 = pgamma(x, shape = 10, rate = lambda)
plot(x,cdf1, type = "l", lwd = 3, ylab = "F(X)")
lines(x,cdf3, col = "red", lwd = 3)
lines(x,cdf5, col = "blue", lwd = 3)
lines(x,cdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(alpha,"=1")),expression(paste(alpha,"=3")),
expression(paste(alpha,"=5")),expression(paste(alpha,"=10"))),inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
## ---- NormalPlot
# Same layout for the Normal distribution at four (mu, sigma) settings.
par(mfrow = c(2,1))
x = seq(-10,10, by = 0.01)
# PDF
pdf1 = dnorm(x, mean = 0, sd = 1)
pdf3 = dnorm(x, mean = 0, sd = 3)
pdf5 = dnorm(x, mean = 2, sd = 1)
pdf10 = dnorm(x, mean = -2, sd = 2)
plot(x,pdf1, type = "l", lwd = 3, ylab = "f(X)")
lines(x,pdf3, col = "red", lwd = 3)
lines(x,pdf5, col = "blue", lwd = 3)
lines(x,pdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(mu,"=0, ",sigma,"=1")),expression(paste(mu,"=0, ",sigma,"=3")),
expression(paste(mu,"=2, ",sigma,"=1")),expression(paste(mu,"=-2, ",sigma,"=2"))),
inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
# CDF
cdf1 = pnorm(x, mean = 0, sd = 1)
cdf3 = pnorm(x, mean = 0, sd = 3)
cdf5 = pnorm(x, mean = 2, sd = 1)
cdf10 = pnorm(x, mean = -2, sd = 2)
plot(x,cdf1, type = "l", lwd = 3, ylab = "F(X)")
lines(x,cdf3, col = "red", lwd = 3)
lines(x,cdf5, col = "blue", lwd = 3)
lines(x,cdf10, col = "green", lwd = 3)
legend("right", legend = c( expression(paste(mu,"=0, ",sigma,"=1")),expression(paste(mu,"=0, ",sigma,"=3")),
expression(paste(mu,"=2, ",sigma,"=1")),expression(paste(mu,"=-2, ",sigma,"=2"))),
inset = .05, lty=c(1,1,1,1), lwd=c(3,3,3,3), col=c("black","red","blue","green"))
# Variables:
# dataset     = contains the complete dataset
# functie     = contains the job functions, only for respondents from the Netherlands (everything is in 1 column)
# jobfunctie  = all functions from the DevType column of dataset, split out further.
# The lines below install the packages that R-Studio needs for the data processing
install.packages("readr")
library(readr)
# The packages below are needed for filtering the data; they have to be installed as well.
install.packages("dplyr")
library(dplyr)
install.packages("tidyr")
library(tidyr)
# Change the working directory.
getwd()
setwd("N:/Studeren/Novi Hogeschool/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
# Working directory (private) laptop
setwd("C:/Users/Steur/SynologyDrive/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
# Working directory (PwC) laptop
setwd("H:/My Drive/DATA SCIENCE Werkstuk/Dataset/developer_survey_2019/")
# Sub-question: What is the population of programming languages in the Netherlands for System Administrator?
# (Run the analysis by filtering the Netherlands out of the data and see how that differs from the rest of the world.)
# Required > Country column +
# Read the data
# package > library(readr) is needed for this.
dataset <- read_csv("survey_results_public.csv")
View(dataset)
# Shows the structure of the dataset
str(dataset)
# Shows the number of rows and columns of the dataset
dim(dataset)
# Shows all variable / column names of the dataset.
names(dataset)
# Gives a summary of the data
summary(dataset)
# View a plot of 2 columns / variables
#plot(dataset$Country, dataset$DevType) # Does not work correctly yet.
# Show the number of rows of the dataset
nrow(dataset)
# Show the number of columns of the dataset
ncol(dataset)
# Show the first 10 rows of the dataset
head(dataset, 10)
# Show particular variables / columns from the dataset.
# Country gives the location
# DevType gives the job type (System Administrator)
# DOES NOT WORK: splitdataset<- read_csv("survey_results_public.csv")
# Split columns (everything for the Netherlands is filtered out and split into respondent number + DevType)
functie <- dataset %>%
  filter(Country == "Netherlands")%>%
  select(Respondent, DevType)
View(functie)
# Split the column contents into the correct job / column names.
jobfunctie <- functie %>%
  separate(DevType, c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                      "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                      "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                      "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                      "Scientist", "Senior Executive (CSuite, VP, etc.)", "Student", "System administrator", "Other"), sep = ";")
# jobfunctie is now worked out with the correct column names from the variable functie.
View(jobfunctie)
# Shows all variable / column names of the jobfunctie dataset.
names(jobfunctie)
# Nicest would be to also place the jobs below one another under the column name.
# Rows placed below one another. Now create a new variable per job.
jobsonderelkaar <- separate_rows(functie,DevType,sep=";")
View(jobsonderelkaar)
# The variables below are created from the jobsonderelkaar variable. This is a variable with all jobs from the Netherlands.
# Create a new variable that filters out all respondents who are System administrator and stores them.
SystemAdministrator <- filter(jobsonderelkaar, DevType == "System administrator")
View(SystemAdministrator)
# Create a new variable that filters out all respondents who are Developer, back-end and stores them.
# NOTE(review): the column names above use "Developer, backend" (no hyphen) while this filter uses
# "Developer, back-end" -- confirm which spelling the survey data actually contains.
Development <- filter(jobsonderelkaar, DevType == "Developer, back-end")
View(Development)
# Create a new variable that filters out all respondents who are Database administrator and stores them.
DatabaseAdministrator <- filter(jobsonderelkaar, DevType == "Database administrator")
View(DatabaseAdministrator)
# Everything up to this point works. The part below is still work in progress for the R script.
# ================================================================================================
# What do I want to do? > Now the rows have to be worked out into the same column names.
# NOTE(review): the statement below is syntactically invalid (unbalanced parentheses) and will not parse.
jobfunctie1 <- jobfunctie %>%
  separate_rows(jobfunctie, jobfunctie1, DevType,sep=","),DevType,sep=";")
# NOTE(review): gather() is called with bare multi-word names below; this will not parse either.
jobfunctieTEST <- jobfunctie %>%
  gather(Academic researcher,Data or business analyst,C,D, c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                                                             "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                                                             "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                                                             "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                                                             "Scientist", "Senior Executive (CSuite, VP, etc.)", "Student", "System administrator", "Other"), 1:20)
names(functie)
View(jobfunctieTEST)
# The text below is backup text.
functie <- separate(functie, DevType, into = c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                                               "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                                               "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                                               "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                                               "Scientist", "Senior Executive (CSuite,VP, etc.", "Student", "System administrator", "Other"), sep = ";")
View(functie)
# Split the column contents
# NOTE(review): the second separate()/gather() call in each pair below is not part of the pipe;
# evaluated on its own it will fail, because DevType / A are not standalone objects.
jobfunctie <- functie %>%
  separate(DevType, c("A","B","C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"), sep = ";")
  separate(DevType, c("A","B","C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"), sep = ";")
jobfunctie1 <- jobfunctie %>%
  gather(DevType, c(A,B,"C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"))
  gather(A, key = "A", value = "Academic")
rlang::last_error()
rlang::last_trace()
View(jobfunctie)
View(jobfunctie1)
glimpse(jobfunctie)
# NOTE(review): the line below appends a second parenthesised expression after read_csv();
# this tries to call the returned data frame as a function and will error at runtime.
data <- read_csv("survey_results_public.csv") ("N:/Studeren/Novi Hogeschool/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
View(data)
names(data)
# Handling empty values. R document NAs. Page 50
| /R-Script/Evert/R-Script werkstuk.R | no_license | prinudickson/evert-study | R | false | false | 7,261 | r |
# Variables:
# dataset     = contains the complete dataset
# functie     = contains the job functions, only for respondents from the Netherlands (everything is in 1 column)
# jobfunctie  = all functions from the DevType column of dataset, split out further.
# The lines below install the packages that R-Studio needs for the data processing
install.packages("readr")
library(readr)
# The packages below are needed for filtering the data; they have to be installed as well.
install.packages("dplyr")
library(dplyr)
install.packages("tidyr")
library(tidyr)
# Change the working directory.
getwd()
setwd("N:/Studeren/Novi Hogeschool/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
# Working directory (private) laptop
setwd("C:/Users/Steur/SynologyDrive/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
# Working directory (PwC) laptop
setwd("H:/My Drive/DATA SCIENCE Werkstuk/Dataset/developer_survey_2019/")
# Sub-question: What is the population of programming languages in the Netherlands for System Administrator?
# (Run the analysis by filtering the Netherlands out of the data and see how that differs from the rest of the world.)
# Required > Country column +
# Read the data
# package > library(readr) is needed for this.
dataset <- read_csv("survey_results_public.csv")
View(dataset)
# Shows the structure of the dataset
str(dataset)
# Shows the number of rows and columns of the dataset
dim(dataset)
# Shows all variable / column names of the dataset.
names(dataset)
# Gives a summary of the data
summary(dataset)
# View a plot of 2 columns / variables
#plot(dataset$Country, dataset$DevType) # Does not work correctly yet.
# Show the number of rows of the dataset
nrow(dataset)
# Show the number of columns of the dataset
ncol(dataset)
# Show the first 10 rows of the dataset
head(dataset, 10)
# Show particular variables / columns from the dataset.
# Country gives the location
# DevType gives the job type (System Administrator)
# DOES NOT WORK: splitdataset<- read_csv("survey_results_public.csv")
# Split columns (everything for the Netherlands is filtered out and split into respondent number + DevType)
functie <- dataset %>%
  filter(Country == "Netherlands")%>%
  select(Respondent, DevType)
View(functie)
# Split the column contents into the correct job / column names.
jobfunctie <- functie %>%
  separate(DevType, c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                      "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                      "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                      "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                      "Scientist", "Senior Executive (CSuite, VP, etc.)", "Student", "System administrator", "Other"), sep = ";")
# jobfunctie is now worked out with the correct column names from the variable functie.
View(jobfunctie)
# Shows all variable / column names of the jobfunctie dataset.
names(jobfunctie)
# Nicest would be to also place the jobs below one another under the column name.
# Rows placed below one another. Now create a new variable per job.
jobsonderelkaar <- separate_rows(functie,DevType,sep=";")
View(jobsonderelkaar)
# The variables below are created from the jobsonderelkaar variable. This is a variable with all jobs from the Netherlands.
# Create a new variable that filters out all respondents who are System administrator and stores them.
SystemAdministrator <- filter(jobsonderelkaar, DevType == "System administrator")
View(SystemAdministrator)
# Create a new variable that filters out all respondents who are Developer, back-end and stores them.
# NOTE(review): the column names above use "Developer, backend" (no hyphen) while this filter uses
# "Developer, back-end" -- confirm which spelling the survey data actually contains.
Development <- filter(jobsonderelkaar, DevType == "Developer, back-end")
View(Development)
# Create a new variable that filters out all respondents who are Database administrator and stores them.
DatabaseAdministrator <- filter(jobsonderelkaar, DevType == "Database administrator")
View(DatabaseAdministrator)
# Everything up to this point works. The part below is still work in progress for the R script.
# ================================================================================================
# What do I want to do? > Now the rows have to be worked out into the same column names.
# NOTE(review): the statement below is syntactically invalid (unbalanced parentheses) and will not parse.
jobfunctie1 <- jobfunctie %>%
  separate_rows(jobfunctie, jobfunctie1, DevType,sep=","),DevType,sep=";")
# NOTE(review): gather() is called with bare multi-word names below; this will not parse either.
jobfunctieTEST <- jobfunctie %>%
  gather(Academic researcher,Data or business analyst,C,D, c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                                                             "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                                                             "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                                                             "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                                                             "Scientist", "Senior Executive (CSuite, VP, etc.)", "Student", "System administrator", "Other"), 1:20)
names(functie)
View(jobfunctieTEST)
# The text below is backup text.
functie <- separate(functie, DevType, into = c("Academic researcher","Data or business analyst","Data scientist or machine learning specialist","Database administrator",
                                               "Designer","Developer, backend","Developer, desktop or enterprise applications","Developer, embedded applications or devices",
                                               "Developer, frontend","Developer, fullstack","Developer, game or graphics","Developer, mobile","Developer, QA or test","DevOps specialist",
                                               "Educator","Engineer, data","Engineer, site reliability","Engineering manager","Marketing or sales professional","Product manager",
                                               "Scientist", "Senior Executive (CSuite,VP, etc.", "Student", "System administrator", "Other"), sep = ";")
View(functie)
# Split the column contents
# NOTE(review): the second separate()/gather() call in each pair below is not part of the pipe;
# evaluated on its own it will fail, because DevType / A are not standalone objects.
jobfunctie <- functie %>%
  separate(DevType, c("A","B","C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"), sep = ";")
  separate(DevType, c("A","B","C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"), sep = ";")
jobfunctie1 <- jobfunctie %>%
  gather(DevType, c(A,B,"C","D","E","F","H","L","M","N","O","P","Q","R","S","T","U","V","W"))
  gather(A, key = "A", value = "Academic")
rlang::last_error()
rlang::last_trace()
View(jobfunctie)
View(jobfunctie1)
glimpse(jobfunctie)
# NOTE(review): the line below appends a second parenthesised expression after read_csv();
# this tries to call the returned data frame as a function and will error at runtime.
data <- read_csv("survey_results_public.csv") ("N:/Studeren/Novi Hogeschool/Leerlijnen/Data Science/Dataset/Bewerkte Data-set/")
View(data)
names(data)
# Handling empty values. R document NAs. Page 50
|
# Prepare the columns of a tableplot object for plotting.
#
# For each entry of colNames1 either a single column is processed (the matching
# colNames2 entry is NA) or the column is compared against a second column
# (the colNames2 entry gives its name). Numeric columns are scaled and given
# x-coordinates via scaleNumCol()/coorNumCol(); categorical columns via
# coorCatCol(), or by hand in the comparison case.
#
# Returns the tableplot object with rewritten $columns, $m, $select and
# $sortCol entries.
tableplot_processCols <- function(tab, colNames1, colNames2, IQR_bias, bias_brokenX, limitsX, nBins, sortColName) {
	# fraction of the column width kept free as a gap between the negative
	# and positive halves of a comparison column
	midspace <- .05
	# display names: "a" for single columns, "a-b" for comparison columns
	colNames_string <- ifelse(is.na(colNames2), colNames1, paste(colNames1, colNames2, sep="-"))
	cols <- tab$columns
	tab$columns <- mapply(function(c1, c2, cname) {
		if (is.na(c2)) {
			# single-column case: scale and compute coordinates
			col <- cols[[c1]]
			if (col$isnumeric) {
				col <- scaleNumCol(col, IQR_bias)
				col <- coorNumCol(col, limitsX = limitsX[col$name], bias_brokenX=bias_brokenX)
			} else {
				col <- coorCatCol(col, nBins)
			}
			col$type <- "normal"
			col
		} else {
			# comparison case: build a synthetic column from the difference
			# between column c1 and column c2
			col1 <- cols[[c1]]
			col2 <- cols[[c2]]
			col <- col1
			if (col1$isnumeric) {
				# means/sds of both columns plus absolute and relative
				# differences (relative to col1's mean, in percent)
				col$mean1 <- col1$mean
				col$mean2 <- col2$mean
				col$mean.diff <- col1$mean - col2$mean
				col$mean.diff.rel <- ((col1$mean - col2$mean) / col1$mean)*100
				col$sd1 <- col1$sd
				col$sd2 <- col2$sd
				# sd of the difference: square root of the summed variances
				col$sd.diff <- sqrt(col1$sd^2 + col2$sd^2)
				col$sd.diff.rel <- col$sd.diff / col1$mean * 100
				col$scale_init <- "lin"
				# elementwise minimum of both columns' compl values
				col$compl <- pmin(col1$compl, col2$compl)
				# drop single-column fields before rescaling the difference
				col[c("mean", "sd", "scale_final", "mean.scaled", "brokenX", "mean.diff.coor", "marks.labels", "marks.x", "xline", "widths")] <- NULL
				col <- scaleNumCol(col, IQR_bias=5, compare=TRUE)
				col <- coorNumCol(col, limitsX=list(), bias_brokenX=0.8, compare=TRUE)
			} else {
				# leftover debugging snippets:
				# col <- tp$columns[[4]]
				# col1 <- tp1$columns[[4]]
				# col2 <- tp2$columns[[4]]
				col$freq1 <- col1$freq
				col$freq2 <- col2$freq
				# frequency differences per row; negative values are drawn
				# to the left of the centre line, positive to the right
				freq <- col$freq.diff <- col1$freq - col2$freq
				# leftmost x per row = sum of that row's negative differences
				xinit <- apply(freq, MARGIN=1, function(x)sum(x[x<0]))
				ids <- t(apply(freq, MARGIN=1, orderRow))
				freq.sorted <- sortRows(freq, ids)
				widths <- abs(freq.sorted)
				# cumulative widths give the bar start positions, then the
				# bars are mapped back to the original category order
				x <- t(apply(widths, 1, cumsum)) + xinit
				x <- cbind(xinit, x[,1:(ncol(x)-1)])
				ids2 <- t(apply(ids, 1, order))
				col$x <- sortRows(x, ids2)
				col$widths <- sortRows(widths, ids2)
				# compress both halves to leave the midspace gap in the middle
				col$x <- col$x * (1-midspace) / 2
				col$widths <- col$widths * (1-midspace) / 2
				col$x[col$x<0] <- col$x[col$x<0] - (midspace/2)
				col$x[col$x>=0] <- col$x[col$x>=0] + (midspace/2)
				# zero-width bars are suppressed (NA is not drawn)
				col$x[col$widths==0] <- NA
				col$widths[col$widths==0] <- NA
				# shift so the centre line sits at x = 0.5
				col$x <- (col$x) + 0.5
				col$freq <- NULL
			}
			col$type <- "compare"
			col$name <- cname
			col
		}
	}, colNames1, colNames2, colNames_string, SIMPLIFY=FALSE)
	tab$m <- length(colNames1)
	tab$select <- colNames_string
	# index of the sort column among the (possibly combined) column names
	tab$sortCol <- which(sortColName==colNames_string)[1]
	names(tab$columns) <- colNames_string
	tab
}
| /tabplot/R/tableplot_processCols.R | no_license | ingted/R-Examples | R | false | false | 2,687 | r | tableplot_processCols <- function(tab, colNames1, colNames2, IQR_bias, bias_brokenX, limitsX, nBins, sortColName) {
midspace <- .05
colNames_string <- ifelse(is.na(colNames2), colNames1, paste(colNames1, colNames2, sep="-"))
cols <- tab$columns
tab$columns <- mapply(function(c1, c2, cname) {
if (is.na(c2)) {
col <- cols[[c1]]
if (col$isnumeric) {
col <- scaleNumCol(col, IQR_bias)
col <- coorNumCol(col, limitsX = limitsX[col$name], bias_brokenX=bias_brokenX)
} else {
col <- coorCatCol(col, nBins)
}
col$type <- "normal"
col
} else {
col1 <- cols[[c1]]
col2 <- cols[[c2]]
col <- col1
if (col1$isnumeric) {
col$mean1 <- col1$mean
col$mean2 <- col2$mean
col$mean.diff <- col1$mean - col2$mean
col$mean.diff.rel <- ((col1$mean - col2$mean) / col1$mean)*100
col$sd1 <- col1$sd
col$sd2 <- col2$sd
col$sd.diff <- sqrt(col1$sd^2 + col2$sd^2)
col$sd.diff.rel <- col$sd.diff / col1$mean * 100
col$scale_init <- "lin"
col$compl <- pmin(col1$compl, col2$compl)
col[c("mean", "sd", "scale_final", "mean.scaled", "brokenX", "mean.diff.coor", "marks.labels", "marks.x", "xline", "widths")] <- NULL
col <- scaleNumCol(col, IQR_bias=5, compare=TRUE)
col <- coorNumCol(col, limitsX=list(), bias_brokenX=0.8, compare=TRUE)
} else {
# col <- tp$columns[[4]]
# col1 <- tp1$columns[[4]]
# col2 <- tp2$columns[[4]]
col$freq1 <- col1$freq
col$freq2 <- col2$freq
freq <- col$freq.diff <- col1$freq - col2$freq
xinit <- apply(freq, MARGIN=1, function(x)sum(x[x<0]))
ids <- t(apply(freq, MARGIN=1, orderRow))
freq.sorted <- sortRows(freq, ids)
widths <- abs(freq.sorted)
x <- t(apply(widths, 1, cumsum)) + xinit
x <- cbind(xinit, x[,1:(ncol(x)-1)])
ids2 <- t(apply(ids, 1, order))
col$x <- sortRows(x, ids2)
col$widths <- sortRows(widths, ids2)
col$x <- col$x * (1-midspace) / 2
col$widths <- col$widths * (1-midspace) / 2
col$x[col$x<0] <- col$x[col$x<0] - (midspace/2)
col$x[col$x>=0] <- col$x[col$x>=0] + (midspace/2)
col$x[col$widths==0] <- NA
col$widths[col$widths==0] <- NA
col$x <- (col$x) + 0.5
col$freq <- NULL
}
col$type <- "compare"
col$name <- cname
col
}
}, colNames1, colNames2, colNames_string, SIMPLIFY=FALSE)
tab$m <- length(colNames1)
tab$select <- colNames_string
tab$sortCol <- which(sortColName==colNames_string)[1]
names(tab$columns) <- colNames_string
tab
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyDiscreteSelfInformation.R
\name{calculateSelfInformation_Grassberger}
\alias{calculateSelfInformation_Grassberger}
\title{calculate self information of a discrete value (X) using a histogram approach using the following method}
\usage{
calculateSelfInformation_Grassberger(df, groupVars, countVar = NULL, ...)
}
\arguments{
\item{df}{- may be grouped, in which case the grouping is interpreted as different types of discrete variable}
\item{groupVars}{- the columns of the discrete value quoted by the vars() function (e.g. ggplot facet_wrap)}
\item{countVar}{- (optional) if this data frame represents summary counts, the columns of the summary variable.}
}
\value{
a dataframe containing the distinct values of the groups of df, and for each group an entropy value (H). If df was not grouped this will be a single entry
}
\description{
P. Grassberger, "Entropy Estimates from Insufficient Samplings," arXiv [physics.data-an], 29-Jul-2003 [Online]. Available: http://arxiv.org/abs/physics/0307138
}
\details{
but with a digamma based function (rather than harmonics) detailed in eqns 31 & 35.
For our purposes we fix l=0 to give the form in eqn 27. The error in this method is supposedly better for undersampled cases (where number of bins similar to number of samples)
This is a bit of a cheat as works out the overall entropy and then scales that to get the self information but seems to produce the right answer
}
| /man/calculateSelfInformation_Grassberger.Rd | permissive | terminological/tidy-info-stats | R | false | true | 1,506 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidyDiscreteSelfInformation.R
\name{calculateSelfInformation_Grassberger}
\alias{calculateSelfInformation_Grassberger}
\title{calculate self information of a discrete value (X) using a histogram approach using the following method}
\usage{
calculateSelfInformation_Grassberger(df, groupVars, countVar = NULL, ...)
}
\arguments{
\item{df}{- may be grouped, in which case the grouping is interpreted as different types of discrete variable}
\item{groupVars}{- the columns of the discrete value quoted by the vars() function (e.g. ggplot facet_wrap)}
\item{countVar}{- (optional) if this data frame represents summary counts, the columns of the summary variable.}
}
\value{
a dataframe containing the distinct values of the groups of df, and for each group an entropy value (H). If df was not grouped this will be a single entry
}
\description{
P. Grassberger, "Entropy Estimates from Insufficient Samplings," arXiv [physics.data-an], 29-Jul-2003 [Online]. Available: http://arxiv.org/abs/physics/0307138
}
\details{
but with a digamma based function (rather than harmonics) detailed in eqns 31 & 35.
For our purposes we fix l=0 to give the form in eqn 27. The error in this method is supposedly better for undersampled cases (where number of bins similar to number of samples)
This is a bit of a cheat as works out the overall entropy and then scales that to get the self information but seems to produce the right answer
}
|
\name{defaultVEL}
\alias{defaultVEL}
\title{Default Velocity Function
}
\description{Default Velocity Function is returned
in the event no velocity function is available.
}
\usage{
defaultVEL(kind = 1)
}
\arguments{
\item{kind}{integer, 1=fuj1, 2=LITHOS
}
}
\details{
A set of default velocity functions are available.
}
\value{velocity list, P and S waves
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\seealso{fuj1.vel
}
\examples{
v = defaultVEL(1)
}
\keyword{misc}
| /man/defaultVEL.Rd | no_license | cran/Rquake | R | false | false | 486 | rd | \name{defaultVEL}
\alias{defaultVEL}
\title{Default Velocity Function
}
\description{Default Velocity Function is returned
in the event no velocity function is available.
}
\usage{
defaultVEL(kind = 1)
}
\arguments{
\item{kind}{integer, 1=fuj1, 2=LITHOS
}
}
\details{
A set of default velocity functions are available.
}
\value{velocity list, P and S waves
}
\author{
Jonathan M. Lees<jonathan.lees@unc.edu>
}
\seealso{fuj1.vel
}
\examples{
v = defaultVEL(1)
}
\keyword{misc}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplots.R
\name{ggcaterpillar}
\alias{ggcaterpillar}
\title{Caterpillar plot}
\usage{
ggcaterpillar(re, qq = TRUE, likeDotplot = TRUE)
}
\arguments{
\item{re}{random effects from lmer object}
\item{qq}{if \code{TRUE}, returns normal q/q plot; else returns caterpillar
dotplot}
\item{likeDotplot}{if \code{TRUE}, uses different scales for random effects,
i.e., \code{\link[ggplot2]{facet_wrap}}}
}
\description{
Caterpillar plots for random effects models using \code{\link{ggplot}}.
}
\details{
Behaves like \code{\link[lattice]{qqmath}} and
\code{\link[lattice]{dotplot}} from the lattice package; also handles
models with multiple correlated random effects
}
\examples{
\donttest{
library('lme4')
fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
ggcaterpillar(ranef(fit, condVar = TRUE))
## compare (requires lattice package)
lattice::qqmath(ranef(fit, condVar = TRUE))
}
}
| /man/ggcaterpillar.Rd | no_license | raredd/plotr | R | false | true | 977 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplots.R
\name{ggcaterpillar}
\alias{ggcaterpillar}
\title{Caterpillar plot}
\usage{
ggcaterpillar(re, qq = TRUE, likeDotplot = TRUE)
}
\arguments{
\item{re}{random effects from lmer object}
\item{qq}{if \code{TRUE}, returns normal q/q plot; else returns caterpillar
dotplot}
\item{likeDotplot}{if \code{TRUE}, uses different scales for random effects,
i.e., \code{\link[ggplot2]{facet_wrap}}}
}
\description{
Caterpillar plots for random effects models using \code{\link{ggplot}}.
}
\details{
Behaves like \code{\link[lattice]{qqmath}} and
\code{\link[lattice]{dotplot}} from the lattice package; also handles
models with multiple correlated random effects
}
\examples{
\donttest{
library('lme4')
fit <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)
ggcaterpillar(ranef(fit, condVar = TRUE))
## compare (requires lattice package)
lattice::qqmath(ranef(fit, condVar = TRUE))
}
}
|
legendre.quadrature.rules <- function( n, normalized=FALSE )
{
  # Gauss-Legendre quadrature rules for orders k = 1, 2, ..., n.
  #
  # Builds the recurrence relations and inner products of the Legendre
  # polynomials up to degree n and derives one quadrature-rule data frame
  # (roots / abscissa values) per order.
  #
  # n          : integer, the highest order
  # normalized : logical; if TRUE the recurrences are those of the
  #              normalized polynomials
  #
  # Returns a list with n elements, the order-k quadrature rule data frames.
  return( quadrature.rules( legendre.recurrences( n, normalized ),
                            legendre.inner.products( n ) ) )
}
| /R/legendre.quadrature.rules.R | no_license | cran/gaussquad | R | false | false | 648 | r | legendre.quadrature.rules <- function( n, normalized=FALSE )
{
###
### This function returns a list with n elements
### containing the order k quadrature rule data frames
### for orders k=1,2,...n.
### An order k quadrature data frame contains the roots
### abscissas values for the Legendre polynomial of degree k
###
### Parameters
### n = integer highest order
### normalized = a boolean value. if true, the recurrences are for normalized polynomials
###
recurrences <- legendre.recurrences( n, normalized )
inner.products <- legendre.inner.products( n )
return( quadrature.rules( recurrences, inner.products ) )
}
|
# Minimal Shiny app: draws a histogram of random normal samples whose size,
# mean and standard deviation are controlled by the user.
library(shiny)
# library() errors loudly when a package is missing; require() (used before)
# only returns FALSE, which silently defers the failure.
library(ggplot2)
library(dplyr)

# Define UI: a slider for the sample size and numeric inputs for the
# distribution's mean and standard deviation.
ui <- fluidPage(
  # Application title
  titlePanel("Hello Shiny World!"),
  # Sidebar with a slider input for the number of samples
  sidebarPanel(
    sliderInput("num",
                "Number of Samples:",
                min = 1,
                max = 5000,
                value = 2500),
    numericInput("mean", "Mean: ", value = 0),
    numericInput("sd", "Standard deviation:",
                 value = 1, min = 0.0001)
  ),
  # Show a plot of the generated distribution
  mainPanel(
    plotOutput("hist")
  )
)

# Define server logic: re-sample and re-draw whenever an input changes.
server <- function(input, output) {
  output$hist <- renderPlot({
    dist <- rnorm(n = input$num, mean = input$mean, sd = input$sd)
    # renderPlot() prints the expression's value itself, so the plot is
    # returned rather than wrapped in an explicit print().
    data.frame(dist) %>%
      ggplot(aes(x = dist)) +
      geom_histogram(binwidth = 0.25) +
      # NOTE(review): fixed limits clip the data when |mean| is large -- confirm intended
      xlim(c(-10, 10))
  })
}

# Bind ui and server together
shinyApp(ui, server)
| /shiny/01_Hello/app.R | no_license | Stat579-at-ISU/materials | R | false | false | 998 | r | library(shiny)
require(ggplot2)
require(dplyr)
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Hello Shiny World!"),
# Sidebar with a slider input for the number of bins
sidebarPanel(
sliderInput("num",
"Number of Samples:",
min = 1,
max = 5000,
value = 2500),
numericInput("mean", "Mean: ", value=0),
numericInput("sd", "Standard deviation:",
value = 1, min=0.0001)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("hist")
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
output$hist <- renderPlot({
dist <- rnorm(n = input$num, mean = input$mean, sd = input$sd)
gg <- data.frame(dist) %>%
ggplot(aes(x = dist)) + geom_histogram(binwidth = 0.25) +
xlim(c(-10,10))
print(gg)
})
}
# Bind ui and server together
shinyApp(ui, server)
|
\name{cusum}
\alias{cusum}
\alias{cusum.qcc}
\alias{print.cusum.qcc}
\alias{summary.cusum.qcc}
\alias{plot.cusum.qcc}
\title{Cusum chart}
\description{Create an object of class 'cusum.qcc' to compute a Cusum chart for statistical quality control.}
\usage{
cusum(data, sizes, center, std.dev, head.start = 0,
decision.interval = 5, se.shift = 1, data.name, labels,
newdata, newsizes, newlabels, plot = TRUE, \dots)
\method{print}{cusum.qcc}(x, \dots)
\method{summary}{cusum.qcc}(object, digits = getOption("digits"), \dots)
\method{plot}{cusum.qcc}(x, add.stats = TRUE, chart.all = TRUE,
label.bounds = c("LDB", "UDB"), title, xlab, ylab, ylim,
axes.las = 0, digits = getOption("digits"),
restore.par = TRUE, \dots)
}
\arguments{
\item{data}{a data frame, a matrix or a vector containing observed data for the variable to chart. Each row of a data frame or a matrix, and each value of a vector, refers to a sample or ''rational subgroup''.}
\item{sizes}{a value or a vector of values specifying the sample sizes associated with each group. If not provided the sample sizes are obtained counting the non-\code{NA} elements of each row of a data frame or a matrix; sample sizes are set all equal to one if \code{data} is a vector.}
\item{center}{a value specifying the center of group statistics or the ''target'' value of the process.}
\item{std.dev}{a value or an available method specifying the within-group standard deviation(s) of the process. \cr
Several methods are available for estimating the standard deviation. See \code{\link{sd.xbar}} and \code{\link{sd.xbar.one}} for, respectively, the grouped data case and the individual observations case.
}
\item{head.start}{The initializing value for the above-target and
below-target cumulative sums, measured in standard errors of the summary
statistics. Use zero for the traditional Cusum chart, or a positive
value less than the \code{decision.interval} for a Fast Initial Response.}
\item{decision.interval}{A numeric value specifying the number of standard errors of the summary statistics at which the cumulative sum is out of control.}
\item{se.shift}{The amount of shift to detect in the process, measured in standard errors of the summary statistics.}
\item{data.name}{a string specifying the name of the variable which appears on the plots. If not provided is taken from the object given as data.}
\item{labels}{a character vector of labels for each group.}
\item{newdata}{a data frame, matrix or vector, as for the \code{data} argument, providing further data to plot but not included in the computations.}
\item{newsizes}{a vector as for the \code{sizes} argument providing further data sizes to plot but not included in the computations.}
\item{newlabels}{a character vector of labels for each new group defined in the argument \code{newdata}.}
\item{plot}{logical. If \code{TRUE} a Cusum chart is plotted.}
\item{add.stats}{a logical value indicating whether statistics and other information should be printed at the bottom of the chart.}
\item{chart.all}{a logical value indicating whether both statistics for \code{data} and for \code{newdata} (if given) should be plotted.}
\item{label.bounds}{a character vector specifying the labels for the the decision interval boundaries.}
\item{title}{a string giving the label for the main title.}
\item{xlab}{a string giving the label for the x-axis.}
\item{ylab}{a string giving the label for the y-axis.}
\item{ylim}{a numeric vector specifying the limits for the y-axis.}
\item{axes.las}{numeric in \{0,1,2,3\} specifying the style of axis labels. See \code{help(par)}.}
\item{digits}{the number of significant digits to use.}
\item{restore.par}{a logical value indicating whether the previous \code{par} settings must be restored. If you need to add points, lines, etc. to a control chart set this to \code{FALSE}.}
\item{object}{an object of class 'cusum.qcc'.}
\item{x}{an object of class 'cusum.qcc'.}
\item{\dots}{additional arguments to be passed to the generic function.}
}
\details{Cusum charts display how the group summary statistics deviate above or below the process center or target value, relative to the standard errors of the summary statistics. Useful to detect small and permanent variation on the mean of the process.
}
\value{Returns an object of class 'cusum.qcc'.}
\references{
Mason, R.L. and Young, J.C. (2002) \emph{Multivariate Statistical Process Control with Industrial Applications}, SIAM. \cr
Montgomery, D.C. (2005) \emph{Introduction to Statistical Quality Control}, 5th ed. New York: John Wiley & Sons. \cr
Ryan, T. P. (2000), \emph{Statistical Methods for Quality Improvement}, 2nd ed. New York: John Wiley & Sons, Inc. \cr
Scrucca, L. (2004). qcc: an R package for quality control charting and statistical process control. \emph{R News} 4/1, 11-17. \cr
Wetherill, G.B. and Brown, D.W. (1991) \emph{Statistical Process Control}. New York: Chapman & Hall.
}
\author{Luca Scrucca \email{luca@stat.unipg.it}}
%\note{ ~~further notes~~ }
\seealso{\code{\link{qcc}}, \code{\link{ewma}}}
\examples{
##
## Grouped-data
##
data(pistonrings)
attach(pistonrings)
diameter <- qcc.groups(diameter, sample)
q <- cusum(diameter[1:25,], decision.interval = 4, se.shift = 1)
summary(q)
q <- cusum(diameter[1:25,], newdata=diameter[26:40,])
summary(q)
plot(q, chart.all=FALSE)
detach(pistonrings)
}
\keyword{htest}
\keyword{hplot}
| /man/cusum.Rd | no_license | codyfrisby/qcc | R | false | false | 5,423 | rd | \name{cusum}
\alias{cusum}
\alias{cusum.qcc}
\alias{print.cusum.qcc}
\alias{summary.cusum.qcc}
\alias{plot.cusum.qcc}
\title{Cusum chart}
\description{Create an object of class 'cusum.qcc' to compute a Cusum chart for statistical quality control.}
\usage{
cusum(data, sizes, center, std.dev, head.start = 0,
decision.interval = 5, se.shift = 1, data.name, labels,
newdata, newsizes, newlabels, plot = TRUE, \dots)
\method{print}{cusum.qcc}(x, \dots)
\method{summary}{cusum.qcc}(object, digits = getOption("digits"), \dots)
\method{plot}{cusum.qcc}(x, add.stats = TRUE, chart.all = TRUE,
label.bounds = c("LDB", "UDB"), title, xlab, ylab, ylim,
axes.las = 0, digits = getOption("digits"),
restore.par = TRUE, \dots)
}
\arguments{
\item{data}{a data frame, a matrix or a vector containing observed data for the variable to chart. Each row of a data frame or a matrix, and each value of a vector, refers to a sample or ''rationale group''.}
\item{sizes}{a value or a vector of values specifying the sample sizes associated with each group. If not provided the sample sizes are obtained counting the non-\code{NA} elements of each row of a data frame or a matrix; sample sizes are set all equal to one if \code{data} is a vector.}
\item{center}{a value specifying the center of group statistics or the ''target'' value of the process.}
\item{std.dev}{a value or an available method specifying the within-group standard deviation(s) of the process. \cr
Several methods are available for estimating the standard deviation. See \code{\link{sd.xbar}} and \code{\link{sd.xbar.one}} for, respectively, the grouped data case and the individual observations case.
}
\item{head.start}{The initializing value for the above-target and
below-target cumulative sums, measured in standard errors of the summary
statistics. Use zero for the traditional Cusum chart, or a positive
value less than the \code{decision.interval} for a Fast Initial Response.}
\item{decision.interval}{A numeric value specifying the number of standard errors of the summary statistics at which the cumulative sum is out of control.}
\item{se.shift}{The amount of shift to detect in the process, measured in standard errors of the summary statistics.}
\item{data.name}{a string specifying the name of the variable which appears on the plots. If not provided is taken from the object given as data.}
\item{labels}{a character vector of labels for each group.}
\item{newdata}{a data frame, matrix or vector, as for the \code{data} argument, providing further data to plot but not included in the computations.}
\item{newsizes}{a vector as for the \code{sizes} argument providing further data sizes to plot but not included in the computations.}
\item{newlabels}{a character vector of labels for each new group defined in the argument \code{newdata}.}
\item{plot}{logical. If \code{TRUE} a Cusum chart is plotted.}
\item{add.stats}{a logical value indicating whether statistics and other information should be printed at the bottom of the chart.}
\item{chart.all}{a logical value indicating whether both statistics for \code{data} and for \code{newdata} (if given) should be plotted.}
\item{label.bounds}{a character vector specifying the labels for the decision interval boundaries.}
\item{title}{a string giving the label for the main title.}
\item{xlab}{a string giving the label for the x-axis.}
\item{ylab}{a string giving the label for the y-axis.}
\item{ylim}{a numeric vector specifying the limits for the y-axis.}
\item{axes.las}{numeric in \{0,1,2,3\} specifying the style of axis labels. See \code{help(par)}.}
\item{digits}{the number of significant digits to use.}
\item{restore.par}{a logical value indicating whether the previous \code{par} settings must be restored. If you need to add points, lines, etc. to a control chart set this to \code{FALSE}.}
\item{object}{an object of class 'cusum.qcc'.}
\item{x}{an object of class 'cusum.qcc'.}
\item{\dots}{additional arguments to be passed to the generic function.}
}
\details{Cusum charts display how the group summary statistics deviate above or below the process center or target value, relative to the standard errors of the summary statistics. They are useful to detect small and permanent variations in the mean of the process.
}
\value{Returns an object of class 'cusum.qcc'.}
\references{
Mason, R.L. and Young, J.C. (2002) \emph{Multivariate Statistical Process Control with Industrial Applications}, SIAM. \cr
Montgomery, D.C. (2005) \emph{Introduction to Statistical Quality Control}, 5th ed. New York: John Wiley & Sons. \cr
Ryan, T. P. (2000), \emph{Statistical Methods for Quality Improvement}, 2nd ed. New York: John Wiley & Sons, Inc. \cr
Scrucca, L. (2004). qcc: an R package for quality control charting and statistical process control. \emph{R News} 4/1, 11-17. \cr
Wetherill, G.B. and Brown, D.W. (1991) \emph{Statistical Process Control}. New York: Chapman & Hall.
}
\author{Luca Scrucca \email{luca@stat.unipg.it}}
%\note{ ~~further notes~~ }
\seealso{\code{\link{qcc}}, \code{\link{ewma}}}
\examples{
##
## Grouped-data
##
data(pistonrings)
attach(pistonrings)
diameter <- qcc.groups(diameter, sample)
q <- cusum(diameter[1:25,], decision.interval = 4, se.shift = 1)
summary(q)
q <- cusum(diameter[1:25,], newdata=diameter[26:40,])
summary(q)
plot(q, chart.all=FALSE)
detach(pistonrings)
}
\keyword{htest}
\keyword{hplot}
|
## vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
## References used in this file:
##
## 1. Meeus, Jean, 1982. Astronomical formulae for calculators. Willmann-Bell. Richmond VA, USA. 201 pages.
## 2. Meeus, Jean, 1991. Astronomical algorithms. Willmann-Bell, Richmond VA, USA. 429 pages.
library(oce)
context("Astronomical")
RPD <- atan2(1, 1) / 45 # radians per degree
test_that("Times", {
## [1] chapter 3 page 24-25
## FIXME: previously this had the unintelligble tz="ET" but it is *exact* as is
t <- ISOdatetime(1957, 10, 4, hour=0, min=0, sec=0, tz="UTC")+0.81*86400
expect_equal(julianDay(t), 2436116.31, tolerance=0.01, scale=1)
## [1] example 15.a
t <- ISOdatetime(1978, 11, 13, 4, 35, 0, tz="UTC")
jd <- julianDay(t)
jca <- julianCenturyAnomaly(jd)
expect_equal(jd, 2443825.69, tolerance=0.01, scale=1)
expect_equal(jca, 0.788656810, tolerance=1e-7, scale=1) # fractional error 3e-8
## [1] page 40
t <- ISOdatetime(1978, 11, 13, 0, 0, 0, tz="UTC")
expect_equal(siderealTime(t), 3.4503696, tolerance=0.0000001)
t <- ISOdatetime(1978, 11, 13, 4, 34, 0, tz="UTC")
expect_equal(siderealTime(t), 8.0295394, tolerance=0.0000001)
})
test_that("Moon", {
## [2] example 45.a (pages 312-313)
## Do not check too many digits, because the code does not have all terms
## in formulae. (Note: this also tests eclipticalToEquatorial)
t <- ISOdatetime(1992, 04, 12, 0, 0, 0, tz="UTC")
m <- moonAngle(t, 0, 0) # lat and lon arbitrary
expect_less_than(abs(m$lambda - 133.162659), 0.02)
expect_less_than(abs(m$beta - -3.229127), 0.001)
##expect_equal(abs(m$obliquity - 23.440636) < 0.001)
expect_less_than(abs(m$rightAscension - 134.688473), 0.02)
expect_less_than(abs(m$declination - 13.768366), 0.01)
expect_less_than(abs(m$diameter - 0.991990), 0.0001)
expect_less_than(abs(m$distance - 368405.6), 20)
## moon illuminated fraction [1] ex 31.b page 156
illfrac <- (1 + cos(RPD * 105.8493)) / 2
expect_equal(moonAngle(ISOdatetime(1979,12,25,0,0,0,tz="UTC"),0,0)$illuminatedFraction,illfrac,tolerance=0.001)
})
| /tests/testthat/test_astronomical.R | no_license | marie-geissler/oce | R | false | false | 2,326 | r | ## vim:textwidth=80:expandtab:shiftwidth=2:softtabstop=2
## References used in this file:
##
## 1. Meeus, Jean, 1982. Astronomical formulae for calculators. Willmann-Bell. Richmond VA, USA. 201 pages.
## 2. Meeus, Jean, 1991. Astronomical algorithms. Willmann-Bell, Richmond VA, USA. 429 pages.
library(oce)
context("Astronomical")
RPD <- atan2(1, 1) / 45 # radians per degree
test_that("Times", {
## [1] chapter 3 page 24-25
## FIXME: previously this had the unintelligble tz="ET" but it is *exact* as is
t <- ISOdatetime(1957, 10, 4, hour=0, min=0, sec=0, tz="UTC")+0.81*86400
expect_equal(julianDay(t), 2436116.31, tolerance=0.01, scale=1)
## [1] example 15.a
t <- ISOdatetime(1978, 11, 13, 4, 35, 0, tz="UTC")
jd <- julianDay(t)
jca <- julianCenturyAnomaly(jd)
expect_equal(jd, 2443825.69, tolerance=0.01, scale=1)
expect_equal(jca, 0.788656810, tolerance=1e-7, scale=1) # fractional error 3e-8
## [1] page 40
t <- ISOdatetime(1978, 11, 13, 0, 0, 0, tz="UTC")
expect_equal(siderealTime(t), 3.4503696, tolerance=0.0000001)
t <- ISOdatetime(1978, 11, 13, 4, 34, 0, tz="UTC")
expect_equal(siderealTime(t), 8.0295394, tolerance=0.0000001)
})
test_that("Moon", {
## [2] example 45.a (pages 312-313)
## Do not check too many digits, because the code does not have all terms
## in formulae. (Note: this also tests eclipticalToEquatorial)
t <- ISOdatetime(1992, 04, 12, 0, 0, 0, tz="UTC")
m <- moonAngle(t, 0, 0) # lat and lon arbitrary
expect_less_than(abs(m$lambda - 133.162659), 0.02)
expect_less_than(abs(m$beta - -3.229127), 0.001)
##expect_equal(abs(m$obliquity - 23.440636) < 0.001)
expect_less_than(abs(m$rightAscension - 134.688473), 0.02)
expect_less_than(abs(m$declination - 13.768366), 0.01)
expect_less_than(abs(m$diameter - 0.991990), 0.0001)
expect_less_than(abs(m$distance - 368405.6), 20)
## moon illuminated fraction [1] ex 31.b page 156
illfrac <- (1 + cos(RPD * 105.8493)) / 2
expect_equal(moonAngle(ISOdatetime(1979,12,25,0,0,0,tz="UTC"),0,0)$illuminatedFraction,illfrac,tolerance=0.001)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skitools.R
\name{mafcount}
\alias{mafcount}
\title{mafcount}
\usage{
mafcount(tum.bam, norm.bam = NULL, maf, chunk.size = 100, verbose = T,
mc.cores = 1, ...)
}
\description{
Returns base counts for reference and alternative allele for an input tum and norm bam and maf data frame or GRanges specifying substitutions
}
\details{
maf is a single width GRanges describing variants and field 'ref' (or 'Reference_Allele'), 'alt' (or 'Tum_Seq_Allele1') specifying reference and alt allele.
maf is assumed to have width 1 and strand is ignored.
}
| /man/mafcount.Rd | no_license | juliebehr/skitools | R | false | true | 622 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skitools.R
\name{mafcount}
\alias{mafcount}
\title{mafcount}
\usage{
mafcount(tum.bam, norm.bam = NULL, maf, chunk.size = 100, verbose = T,
mc.cores = 1, ...)
}
\description{
Returns base counts for reference and alternative allele for an input tum and norm bam and maf data frame or GRanges specifying substitutions
}
\details{
maf is a single width GRanges describing variants and field 'ref' (or 'Reference_Allele'), 'alt' (or 'Tum_Seq_Allele1') specifying reference and alt allele.
maf is assumed to have width 1 and strand is ignored.
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{mongo}
\alias{mongo}
\title{The mongo (database connection) class}
\description{
Objects of class "mongo" are used to connect to a MongoDB server and to
perform database operations on that server.
}
\details{
mongo objects have "mongo" as their class and contain an externally managed
pointer to the connection data. This pointer is stored in the "mongo"
attribute of the object.
Note that the members of the mongo object only reflect\cr the initial
parameters of \code{\link{mongo.create}()}. Only the external data actually
changes if, for example, mongo.timeout is called after the initial call to
\code{mongo.create}.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append(buf, "name", "Joe")
mongo.bson.buffer.append(buf, "age", 22L)
b <- mongo.bson.from.buffer(buf)
mongo.insert(mongo, "test.people", b)
}
}
\seealso{
\code{\link{mongo.create}},\cr \code{\link{mongo.is.connected}},\cr
\code{\link{mongo.get.databases}},\cr
\code{\link{mongo.get.database.collections}},\cr
\code{\link{mongo.insert}},\cr \code{\link{mongo.find.one}},\cr
\code{\link{mongo.find}},\cr \code{\link{mongo.update}},\cr
\code{\link{mongo.remove}},\cr \code{\link{mongo.drop}},\cr
\code{\link{mongo.drop.database}}\cr \link{mongo.gridfs}.
}
| /man/mongo.Rd | no_license | agnaldodasilva/rmongodb | R | false | false | 1,394 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{mongo}
\alias{mongo}
\title{The mongo (database connection) class}
\description{
Objects of class "mongo" are used to connect to a MongoDB server and to
perform database operations on that server.
}
\details{
mongo objects have "mongo" as their class and contain an externally managed
pointer to the connection data. This pointer is stored in the "mongo"
attribute of the object.
Note that the members of the mongo object only reflect\cr the initial
parameters of \code{\link{mongo.create}()}. Only the external data actually
changes if, for example, mongo.timeout is called after the initial call to
\code{mongo.create}.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append(buf, "name", "Joe")
mongo.bson.buffer.append(buf, "age", 22L)
b <- mongo.bson.from.buffer(buf)
mongo.insert(mongo, "test.people", b)
}
}
\seealso{
\code{\link{mongo.create}},\cr \code{\link{mongo.is.connected}},\cr
\code{\link{mongo.get.databases}},\cr
\code{\link{mongo.get.database.collections}},\cr
\code{\link{mongo.insert}},\cr \code{\link{mongo.find.one}},\cr
\code{\link{mongo.find}},\cr \code{\link{mongo.update}},\cr
\code{\link{mongo.remove}},\cr \code{\link{mongo.drop}},\cr
\code{\link{mongo.drop.database}}\cr \link{mongo.gridfs}.
}
|
#' function for determining the path to the db used for storage
#' @param path path to database
db_path <- function(path=""){
if ( path != "" ) {
return(path)
}else{
ifelse(
Sys.getenv("ukm_bot_data_path")=="",
path.expand("~/.unikonstanzmensabot_data.sqlite3"),
Sys.getenv("ukm_bot_data_path")
)
}
}
#' function for connectiong to db
#' @param path path to database
db_connect <- function(path=""){
RSQLite::dbConnect( RSQLite::SQLite(), db_path( path ) )
}
#' function for disconnecting from db
#' @param db connection to db
db_disconnect <- function(db){
RSQLite::dbDisconnect(db)
}
#' function for ensuring that a particular table exists in db
db_ensure_table_exists <- function(table="", path=""){
# check if info on table exists
stopifnot( !is.null(storage$tables[[table]]) )
# connect to db
db <- db_connect(path)
# create table if not existent
if( !(table %in% RSQLite::dbListTables(db)) ){
create_table <- storage$tables[[table]]
res <- RSQLite::dbGetQuery(db, create_table)
if (is.null(res) ) res <- TRUE
}else{
res <- TRUE
}
db_disconnect(db)
return(res)
}
#' function for retrieving data from requests table in db
db_get_request_data <- function(date=Sys.Date(), status=200, loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM requests WHERE ",
" \n status IN ", sql_innize(status),
" AND\n date IN ", sql_innize(date),
" AND\n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang[1])
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
#' function for retrieving data from dishes table in db
db_get_dish_data <- function(date=Sys.Date(), loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM dishes WHERE ",
" \n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang),
" AND\n date IN ", sql_innize(date)
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
#' function for repairing encodings
function(text){
grep()
}
#' function for retrieving data from tweets table in db
db_get_tweet_data <- function(date=Sys.Date(), loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM tweets WHERE ",
" \n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang),
" AND\n date IN ", sql_innize(date)
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
| /R/database.R | no_license | petermeissner/unikonstanzmensabot | R | false | false | 2,894 | r | #' function for determining the path to the db used for storage
#' @param path path to database
db_path <- function(path=""){
if ( path != "" ) {
return(path)
}else{
ifelse(
Sys.getenv("ukm_bot_data_path")=="",
path.expand("~/.unikonstanzmensabot_data.sqlite3"),
Sys.getenv("ukm_bot_data_path")
)
}
}
#' function for connectiong to db
#' @param path path to database
db_connect <- function(path=""){
RSQLite::dbConnect( RSQLite::SQLite(), db_path( path ) )
}
#' function for disconnecting from db
#' @param db connection to db
db_disconnect <- function(db){
RSQLite::dbDisconnect(db)
}
#' function for ensuring that a particular table exists in db
db_ensure_table_exists <- function(table="", path=""){
# check if info on table exists
stopifnot( !is.null(storage$tables[[table]]) )
# connect to db
db <- db_connect(path)
# create table if not existent
if( !(table %in% RSQLite::dbListTables(db)) ){
create_table <- storage$tables[[table]]
res <- RSQLite::dbGetQuery(db, create_table)
if (is.null(res) ) res <- TRUE
}else{
res <- TRUE
}
db_disconnect(db)
return(res)
}
#' function for retrieving data from requests table in db
db_get_request_data <- function(date=Sys.Date(), status=200, loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM requests WHERE ",
" \n status IN ", sql_innize(status),
" AND\n date IN ", sql_innize(date),
" AND\n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang[1])
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
#' function for retrieving data from dishes table in db
db_get_dish_data <- function(date=Sys.Date(), loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM dishes WHERE ",
" \n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang),
" AND\n date IN ", sql_innize(date)
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
#' function for repairing encodings
function(text){
grep()
}
#' function for retrieving data from tweets table in db
db_get_tweet_data <- function(date=Sys.Date(), loc="mensa_giessberg", lang="de"){
db <- db_connect()
sql_innize <- function(x){paste0("(", paste0("'",x ,"'", collapse = ", "), ")")}
sql <- paste0(
"SELECT * FROM tweets WHERE ",
" \n loc IN ", sql_innize(loc),
" AND\n lang IN ", sql_innize(lang),
" AND\n date IN ", sql_innize(date)
)
res <- RSQLite::dbGetQuery(db, sql)
db_disconnect(db)
return(res)
}
|
vars <- setdiff(names(datasets::iris), "Species")
#' The application User-Interface
#'
#' @param request Internal parameter for `{shiny}`.
#' DO NOT REMOVE.
#' @import shiny
#' @noRd
app_ui <- function(request) {
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
# List the first level UI elements here
# k-means only works with numerical variables,
# so don't give the user the option to select
# a categorical variable
pageWithSidebar(
headerPanel('Iris k-means clustering'),
sidebarPanel(
selectInput('xcol', 'X Variable', vars),
selectInput('ycol', 'Y Variable', vars, selected = vars[[2]]),
numericInput('clusters', 'Cluster count', 3, min = 1, max = 9)
),
mainPanel(
plotOutput('plot1')
)
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
add_resource_path(
'www', app_sys('app/www')
)
tags$head(
favicon(),
bundle_resources(
path = app_sys('app/www'),
app_title = 'dshiny'
)
# Add here other external resources
# for example, you can add shinyalert::useShinyalert()
)
}
| /dshiny/R/app_ui.R | permissive | andrealvesambrosio/shiny-deploy-exemplo | R | false | false | 1,429 | r | vars <- setdiff(names(datasets::iris), "Species")
#' The application User-Interface
#'
#' @param request Internal parameter for `{shiny}`.
#' DO NOT REMOVE.
#' @import shiny
#' @noRd
app_ui <- function(request) {
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
# List the first level UI elements here
# k-means only works with numerical variables,
# so don't give the user the option to select
# a categorical variable
pageWithSidebar(
headerPanel('Iris k-means clustering'),
sidebarPanel(
selectInput('xcol', 'X Variable', vars),
selectInput('ycol', 'Y Variable', vars, selected = vars[[2]]),
numericInput('clusters', 'Cluster count', 3, min = 1, max = 9)
),
mainPanel(
plotOutput('plot1')
)
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
add_resource_path(
'www', app_sys('app/www')
)
tags$head(
favicon(),
bundle_resources(
path = app_sys('app/www'),
app_title = 'dshiny'
)
# Add here other external resources
# for example, you can add shinyalert::useShinyalert()
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MeasEquiv_EffectSize_Base.R
\name{colSD}
\alias{colSD}
\title{Standard deviations of columns}
\usage{
colSD(x, ...)
}
\arguments{
\item{x}{is a matrix or data frame for which we want to obtain column sds}
\item{\dots}{are other arguments to be passed to \code{sd}, such as
\code{na.rm}}
}
\value{
A vector of standard deviations by column
}
\description{
\code{colSD} computes standard deviations of columns.
}
\keyword{internal}
| /man/colSD.Rd | no_license | cran/dmacs | R | false | true | 526 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MeasEquiv_EffectSize_Base.R
\name{colSD}
\alias{colSD}
\title{Standard deviations of columns}
\usage{
colSD(x, ...)
}
\arguments{
\item{x}{is a matrix or data frame for which we want to obtain column sds}
\item{\dots}{are other arguments to be passed to \code{sd}, such as
\code{na.rm}}
}
\value{
A vector of standard deviations by column
}
\description{
\code{colSD} computes standard deviations of columns.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-loss.R
\name{nn_multi_margin_loss}
\alias{nn_multi_margin_loss}
\title{Multi margin loss}
\usage{
nn_multi_margin_loss(p = 1, margin = 1, weight = NULL, reduction = "mean")
}
\arguments{
\item{p}{(int, optional): Has a default value of \eqn{1}. \eqn{1} and \eqn{2}
are the only supported values.}
\item{margin}{(float, optional): Has a default value of \eqn{1}.}
\item{weight}{(Tensor, optional): a manual rescaling weight given to each
class. If given, it has to be a Tensor of size \code{C}. Otherwise, it is
treated as if having all ones.}
\item{reduction}{(string, optional): Specifies the reduction to apply to the output:
\code{'none'} | \code{'mean'} | \code{'sum'}. \code{'none'}: no reduction will be applied,
\code{'mean'}: the sum of the output will be divided by the number of
elements in the output, \code{'sum'}: the output will be summed.}
}
\description{
Creates a criterion that optimizes a multi-class classification hinge
loss (margin-based loss) between input \eqn{x} (a 2D mini-batch \code{Tensor}) and
output \eqn{y} (which is a 1D tensor of target class indices,
\eqn{0 \leq y \leq \mbox{x.size}(1)-1}):
}
\details{
For each mini-batch sample, the loss in terms of the 1D input \eqn{x} and scalar
output \eqn{y} is:
\deqn{
\mbox{loss}(x, y) = \frac{\sum_i \max(0, \mbox{margin} - x[y] + x[i]))^p}{\mbox{x.size}(0)}
}
where \eqn{x \in \left\{0, \; \cdots , \; \mbox{x.size}(0) - 1\right\}}
and \eqn{i \neq y}.
Optionally, you can give non-equal weighting on the classes by passing
a 1D \code{weight} tensor into the constructor.
The loss function then becomes:
\deqn{
\mbox{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\mbox{margin} - x[y] + x[i]))^p)}{\mbox{x.size}(0)}
}
}
| /man/nn_multi_margin_loss.Rd | permissive | mlverse/torch | R | false | true | 1,786 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-loss.R
\name{nn_multi_margin_loss}
\alias{nn_multi_margin_loss}
\title{Multi margin loss}
\usage{
nn_multi_margin_loss(p = 1, margin = 1, weight = NULL, reduction = "mean")
}
\arguments{
\item{p}{(int, optional): Has a default value of \eqn{1}. \eqn{1} and \eqn{2}
are the only supported values.}
\item{margin}{(float, optional): Has a default value of \eqn{1}.}
\item{weight}{(Tensor, optional): a manual rescaling weight given to each
class. If given, it has to be a Tensor of size \code{C}. Otherwise, it is
treated as if having all ones.}
\item{reduction}{(string, optional): Specifies the reduction to apply to the output:
\code{'none'} | \code{'mean'} | \code{'sum'}. \code{'none'}: no reduction will be applied,
\code{'mean'}: the sum of the output will be divided by the number of
elements in the output, \code{'sum'}: the output will be summed.}
}
\description{
Creates a criterion that optimizes a multi-class classification hinge
loss (margin-based loss) between input \eqn{x} (a 2D mini-batch \code{Tensor}) and
output \eqn{y} (which is a 1D tensor of target class indices,
\eqn{0 \leq y \leq \mbox{x.size}(1)-1}):
}
\details{
For each mini-batch sample, the loss in terms of the 1D input \eqn{x} and scalar
output \eqn{y} is:
\deqn{
\mbox{loss}(x, y) = \frac{\sum_i \max(0, \mbox{margin} - x[y] + x[i]))^p}{\mbox{x.size}(0)}
}
where \eqn{x \in \left\{0, \; \cdots , \; \mbox{x.size}(0) - 1\right\}}
and \eqn{i \neq y}.
Optionally, you can give non-equal weighting on the classes by passing
a 1D \code{weight} tensor into the constructor.
The loss function then becomes:
\deqn{
\mbox{loss}(x, y) = \frac{\sum_i \max(0, w[y] * (\mbox{margin} - x[y] + x[i]))^p)}{\mbox{x.size}(0)}
}
}
|
#' BR+ or BRplus for multi-label Classification
#'
#' Create a BR+ classifier to predict multi-label data. This is a simple approach
#' that enables the binary classifiers to discover existing label dependency by
#' themselves. The main idea of BR+ is to increment the feature space of the
#' binary classifiers to let them discover existing label dependency by
#' themselves.
#'
#' This implementation has different strategy to predict the final set of labels
#' for unlabeled examples, as proposed in original paper.
#'
#' @family Transformation methods
#' @family Stacking methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Others arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{BRPmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{freq}{The label frequencies to use with the 'Stat' strategy}
#' \item{initial}{The BR model to predict the values for the labels to
#' initial step}
#' \item{models}{A list of final models named by the label names.}
#' }
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @export
#'
#' @examples
#' # Use SVM as base algorithm
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use Random Forest as base algorithm and 2 cores
#' model <- brplus(toyml, 'RF', cores = 2, seed = 123)
#' }
brplus <- function(mdata,
base.algorithm = getOption("utiml.base.algorithm", "SVM"),
..., cores = getOption("utiml.cores", 1),
seed = getOption("utiml.seed", NA)) {
# Validations
if (!is(mdata, "mldr")) {
stop("First argument must be an mldr object")
}
if (cores < 1) {
stop("Cores must be a positive value")
}
# BRplus Model class
brpmodel <- list(labels = rownames(mdata$labels), call = match.call())
freq <- mdata$labels$freq
names(freq) <- brpmodel$labels
brpmodel$freq <- sort(freq)
brpmodel$initial <- br(mdata, base.algorithm, ..., cores = cores, seed = seed)
labeldata <- as.data.frame(mdata$dataset[mdata$labels$index])
for (i in seq(ncol(labeldata))) {
labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
}
labels <- utiml_rename(seq(mdata$measures$num.labels), brpmodel$labels)
brpmodel$models <- utiml_lapply(labels, function(li) {
basedata <- utiml_create_binary_data(mdata, brpmodel$labels[li],
labeldata[-li])
dataset <- utiml_prepare_data(basedata, "mldBRP", mdata$name, "brplus",
base.algorithm)
utiml_create_model(dataset, ...)
}, cores, seed)
class(brpmodel) <- "BRPmodel"
brpmodel
}
#' Predict Method for BR+ (brplus)
#'
#' This function predicts values based upon a model trained by \code{brplus}.
#'
#' The strategies of estimate the values of the new features are separated in
#' two groups:
#' \describe{
#' \item{No Update (\code{NU})}{This use the initial prediction of BR to all
#' labels. This name is because no modification is made to the initial
#' estimates of the augmented features during the prediction phase}
#' \item{With Update}{This strategy update the initial prediction in that the
#' final predict occurs. There are three possibilities to define the order of
#' label sequences:
#' \describe{
#' \item{Specific order (\code{Ord})}{The order is define by the user,
#' require a new argument called \code{order}.}
#' \item{Static order (\code{Stat})}{Use the frequency of single labels in
#' the training set to define the sequence, where the least frequent
#' labels are predicted first}
#' \item{Dinamic order (\code{Dyn})}{Takes into account the confidence of
#' the initial prediction for each independent single label, to define a
#' sequence, where the labels predicted with less confidence are updated
#' first.}
#' }
#' }
#' }
#'
#' @param object Object of class '\code{BRPmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param strategy The strategy prefix to determine how to estimate the values
#' of the augmented features of unlabeled examples.
#'
#' The possible values are: \code{'Dyn'}, \code{'Stat'}, \code{'Ord'} or
#' \code{'NU'}. See the description for more details. (Default: \code{'Dyn'}).
#' @param order The label sequence used to update the initial labels results
#' based on the final results. This argument is used only when the
#' \code{strategy = 'Ord'} (Default: \code{list()})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Others arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @seealso \code{\link[=brplus]{BR+}}
#' @export
#'
#' @examples
#' # Predict SVM scores
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM bipartitions and change the method to use No Update strategy
#' pred <- predict(model, toyml, strategy = 'NU', probability = FALSE)
#'
#' # Predict using a random sequence to update the labels
#' labels <- sample(rownames(toyml$labels))
#' pred <- predict(model, toyml, strategy = 'Ord', order = labels)
#'
#' # Passing a specific parameter for SVM predict method
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.BRPmodel <- function(object, newdata,
                             strategy = c("Dyn", "Stat", "Ord", "NU"),
                             order = list(),
                             probability = getOption("utiml.use.probs", TRUE),
                             ..., cores = getOption("utiml.cores", 1),
                             seed = getOption("utiml.seed", NA)) {
  # Validations
  if (!is(object, "BRPmodel")) {
    stop("First argument must be an BRPmodel object")
  }
  strategy <- match.arg(strategy)
  labels <- object$labels
  if (strategy == "Ord") {
    # A user-supplied order must contain exactly the model's labels
    if (!utiml_is_equal_sets(order, labels)) {
      stop("Invalid order (all labels must be on the chain)")
    }
  }
  if (cores < 1) {
    stop("Cores must be a positive value")
  }
  if (!anyNA(seed)) {
    set.seed(seed)
  }
  newdata <- utiml_newdata(newdata)
  # Initial BR predictions seed the augmented label features
  initial.preds <- predict.BRmodel(object$initial, newdata, probability=FALSE,
                                   ..., cores=cores, seed=seed)
  labeldata <- as.data.frame(as.bipartition(initial.preds))
  # Augmented features must be 0/1 factors, matching the training encoding
  for (i in seq(ncol(labeldata))) {
    labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
  }
  if (strategy == "NU") {
    # "No Update": every final model uses only the initial label estimates
    indices <- utiml_rename(seq_along(labels), labels)
    predictions <- utiml_lapply(indices, function(li) {
      utiml_predict_binary_model(object$models[[li]],
                                 cbind(newdata, labeldata[, -li]), ...)
    }, cores, seed)
  }
  else {
    # "With Update" strategies: labels are predicted in sequence and each new
    # result overwrites the corresponding initial estimate in `labeldata`
    order <- switch (strategy,
      # Dyn: least confident initial predictions (lowest mean score) first
      Dyn = names(sort(apply(as.probability(initial.preds), 2, mean))),
      # Stat: least frequent training labels first (freq is pre-sorted)
      Stat = names(object$freq),
      # Ord: user-provided sequence
      Ord = order
    )
    predictions <- list()
    for (labelname in order) {
      other.labels <- !labels %in% labelname
      model <- object$models[[labelname]]
      data <- cbind(newdata, labeldata[, other.labels, drop = FALSE])
      predictions[[labelname]] <- utiml_predict_binary_model(model, data, ...)
      # Feed the fresh bipartition back into the augmented features
      labeldata[, labelname] <- factor(predictions[[labelname]]$bipartition,
                                       levels=c(0, 1))
    }
  }
  # Reorder predictions to the original label order before building mlresult
  utiml_predict(predictions[labels], probability)
}
#' Print BRP model
#' @param x The brp model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.BRPmodel <- function(x, ...) {
  # Header and the call that created the model
  cat("Classifier BRplus (also called BR+)\n\nCall:\n")
  print(x$call)
  # One binary model is stored per label
  model_names <- names(x$models)
  cat("\n", length(x$models), "Models (labels):\n")
  print(model_names)
}
| /R/method_brplus.R | no_license | cran/utiml | R | false | false | 9,165 | r | #' BR+ or BRplus for multi-label Classification
#'
#' Create a BR+ classifier to predict multi-label data. This is a simple approach
#' that enables the binary classifiers to discover existing label dependency by
#' themselves. The main idea of BR+ is to increment the feature space of the
#' binary classifiers to let them discover existing label dependency by
#' themselves.
#'
#' This implementation has different strategy to predict the final set of labels
#' for unlabeled examples, as proposed in original paper.
#'
#' @family Transformation methods
#' @family Stacking methods
#' @param mdata A mldr dataset used to train the binary models.
#' @param base.algorithm A string with the name of the base algorithm. (Default:
#' \code{options("utiml.base.algorithm", "SVM")})
#' @param ... Other arguments passed to the base algorithm for all subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of class \code{BRPmodel} containing the set of fitted
#' models, including:
#' \describe{
#' \item{freq}{The label frequencies to use with the 'Stat' strategy}
#' \item{initial}{The BR model to predict the values for the labels to
#' initial step}
#' \item{models}{A list of final models named by the label names.}
#' }
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @export
#'
#' @examples
#' # Use SVM as base algorithm
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Use Random Forest as base algorithm and 2 cores
#' model <- brplus(toyml, 'RF', cores = 2, seed = 123)
#' }
brplus <- function(mdata,
                   base.algorithm = getOption("utiml.base.algorithm", "SVM"),
                   ..., cores = getOption("utiml.cores", 1),
                   seed = getOption("utiml.seed", NA)) {
  # Validations
  if (!is(mdata, "mldr")) {
    stop("First argument must be an mldr object")
  }
  if (cores < 1) {
    stop("Cores must be a positive value")
  }
  # BRplus Model class
  brpmodel <- list(labels = rownames(mdata$labels), call = match.call())
  # Label frequencies sorted ascending; used by the 'Stat' prediction strategy
  # (least frequent labels are predicted first)
  freq <- mdata$labels$freq
  names(freq) <- brpmodel$labels
  brpmodel$freq <- sort(freq)
  # Initial BR model; its predictions seed the augmented label features at
  # prediction time
  brpmodel$initial <- br(mdata, base.algorithm, ..., cores = cores, seed = seed)
  # True label values from the training set, encoded as 0/1 factors, used to
  # augment the feature space of each final binary model
  labeldata <- as.data.frame(mdata$dataset[mdata$labels$index])
  for (i in seq(ncol(labeldata))) {
    labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
  }
  labels <- utiml_rename(seq(mdata$measures$num.labels), brpmodel$labels)
  # Train one binary model per label on the original features plus the values
  # of all other labels
  brpmodel$models <- utiml_lapply(labels, function(li) {
    basedata <- utiml_create_binary_data(mdata, brpmodel$labels[li],
                                         labeldata[-li])
    dataset <- utiml_prepare_data(basedata, "mldBRP", mdata$name, "brplus",
                                  base.algorithm)
    utiml_create_model(dataset, ...)
  }, cores, seed)
  class(brpmodel) <- "BRPmodel"
  brpmodel
}
#' Predict Method for BR+ (brplus)
#'
#' This function predicts values based upon a model trained by \code{brplus}.
#'
#' The strategies of estimate the values of the new features are separated in
#' two groups:
#' \describe{
#' \item{No Update (\code{NU})}{This use the initial prediction of BR to all
#' labels. This name is because no modification is made to the initial
#' estimates of the augmented features during the prediction phase}
#' \item{With Update}{This strategy update the initial prediction in that the
#' final predict occurs. There are three possibilities to define the order of
#' label sequences:
#' \describe{
#' \item{Specific order (\code{Ord})}{The order is define by the user,
#' require a new argument called \code{order}.}
#' \item{Static order (\code{Stat})}{Use the frequency of single labels in
#' the training set to define the sequence, where the least frequent
#' labels are predicted first}
#' \item{Dynamic order (\code{Dyn})}{Takes into account the confidence of
#' the initial prediction for each independent single label, to define a
#' sequence, where the labels predicted with less confidence are updated
#' first.}
#' }
#' }
#' }
#'
#' @param object Object of class '\code{BRPmodel}'.
#' @param newdata An object containing the new input data. This must be a
#' matrix, data.frame or a mldr object.
#' @param strategy The strategy prefix to determine how to estimate the values
#' of the augmented features of unlabeled examples.
#'
#' The possible values are: \code{'Dyn'}, \code{'Stat'}, \code{'Ord'} or
#' \code{'NU'}. See the description for more details. (Default: \code{'Dyn'}).
#' @param order The label sequence used to update the initial labels results
#' based on the final results. This argument is used only when the
#' \code{strategy = 'Ord'} (Default: \code{list()})
#' @param probability Logical indicating whether class probabilities should be
#' returned. (Default: \code{getOption("utiml.use.probs", TRUE)})
#' @param ... Other arguments passed to the base algorithm prediction for all
#' subproblems.
#' @param cores The number of cores to parallelize the training. Values higher
#' than 1 require the \pkg{parallel} package. (Default:
#' \code{options("utiml.cores", 1)})
#' @param seed An optional integer used to set the seed. This is useful when
#' the method is run in parallel. (Default: \code{options("utiml.seed", NA)})
#' @return An object of type mlresult, based on the parameter probability.
#' @references
#' Cherman, E. A., Metz, J., & Monard, M. C. (2012). Incorporating label
#' dependency into the binary relevance framework for multi-label
#' classification. Expert Systems with Applications, 39(2), 1647-1655.
#' @seealso \code{\link[=brplus]{BR+}}
#' @export
#'
#' @examples
#' # Predict SVM scores
#' model <- brplus(toyml, "RANDOM")
#' pred <- predict(model, toyml)
#'
#' \donttest{
#' # Predict SVM bipartitions and change the method to use No Update strategy
#' pred <- predict(model, toyml, strategy = 'NU', probability = FALSE)
#'
#' # Predict using a random sequence to update the labels
#' labels <- sample(rownames(toyml$labels))
#' pred <- predict(model, toyml, strategy = 'Ord', order = labels)
#'
#' # Passing a specific parameter for SVM predict method
#' pred <- predict(model, toyml, na.action = na.fail)
#' }
predict.BRPmodel <- function(object, newdata,
                             strategy = c("Dyn", "Stat", "Ord", "NU"),
                             order = list(),
                             probability = getOption("utiml.use.probs", TRUE),
                             ..., cores = getOption("utiml.cores", 1),
                             seed = getOption("utiml.seed", NA)) {
  # Validations
  if (!is(object, "BRPmodel")) {
    stop("First argument must be an BRPmodel object")
  }
  strategy <- match.arg(strategy)
  labels <- object$labels
  if (strategy == "Ord") {
    # A user-supplied order must contain exactly the model's labels
    if (!utiml_is_equal_sets(order, labels)) {
      stop("Invalid order (all labels must be on the chain)")
    }
  }
  if (cores < 1) {
    stop("Cores must be a positive value")
  }
  if (!anyNA(seed)) {
    set.seed(seed)
  }
  newdata <- utiml_newdata(newdata)
  # Initial BR predictions seed the augmented label features
  initial.preds <- predict.BRmodel(object$initial, newdata, probability=FALSE,
                                   ..., cores=cores, seed=seed)
  labeldata <- as.data.frame(as.bipartition(initial.preds))
  # Augmented features must be 0/1 factors, matching the training encoding
  for (i in seq(ncol(labeldata))) {
    labeldata[, i] <- factor(labeldata[, i], levels=c(0, 1))
  }
  if (strategy == "NU") {
    # "No Update": every final model uses only the initial label estimates
    indices <- utiml_rename(seq_along(labels), labels)
    predictions <- utiml_lapply(indices, function(li) {
      utiml_predict_binary_model(object$models[[li]],
                                 cbind(newdata, labeldata[, -li]), ...)
    }, cores, seed)
  }
  else {
    # "With Update" strategies: labels are predicted in sequence and each new
    # result overwrites the corresponding initial estimate in `labeldata`
    order <- switch (strategy,
      # Dyn: least confident initial predictions (lowest mean score) first
      Dyn = names(sort(apply(as.probability(initial.preds), 2, mean))),
      # Stat: least frequent training labels first (freq is pre-sorted)
      Stat = names(object$freq),
      # Ord: user-provided sequence
      Ord = order
    )
    predictions <- list()
    for (labelname in order) {
      other.labels <- !labels %in% labelname
      model <- object$models[[labelname]]
      data <- cbind(newdata, labeldata[, other.labels, drop = FALSE])
      predictions[[labelname]] <- utiml_predict_binary_model(model, data, ...)
      # Feed the fresh bipartition back into the augmented features
      labeldata[, labelname] <- factor(predictions[[labelname]]$bipartition,
                                       levels=c(0, 1))
    }
  }
  # Reorder predictions to the original label order before building mlresult
  utiml_predict(predictions[labels], probability)
}
#' Print BRP model
#' @param x The brp model
#' @param ... ignored
#'
#' @return No return value, called for print model's detail
#'
#' @export
print.BRPmodel <- function(x, ...) {
  # Header and the call that created the model
  cat("Classifier BRplus (also called BR+)\n\nCall:\n")
  print(x$call)
  # One binary model is stored per label
  model_names <- names(x$models)
  cat("\n", length(x$models), "Models (labels):\n")
  print(model_names)
}
|
## This function takes a string of chromosome as input. Calls the decode function which decodes the schedule.
## From the Schedule it calculates its objective function
## Since the objective is minimization, it returns a 1/avg value, as a GA is by nature a maximization problem.
fitness <- function(string)
{
  # Decode the chromosome into a schedule and read job lateness values
  shedule = DecodeSeq_Single(string)
  late = shedule$Lateness
  n = length(string)
  # Tardiness is lateness truncated at zero: Tj = max(0, Lj).
  # NOTE: the original loop referenced an undefined `late_vect` and ignored
  # the computed `Tj`; this vectorized form implements the intended logic.
  Tardiness = pmax(late[seq_len(n)], 0)
  # Average tardiness over the n jobs (the minimization objective)
  c = sum(Tardiness) / n
  # GA maximizes fitness, so return the reciprocal of the objective
  d = 1 / c
  return(d)
}
| /fitness_average_Tardiness.R | no_license | ashudrift/scheduling-R-scripts | R | false | false | 672 | r | ## This function takes a string of chromosome as input. Calls the decode function which decodes the schedule.
## From the Schedule it calculates its objective function
## Since the objective is minimization, it returns a 1/avg value, as a GA is by nature a maximization problem.
fitness <- function(string)
{
  # Decode the chromosome into a schedule and read job lateness values
  shedule = DecodeSeq_Single(string)
  late = shedule$Lateness
  n = length(string)
  # Tardiness is lateness truncated at zero: Tj = max(0, Lj).
  # NOTE: the original loop referenced an undefined `late_vect` and ignored
  # the computed `Tj`; this vectorized form implements the intended logic.
  Tardiness = pmax(late[seq_len(n)], 0)
  # Average tardiness over the n jobs (the minimization objective)
  c = sum(Tardiness) / n
  # GA maximizes fitness, so return the reciprocal of the objective
  d = 1 / c
  return(d)
}
|
#
# R statements
#
5 + 8
3 + ( 4 * 5 )
a <- 10
print( a )
#
# Variables and arithmetic operations
#
# Arithmetic operators
3 + 5 + 8
9 - 3
7 * 5
8 / 3
8 %% 3
2 ^ 3 # 2 to the third power
# Arithmetic functions
log( 10 ) + 5 # logarithm
log( 10, base = 2 )
sqrt( 25 ) # square root
max( 5, 3, 2 ) # largest value
min( 3, 9, 5 ) # smallest value
abs( -10 ) # absolute value
factorial( 5 ) # factorial
sin( pi / 2 ) # trigonometric function
# Variables
a <- 10
b <- 20
c <- a + b
print( c )
# Checking a variable's contents
a <- 125
a
print( a )
# Changing a variable's value
a <- 10
b <- 20
a + b
a <- "A"
a + b # error: "A" is not numeric
# Using = instead of <-
a = 10
b = 20
c = a + b
a
b
c
#
# Vectors
#
# Creating vectors
x <- c( 1, 2, 3 ) # numeric vector
y <- c( "a", "b", "c" ) # character vector
z <- c( TRUE, TRUE, FALSE, TRUE ) # logical vector
x
y
z
# A vector holds values of a single data type only
w <- c( 1, 2, 3, "a", "b","c" )
w
# Vector of consecutive numbers
v1 <- 50:90
v1
v2 <- c( 1, 2, 3, 50:90 )
v2
# Vector of numbers with a fixed step
v3 <- seq( 1, 101, 3 )
v3
v4 <- seq( 0.1, 1.0, 0.1 )
v4
# Vector of repeated numbers
v5 <- rep( 1, times = 5 )
v5
v6 <- rep( 1:5, times = 3 )
v6
v7 <- rep( c( 1, 5, 9 ), times = 3 )
v7
# Naming vector elements
score <- c( 90, 85, 70 )
score
names( score )
names( score ) <- c( "Hong", "Kim", "Nam" )
names( score )
score
# Extracting a single element from a vector
d <- c( 1, 4, 3, 7, 8 )
d[ 1 ]
d[ 2 ]
d[ 3 ]
d[ 4 ]
d[ 5 ]
d[ 6 ]
# Extracting several values from a vector at once
d <- c( 1, 4, 3, 7, 8 )
d[ c( 1, 3, 5 ) ]
d[ 1:3 ]
d[ seq( 1, 5, 2 ) ]
d[ -2 ]
d[ -c( 3:5 ) ]
# Extracting values from a vector by name
GNP <- c( 2090, 2450, 960 )
GNP
names( GNP ) <- c( "Korea", "Japan", "Nepal" )
GNP
GNP[ 1 ]
GNP[ "Korea" ]
GNP[ c( "Korea", "Nepal" ) ]
# Changing values stored in a vector
v1 <- c( 1, 5, 7, 8, 9 )
v1
v1[ 2 ] <- 3
v1
v1[ c( 1, 5 ) ] <- c( 10, 20 )
v1
# Vector arithmetic
d <- c( 1, 4, 3, 7, 8 )
2 * d
d - 5
3 * d + 4
# Operations between two vectors
x <- c( 1, 2, 3 )
y <- c( 4, 5, 6 )
x + y
x * y
z <- x + y
z
# Functions applicable to vectors
d <- c( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 )
sum( d ) # sum of the values
sum( 2 * d )
length( d ) # number of values (length)
mean( d[ 1:5 ] ) # mean of the values
mean( d )
median( d[ 1:5 ] ) # median of the values
median( d )
max( d ) # maximum value
min( d ) # minimum value
sort( d ) # sort the values (ascending by default)
sort( d, decreasing = FALSE ) # ascending sort
sort( d, decreasing = TRUE ) # descending sort
range( d ) # range of the values (min~max)
var( d ) # variance of the values
sd( d ) # standard deviation of the values
v1 <- median( d )
v1
v2 <- sum( d ) / length( d )
v2
# Applying logical operators to vectors
d <- c( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
d >= 5
d[ d > 5 ]
sum( d > 5 )
sum( d[ d > 5 ] )
d == 5
condi <- d > 5 & d < 8
condi
d[ condi ]
# Combining several vectors into a new vector
x <- c( 1, 2, 3 )
x
y <- c( 4, 5 )
y
c( x, y )
#
# Lists and factors
#
# Lists
ds <- c( 90, 85, 70, 84 )
my.info <- list( name = 'Hong', age = 60, status = TRUE, score = ds )
my.info
my.info[[ 1 ]]
my.info$name
my.info[[ 4 ]]
# Factors
bt <- c( 'A', 'B', 'B', 'O', 'AB', 'A' )
bt.new <- factor( bt )
bt
bt.new
bt[ 5 ]
bt.new[ 5 ]
levels( bt.new )
as.integer( bt.new )
bt.new[ 7 ] <- 'B'
bt.new[ 8 ] <- 'C'
bt.new
| /02-Variable&Vector.R | no_license | wolee777/WorkR | R | false | false | 4,011 | r | #
# R statements
#
5 + 8
3 + ( 4 * 5 )
a <- 10
print( a )
#
# Variables and arithmetic operations
#
# Arithmetic operators
3 + 5 + 8
9 - 3
7 * 5
8 / 3
8 %% 3
2 ^ 3 # 2 to the third power
# Arithmetic functions
log( 10 ) + 5 # logarithm
log( 10, base = 2 )
sqrt( 25 ) # square root
max( 5, 3, 2 ) # largest value
min( 3, 9, 5 ) # smallest value
abs( -10 ) # absolute value
factorial( 5 ) # factorial
sin( pi / 2 ) # trigonometric function
# Variables
a <- 10
b <- 20
c <- a + b
print( c )
# Checking a variable's contents
a <- 125
a
print( a )
# Changing a variable's value
a <- 10
b <- 20
a + b
a <- "A"
a + b # error: "A" is not numeric
# Using = instead of <-
a = 10
b = 20
c = a + b
a
b
c
#
# Vectors
#
# Creating vectors
x <- c( 1, 2, 3 ) # numeric vector
y <- c( "a", "b", "c" ) # character vector
z <- c( TRUE, TRUE, FALSE, TRUE ) # logical vector
x
y
z
# A vector holds values of a single data type only
w <- c( 1, 2, 3, "a", "b","c" )
w
# Vector of consecutive numbers
v1 <- 50:90
v1
v2 <- c( 1, 2, 3, 50:90 )
v2
# Vector of numbers with a fixed step
v3 <- seq( 1, 101, 3 )
v3
v4 <- seq( 0.1, 1.0, 0.1 )
v4
# Vector of repeated numbers
v5 <- rep( 1, times = 5 )
v5
v6 <- rep( 1:5, times = 3 )
v6
v7 <- rep( c( 1, 5, 9 ), times = 3 )
v7
# Naming vector elements
score <- c( 90, 85, 70 )
score
names( score )
names( score ) <- c( "Hong", "Kim", "Nam" )
names( score )
score
# Extracting a single element from a vector
d <- c( 1, 4, 3, 7, 8 )
d[ 1 ]
d[ 2 ]
d[ 3 ]
d[ 4 ]
d[ 5 ]
d[ 6 ]
# Extracting several values from a vector at once
d <- c( 1, 4, 3, 7, 8 )
d[ c( 1, 3, 5 ) ]
d[ 1:3 ]
d[ seq( 1, 5, 2 ) ]
d[ -2 ]
d[ -c( 3:5 ) ]
# Extracting values from a vector by name
GNP <- c( 2090, 2450, 960 )
GNP
names( GNP ) <- c( "Korea", "Japan", "Nepal" )
GNP
GNP[ 1 ]
GNP[ "Korea" ]
GNP[ c( "Korea", "Nepal" ) ]
# Changing values stored in a vector
v1 <- c( 1, 5, 7, 8, 9 )
v1
v1[ 2 ] <- 3
v1
v1[ c( 1, 5 ) ] <- c( 10, 20 )
v1
# Vector arithmetic
d <- c( 1, 4, 3, 7, 8 )
2 * d
d - 5
3 * d + 4
# Operations between two vectors
x <- c( 1, 2, 3 )
y <- c( 4, 5, 6 )
x + y
x * y
z <- x + y
z
# Functions applicable to vectors
d <- c( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 )
sum( d ) # sum of the values
sum( 2 * d )
length( d ) # number of values (length)
mean( d[ 1:5 ] ) # mean of the values
mean( d )
median( d[ 1:5 ] ) # median of the values
median( d )
max( d ) # maximum value
min( d ) # minimum value
sort( d ) # sort the values (ascending by default)
sort( d, decreasing = FALSE ) # ascending sort
sort( d, decreasing = TRUE ) # descending sort
range( d ) # range of the values (min~max)
var( d ) # variance of the values
sd( d ) # standard deviation of the values
v1 <- median( d )
v1
v2 <- sum( d ) / length( d )
v2
# Applying logical operators to vectors
d <- c( 1, 2, 3, 4, 5, 6, 7, 8, 9 )
d >= 5
d[ d > 5 ]
sum( d > 5 )
sum( d[ d > 5 ] )
d == 5
condi <- d > 5 & d < 8
condi
d[ condi ]
# Combining several vectors into a new vector
x <- c( 1, 2, 3 )
x
y <- c( 4, 5 )
y
c( x, y )
#
# Lists and factors
#
# Lists
ds <- c( 90, 85, 70, 84 )
my.info <- list( name = 'Hong', age = 60, status = TRUE, score = ds )
my.info
my.info[[ 1 ]]
my.info$name
my.info[[ 4 ]]
# Factors
bt <- c( 'A', 'B', 'B', 'O', 'AB', 'A' )
bt.new <- factor( bt )
bt
bt.new
bt[ 5 ]
bt.new[ 5 ]
levels( bt.new )
as.integer( bt.new )
bt.new[ 7 ] <- 'B'
bt.new[ 8 ] <- 'C'
bt.new
|
##################################################
# Graphs for Database presentation
# Bridget Balkaran
# 4/4/16
##################################################
library(tidyverse)
library( magrittr)
# Yearly counts of surveyed students (columns: Year, Elementary, Middle, High)
# assumed from the plot below -- TODO confirm against the CSV
dataPerYear <- read_csv("Data/data collected per year.csv")
# Quick peek at the first rows
dataPerYear %>%
  head()
# Smoothed trend of students surveyed per school level, 1992-2017
dataPerYear %>%
  filter(Year %in% c(1992:2017)) %>%
  ggplot(aes(Year)) +
  geom_smooth(aes(y = Elementary, color = "Elementary")) +
  geom_smooth(aes(y = Middle, color = "Middle")) +
  geom_smooth(aes( y= High, color = "High")) +
  ylab( "Number of Students Surveyed") +
  scale_x_continuous(breaks = pretty(dataPerYear$Year, n = 10)) +
  ggtitle("Trajectory of Data Growth 1992 - 2017")
| /Code/Graphs_for_DB_Presentation.R | no_license | bpowley/HealthyHeartsDB | R | false | false | 693 | r | ##################################################
# Graphs for Database presentation
# Bridget Balkaran
# 4/4/16
##################################################
library(tidyverse)
library( magrittr)
# Yearly counts of surveyed students (columns: Year, Elementary, Middle, High)
# assumed from the plot below -- TODO confirm against the CSV
dataPerYear <- read_csv("Data/data collected per year.csv")
# Quick peek at the first rows
dataPerYear %>%
  head()
# Smoothed trend of students surveyed per school level, 1992-2017
dataPerYear %>%
  filter(Year %in% c(1992:2017)) %>%
  ggplot(aes(Year)) +
  geom_smooth(aes(y = Elementary, color = "Elementary")) +
  geom_smooth(aes(y = Middle, color = "Middle")) +
  geom_smooth(aes( y= High, color = "High")) +
  ylab( "Number of Students Surveyed") +
  scale_x_continuous(breaks = pretty(dataPerYear$Year, n = 10)) +
  ggtitle("Trajectory of Data Growth 1992 - 2017")
context("hmisc")
skip_if_not_installed("Hmisc")
test_that("tidy.rcorr", {
check_arguments(tidy.rcorr)
mat <- replicate(52, rnorm(100))
mat[sample(length(mat), 2000)] <- NA
colnames(mat) <- c(LETTERS, letters)
rc <- Hmisc::rcorr(mat)
td <- tidy(rc)
check_tidy_output(td)
check_dims(td, expected_cols = 5)
})
| /packrat/lib/x86_64-apple-darwin18.2.0/3.5.2/broom/tests/testthat/test-hmisc.R | no_license | teyden/asthma-research | R | false | false | 351 | r | context("hmisc")
skip_if_not_installed("Hmisc")
test_that("tidy.rcorr", {
check_arguments(tidy.rcorr)
mat <- replicate(52, rnorm(100))
mat[sample(length(mat), 2000)] <- NA
colnames(mat) <- c(LETTERS, letters)
rc <- Hmisc::rcorr(mat)
td <- tidy(rc)
check_tidy_output(td)
check_dims(td, expected_cols = 5)
})
|
# summ_distance() ---------------------------------------------------------
#' Summarize pair of distributions with distance
#'
#' This function computes distance between two distributions represented by
#' pdqr-functions. Here "distance" is used in a broad sense: a single
#' non-negative number representing how much two distributions differ from one
#' another. Bigger values indicate bigger difference. Zero value means that
#' input distributions are equivalent based on the method used (except method
#' "avgdist" which is almost always returns positive value). The notion of
#' "distance" is useful for doing statistical inference about similarity of two
#' groups of numbers.
#'
#' @param f A pdqr-function of any [type][meta_type()] and
#' [class][meta_class()].
#' @param g A pdqr-function of any type and class.
#' @param method Method for computing distance. Should be one of "KS", "totvar",
#' "compare", "wass", "cramer", "align", "avgdist", "entropy".
#'
#' @details Methods can be separated into three categories: probability based,
#' metric based, and entropy based.
#'
#' **Probability based** methods return a number between 0 and 1 which is
#' computed in the way that mostly based on probability:
#' - *Method "KS"* (short for Kolmogorov-Smirnov) computes the supremum of
#' absolute difference between p-functions corresponding to `f` and `g` (`|F -
#' G|`). Here "supremum" is meant to describe the fact that if input functions
#' have different [types][meta_type()], there can be no point at which "KS"
#' distance is achieved. Instead, there might be a sequence of points from left
#' to right with `|F - G|` values tending to the result (see Examples).
#' - *Method "totvar"* (short for "total variation") computes a biggest absolute
#' difference of probabilities for any subset of real line. In other words,
#' there is a set of points for "discrete" type and intervals for "continuous",
#' total probability of which under `f` and `g` differs the most. **Note** that
#' if `f` and `g` have different types, output is always 1. The set of interest
#' consists from all "x" values of "discrete" pdqr-function: probability under
#' "discrete" distribution is 1 and under "continuous" is 0.
#' - *Method "compare"* represents a value computed based on probabilities of
#' one distribution being bigger than the other (see [pdqr methods for "Ops"
#' group generic family][methods-group-generic] for more details on comparing
#' pdqr-functions). It is computed as
#' `2*max(P(F > G), P(F < G)) + 0.5*P(F = G) - 1` (here `P(F > G)` is basically
#' `summ_prob_true(f > g)`). This is maximum of two values (`P(F > G) + 0.5*P(F
#' = G)` and `P(F < G) + 0.5*P(F = G)`), normalized to return values from 0
#' to 1. Other way to look at this measure is that it computes (before
#' normalization) two [ROC AUC][summ_rocauc()] values with method `"expected"`
#' for two possible ordering (`f, g`, and `g, f`) and takes their maximum.
#'
#' **Metric based** methods compute "how far" two distributions are apart on the
#' real line:
#' - *Method "wass"* (short for "Wasserstein") computes a 1-Wasserstein
#' distance: "minimum cost of 'moving' one density into another", or "average
#' path density point should go while transforming from one into another". It is
#' computed as integral of `|F - G|` (absolute difference between p-functions).
#' If any of `f` and `g` has "continuous" type, [stats::integrate()] is used, so
#' relatively small numerical errors can happen.
#' - *Method "cramer"* computes Cramer distance: integral of `(F - G)^2`. This
#' somewhat relates to "wass" method as [variance][summ_var()] relates to [first
#' central absolute moment][summ_moment()]. Relatively small numerical errors
#' can happen.
#' - *Method "align"* computes an absolute value of shift `d` (possibly
#' negative) that should be added to `f` to achieve both `P(f+d >= g) >= 0.5`
#' and `P(f+d <= g) >= 0.5` (in other words, align `f+d` and `g`) as close as
#' reasonably possible. Solution is found numerically with [stats::uniroot()],
#' so relatively small numerical errors can happen. Also **note** that this
#' method is somewhat slow (compared to all others). To increase speed, use less
#' elements in ["x_tbl" metadata][meta_x_tbl()]. For example, with
#' [form_retype()] or smaller `n_grid` argument in [as_*()][as_p()] functions.
#' - *Method "avgdist"* computes average distance between sample values from
#' inputs. Basically, it is a deterministically computed approximation of
#' expected value of absolute difference between random variables, or in 'pdqr'
#' code: `summ_mean(abs(f - g))` (but computed without randomness). Computation
#' is done by approximating possibly present continuous pdqr-functions with
#' discrete ones (see description of ["pdqr.approx_discrete_n_grid"
#' option][pdqr-package] for more information) and then computing output value
#' directly based on two discrete pdqr-functions. **Note** that this method
#' almost never returns zero, even for identical inputs (except the case of
#' discrete pdqr-functions with identical one value).
#'
#' **Entropy based** methods compute output based on entropy characteristics:
#' - *Method "entropy"* computes sum of two Kullback-Leibler divergences:
#' `KL(f, g) + KL(g, f)`, which are outputs of [summ_entropy2()] with method
#' "relative". **Notes**:
#' - If `f` and `g` don't have the same support, distance can be very high.
#' - Error is thrown if `f` and `g` have different types (the same as in
#' `summ_entropy2()`).
#'
#' @return A single non-negative number representing distance between pair of
#' distributions. For methods "KS", "totvar", and "compare" it is not bigger
#' than 1. For method "avgdist" it is almost always bigger than 0.
#'
#' @seealso [summ_separation()] for computation of optimal threshold separating
#' pair of distributions.
#'
#' @family summary functions
#'
#' @examples
#' d_unif <- as_d(dunif, max = 2)
#' d_norm <- as_d(dnorm, mean = 1)
#'
#' vapply(
#' c(
#' "KS", "totvar", "compare",
#' "wass", "cramer", "align", "avgdist",
#' "entropy"
#' ),
#' function(meth) {
#' summ_distance(d_unif, d_norm, method = meth)
#' },
#' numeric(1)
#' )
#'
#' # "Supremum" quality of "KS" distance
#' d_dis <- new_d(2, "discrete")
#' ## Distance is 1, which is a limit of |F - G| at points which tend to 2 from
#' ## left
#' summ_distance(d_dis, d_unif, method = "KS")
#' @export
summ_distance <- function(f, g, method = "KS") {
  # Validate inputs before dispatching
  assert_pdqr_fun(f)
  assert_pdqr_fun(g)
  assert_method(method, methods_distance)
  # Speed optimization (skips possibly expensive assertions)
  disable_asserting_locally()
  # Pick the implementation corresponding to `method` and apply it
  dist_fun <- switch(
    method,
    KS = distance_ks,
    totvar = distance_totvar,
    compare = distance_compare,
    wass = distance_wass,
    cramer = distance_cramer,
    align = distance_align,
    avgdist = distance_avgdist,
    entropy = distance_entropy
  )
  dist_fun(f, g)
}
# All supported values of the `method` argument in `summ_distance()`
methods_distance <- c(
  "KS", "totvar", "compare", "wass", "cramer", "align", "avgdist", "entropy"
)
# Method "KS" -------------------------------------------------------------
distance_ks <- function(f, g) {
  # Kolmogorov-Smirnov distance: supremum of |F - G|. The computation differs
  # depending on which of the inputs are "discrete"/"continuous".
  p_f <- as_p(f)
  p_g <- as_p(g)
  f_is_dis <- meta_type(f) == "discrete"
  g_is_dis <- meta_type(g) == "discrete"
  if (f_is_dis && g_is_dis) {
    distance_ks_two_dis(p_f, p_g)
  } else if (f_is_dis) {
    distance_ks_mixed(p_dis = p_f, p_con = p_g)
  } else if (g_is_dis) {
    distance_ks_mixed(p_dis = p_g, p_con = p_f)
  } else {
    distance_ks_two_con(p_f, p_g)
  }
}
distance_ks_two_dis <- function(p_f, p_g) {
  # Distance is |F - G| evaluated at the optimal K-S separation point
  x_sep <- separation_ks_two_dis(p_f, p_g)
  abs(p_f(x_sep) - p_g(x_sep))
}
# K-S distance when one input is "discrete" (`p_dis`) and the other is
# "continuous" (`p_con`). Candidate points are the "x" values of the discrete
# distribution, checked both at the point and just before its CDF jump.
distance_ks_mixed <- function(p_dis, p_con) {
  # Not using `separation_ks_mixed()` because of possible "limit" nature of K-S
  # distance which is a "supremum" and not "maximum". Its output might be
  # misleading because supremum distance might be achieved as left limit at the
  # point. See also commentary in `separation_ks_mixed()`.
  x_test <- meta_x_tbl(p_dis)[["x"]]
  # Continuous CDF evaluated at the discrete "x" points
  p_con_cumprob <- p_con(x_test)
  # Discrete CDF at the points and its left limits (value just before each jump)
  p_dis_cumprob <- meta_x_tbl(p_dis)[["cumprob"]]
  p_dis_left_cumprob <- c(0, p_dis_cumprob[-length(p_dis_cumprob)])
  max(
    abs(p_con_cumprob - p_dis_cumprob),
    abs(p_con_cumprob - p_dis_left_cumprob)
  )
}
distance_ks_two_con <- function(p_f, p_g) {
  # Distance is |F - G| evaluated at the optimal K-S separation point
  x_sep <- separation_ks_two_con(p_f, p_g)
  abs(p_f(x_sep) - p_g(x_sep))
}
# Method "totvar" ---------------------------------------------------------
# **Notes**. Set (of finite values for "discrete" and of intervals for
# "continuous"), at which total variation distance is achieved, can be expressed
# as `A = {x | f(x) > g(x)}` (`f` and `g` are d-functions) or `B = {x | f(x) <
# g(x)}`. However, absolute differences in probabilities for `A` and `B` are
# equal. This is because:
# `0 = 1 - 1 = (P_f(A) + P_f(B) + P_f(C)) - (P_g(A) + P_g(B) + P_g(C))`, where
# `P_f` and `P_g` are probability measures of `f` and `g`;
# `C = {x | f(x) = g(x)}`.
# By definitions: `abs(P_f(A) - P_g(A)) = P_f(A) - P_g(A)`;
# `abs(P_f(B) - P_g(B)) = P_g(B) - P_f(B)`; `P_f(C) = P_g(C)`.
# Therefore: `abs(P_f(A) - P_g(A)) = abs(P_f(B) - P_g(B))`.
distance_totvar <- function(f, g) {
  d_f <- as_d(f)
  d_g <- as_d(g)
  # Count how many of the inputs have "discrete" type
  n_discrete <- (meta_type(f) == "discrete") + (meta_type(g) == "discrete")
  if (n_discrete == 0) {
    distance_totvar_two_con(d_f, d_g)
  } else if (n_discrete == 2) {
    distance_totvar_two_dis(d_f, d_g)
  } else {
    # Mixed types: the target set is all `x` values of the "discrete"
    # pdqr-function. Its probability under "discrete" is 1 and under
    # "continuous" is zero because it is countable.
    1
  }
}
# Total variation distance between two "continuous" distributions: probability
# mass difference over the set where density of `d_f` exceeds density of `d_g`.
distance_totvar_two_con <- function(d_f, d_g) {
  # `{x | d_f(x) > d_g(x)}` is a union of intervals where `d_f(x) - d_g(x)` has
  # constant positive sign. `d_f(x) - d_g(x)` can change sign in two cases:
  # - When `d_f` and `d_g` intersect.
  # - When either `d_f` or `d_g` shows discontinuity on edges.
  x_inters <- compute_density_crossings(d_f, d_g)
  # This might introduce duplicate elements on the edges (if `d_f` and `d_g`
  # intersect on any support edge) but they will introduce "interval" with zero
  # "sign" which will later be ignored.
  x_lim <- sort(c(x_inters, meta_support(d_f), meta_support(d_g)))
  # Classify every interval by the sign of the density difference at its center
  interval_center <- (x_lim[-1] + x_lim[-length(x_lim)]) / 2
  pos_sign_inds <- which(d_f(interval_center) > d_g(interval_center))
  # Note: if `pos_sign_inds` is empty, then `f` and `g` are identical. In that
  # case both `x_lim_left` and `x_lim_right` are empty and `sum()` later will
  # return 0, which is correct answer.
  x_lim_left <- x_lim[pos_sign_inds]
  x_lim_right <- x_lim[pos_sign_inds + 1]
  p_f <- as_p(d_f)
  p_g <- as_p(d_g)
  # Output distance is total difference in probabilities of intervals where `f`
  # is greater than `g`.
  sum(
    (p_f(x_lim_right) - p_f(x_lim_left)) - (p_g(x_lim_right) - p_g(x_lim_left))
  )
}
distance_totvar_two_dis <- function(d_f, d_g) {
  # Total variation between two "discrete" distributions is the total positive
  # part of pointwise probability differences over the union of "x" values
  x_grid <- union_x(d_f, d_g)
  sum(pmax(d_f(x_grid) - d_g(x_grid), 0))
}
# Method "compare" --------------------------------------------------------
# This is basically `max(P(f > g) + 0.5*P(f == g), P(g > f) + 0.5*P(f == g))`,
# normalized to return values from 0 to 1 (`P(x)` is `summ_prob_true(x)`).
# Addition of `0.5*P(f == g)` is to ensure that 0.5 is returned when `f` and `g`
# are identical (useful to think about this as maximum between two "symmetric"
# ROCAUCs computed with "expected" method). This also means zero distance for
# identical inputs.
# Here equation `prob_geq()` is used for performance reasons and based on the
# following equation: `max(P(f>g), P(f<g)) + 0.5*P(f==g) =
# max(P(f>=g), P(f<=g)) - P(f==g) + 0.5*P(f==g)`. After `y = 2*x-1`
# normalization, this is the output.
distance_compare <- function(f, g) {
  # Only two probabilities are needed here because
  # `prob_geq(g, f) = 1 - prob_geq(f, g) + prob_equal(f, g)`.
  p_eq <- prob_equal(f, g)
  p_geq <- prob_geq(f, g)
  bigger <- max(p_geq, 1 - p_geq + p_eq)
  # Map `bigger - 0.5*p_eq` (always inside [0.5, 1]) linearly onto [0, 1]
  2 * bigger - p_eq - 1
}
# Method "wass" -----------------------------------------------------------
distance_wass <- function(f, g) {
  # 1-Wasserstein distance: integral of `|F - G|` over the union support
  integrate_cdf_absdiff(as_p(f), as_p(g), power = 1)
}
# Method "cramer" ---------------------------------------------------------
distance_cramer <- function(f, g) {
  # Cramer distance: integral of `(F - G)^2` over the union support
  integrate_cdf_absdiff(as_p(f), as_p(g), power = 2)
}
# Method "align" ----------------------------------------------------------
# Method "align" distance: absolute value of the shift `delta` that makes
# `f + delta` and `g` "aligned", i.e. `prob_geq(f + delta, g)` equal to 0.5.
# The root is found numerically with `stats::uniroot()`, so a small
# numerical error is possible.
distance_align <- function(f, g) {
f_supp <- meta_support(f)
g_supp <- meta_support(g)
f_geq_g <- prob_geq(f, g) >= 0.5
g_geq_f <- prob_geq(g, f) >= 0.5
# Handle edge case of identical "discrete" pdqr-functions
if (f_geq_g && g_geq_f) {
return(0)
}
if (f_geq_g) {
# Moving `f` to the left
search_interval <- c(g_supp[1] - f_supp[2], 0)
} else {
# Moving `f` to the right
search_interval <- c(0, g_supp[2] - f_supp[1])
}
target_fun <- function(delta) {
prob_geq(f + delta, g) - 0.5
}
# `extendInt = "yes"` lets uniroot widen the bracket if the root happens to
# lie slightly outside the initial search interval
res <- stats::uniroot(
target_fun, interval = search_interval, extendInt = "yes"
)[["root"]]
abs(res)
}
# Method "avgdist" --------------------------------------------------------
distance_avgdist <- function(f, g) {
  # Replace possibly "continuous" inputs with their discrete approximations
  f_tbl <- meta_x_tbl(approx_discrete(f))
  g_tbl <- meta_x_tbl(approx_discrete(g))
  # Expected absolute difference between the two discrete distributions:
  # probability-weighted double sum over the pairwise-distance matrix
  dist_mat <- abs(outer(f_tbl[["x"]], g_tbl[["x"]], "-"))
  sum(f_tbl[["prob"]] * as.vector(dist_mat %*% g_tbl[["prob"]]))
}
# Method "entropy" --------------------------------------------------------
distance_entropy <- function(f, g) {
  # Symmetric Kullback-Leibler divergence `KL(f, g) + KL(g, f)`, where each
  # term mirrors `summ_entropy2(*, *, method = "relative")` without the extra
  # `assert_*()` checks.
  kl_f_g <- cross_entropy(f, g) - cross_entropy(f, f)
  kl_g_f <- cross_entropy(g, f) - cross_entropy(g, g)
  # Clip at zero to account for numerical representation issues
  max(kl_f_g + kl_g_f, 0)
}
# Helpers -----------------------------------------------------------------
# Integral of `|F - G|^power` over the union support of p-functions `p_f`
# and `p_g`. Used by methods "wass" (power = 1) and "cramer" (power = 2).
integrate_cdf_absdiff <- function(p_f, p_g, power) {
if ((meta_type(p_f) == "discrete") && (meta_type(p_g) == "discrete")) {
# Both CDFs are step functions that change only at points of `union_x`, so
# the integral is an exact finite sum of rectangle areas
union_x <- union_x(p_f, p_g)
abs_diff_cumprob <- abs(p_f(union_x) - p_g(union_x))
sum(diff(union_x) * abs_diff_cumprob[-length(union_x)]^power)
} else {
# At least one input is "continuous": integrate numerically (relatively
# small numerical errors can happen)
integr_range <- union_support(p_f, p_g)
stats::integrate(
f = function(x) {
abs(p_f(x) - p_g(x))^power
},
lower = integr_range[1],
upper = integr_range[2],
subdivisions = 1e3
)[["value"]]
}
}
# summ_distance() ---------------------------------------------------------
#' Summarize pair of distributions with distance
#'
#' This function computes distance between two distributions represented by
#' pdqr-functions. Here "distance" is used in a broad sense: a single
#' non-negative number representing how much two distributions differ from one
#' another. Bigger values indicate bigger difference. Zero value means that
#' input distributions are equivalent based on the method used (except method
#' "avgdist" which almost always returns a positive value). The notion of
#' "distance" is useful for doing statistical inference about similarity of two
#' groups of numbers.
#'
#' @param f A pdqr-function of any [type][meta_type()] and
#' [class][meta_class()].
#' @param g A pdqr-function of any type and class.
#' @param method Method for computing distance. Should be one of "KS", "totvar",
#' "compare", "wass", "cramer", "align", "avgdist", "entropy".
#'
#' @details Methods can be separated into three categories: probability based,
#' metric based, and entropy based.
#'
#' **Probability based** methods return a number between 0 and 1 which is
#' computed in the way that mostly based on probability:
#' - *Method "KS"* (short for Kolmogorov-Smirnov) computes the supremum of
#' absolute difference between p-functions corresponding to `f` and `g` (`|F -
#' G|`). Here "supremum" is meant to describe the fact that if input functions
#' have different [types][meta_type()], there can be no point at which "KS"
#' distance is achieved. Instead, there might be a sequence of points from left
#' to right with `|F - G|` values tending to the result (see Examples).
#' - *Method "totvar"* (short for "total variation") computes a biggest absolute
#' difference of probabilities for any subset of real line. In other words,
#' there is a set of points for "discrete" type and intervals for "continuous",
#' total probability of which under `f` and `g` differs the most. **Note** that
#' if `f` and `g` have different types, output is always 1. The set of interest
#' consists from all "x" values of "discrete" pdqr-function: probability under
#' "discrete" distribution is 1 and under "continuous" is 0.
#' - *Method "compare"* represents a value computed based on probabilities of
#' one distribution being bigger than the other (see [pdqr methods for "Ops"
#' group generic family][methods-group-generic] for more details on comparing
#' pdqr-functions). It is computed as
#' `2*max(P(F > G), P(F < G)) + P(F = G) - 1` (here `P(F > G)` is basically
#' `summ_prob_true(f > g)`). This is maximum of two values (`P(F > G) + 0.5*P(F
#' = G)` and `P(F < G) + 0.5*P(F = G)`), normalized to return values from 0
#' to 1. Other way to look at this measure is that it computes (before
#' normalization) two [ROC AUC][summ_rocauc()] values with method `"expected"`
#' for two possible ordering (`f, g`, and `g, f`) and takes their maximum.
#'
#' **Metric based** methods compute "how far" two distributions are apart on the
#' real line:
#' - *Method "wass"* (short for "Wasserstein") computes a 1-Wasserstein
#' distance: "minimum cost of 'moving' one density into another", or "average
#' path density point should go while transforming from one into another". It is
#' computed as integral of `|F - G|` (absolute difference between p-functions).
#' If any of `f` and `g` has "continuous" type, [stats::integrate()] is used, so
#' relatively small numerical errors can happen.
#' - *Method "cramer"* computes Cramer distance: integral of `(F - G)^2`. This
#' somewhat relates to "wass" method as [variance][summ_var()] relates to [first
#' central absolute moment][summ_moment()]. Relatively small numerical errors
#' can happen.
#' - *Method "align"* computes an absolute value of shift `d` (possibly
#' negative) that should be added to `f` to achieve both `P(f+d >= g) >= 0.5`
#' and `P(f+d <= g) >= 0.5` (in other words, align `f+d` and `g`) as close as
#' reasonably possible. Solution is found numerically with [stats::uniroot()],
#' so relatively small numerical errors can happen. Also **note** that this
#' method is somewhat slow (compared to all others). To increase speed, use less
#' elements in ["x_tbl" metadata][meta_x_tbl()]. For example, with
#' [form_retype()] or smaller `n_grid` argument in [as_*()][as_p()] functions.
#' - *Method "avgdist"* computes average distance between sample values from
#' inputs. Basically, it is a deterministically computed approximation of
#' expected value of absolute difference between random variables, or in 'pdqr'
#' code: `summ_mean(abs(f - g))` (but computed without randomness). Computation
#' is done by approximating possibly present continuous pdqr-functions with
#' discrete ones (see description of ["pdqr.approx_discrete_n_grid"
#' option][pdqr-package] for more information) and then computing output value
#' directly based on two discrete pdqr-functions. **Note** that this method
#' almost never returns zero, even for identical inputs (except the case of
#' discrete pdqr-functions with identical one value).
#'
#' **Entropy based** methods compute output based on entropy characteristics:
#' - *Method "entropy"* computes sum of two Kullback-Leibler divergences:
#' `KL(f, g) + KL(g, f)`, which are outputs of [summ_entropy2()] with method
#' "relative". **Notes**:
#' - If `f` and `g` don't have the same support, distance can be very high.
#' - Error is thrown if `f` and `g` have different types (the same as in
#' `summ_entropy2()`).
#'
#' @return A single non-negative number representing distance between pair of
#' distributions. For methods "KS", "totvar", and "compare" it is not bigger
#' than 1. For method "avgdist" it is almost always bigger than 0.
#'
#' @seealso [summ_separation()] for computation of optimal threshold separating
#' pair of distributions.
#'
#' @family summary functions
#'
#' @examples
#' d_unif <- as_d(dunif, max = 2)
#' d_norm <- as_d(dnorm, mean = 1)
#'
#' vapply(
#' c(
#' "KS", "totvar", "compare",
#' "wass", "cramer", "align", "avgdist",
#' "entropy"
#' ),
#' function(meth) {
#' summ_distance(d_unif, d_norm, method = meth)
#' },
#' numeric(1)
#' )
#'
#' # "Supremum" quality of "KS" distance
#' d_dis <- new_d(2, "discrete")
#' ## Distance is 1, which is a limit of |F - G| at points which tend to 2 from
#' ## left
#' summ_distance(d_dis, d_unif, method = "KS")
#' @export
summ_distance <- function(f, g, method = "KS") {
  assert_pdqr_fun(f)
  assert_pdqr_fun(g)
  assert_method(method, methods_distance)
  # Speed optimization (skips possibly expensive assertions)
  disable_asserting_locally()
  # Look up the implementation for `method` and apply it to the inputs
  distance_fun <- switch(
    method,
    KS = distance_ks,
    totvar = distance_totvar,
    compare = distance_compare,
    wass = distance_wass,
    cramer = distance_cramer,
    align = distance_align,
    avgdist = distance_avgdist,
    entropy = distance_entropy
  )
  distance_fun(f, g)
}
# All method names accepted by `summ_distance()` (checked via
# `assert_method()`)
methods_distance <- c(
"KS", "totvar", "compare", "wass", "cramer", "align", "avgdist", "entropy"
)
# Method "KS" -------------------------------------------------------------
distance_ks <- function(f, g) {
  # Dispatch on the pair of input types to the specialized K-S routines
  p_f <- as_p(f)
  p_g <- as_p(g)
  f_is_dis <- meta_type(f) == "discrete"
  g_is_dis <- meta_type(g) == "discrete"
  if (f_is_dis && g_is_dis) {
    distance_ks_two_dis(p_f, p_g)
  } else if (f_is_dis) {
    distance_ks_mixed(p_dis = p_f, p_con = p_g)
  } else if (g_is_dis) {
    distance_ks_mixed(p_dis = p_g, p_con = p_f)
  } else {
    distance_ks_two_con(p_f, p_g)
  }
}
distance_ks_two_dis <- function(p_f, p_g) {
  # For two "discrete" p-functions the K-S statistic is attained exactly at
  # the K-S separation point
  x_sep <- separation_ks_two_dis(p_f, p_g)
  abs(p_f(x_sep) - p_g(x_sep))
}
# K-S distance between a "discrete" p-function (`p_dis`) and a "continuous"
# one (`p_con`).
distance_ks_mixed <- function(p_dis, p_con) {
# Not using `separation_ks_mixed()` because of possible "limit" nature of K-S
# distance which is a "supremum" and not "maximum". Its output might be
# misleading because supremum distance might be achieved as left limit at the
# point. See also commentary in `separation_ks_mixed()`.
x_test <- meta_x_tbl(p_dis)[["x"]]
p_con_cumprob <- p_con(x_test)
p_dis_cumprob <- meta_x_tbl(p_dis)[["cumprob"]]
# Left limit of the discrete CDF at every "x" point: cumulative probability
# just before the jump
p_dis_left_cumprob <- c(0, p_dis_cumprob[-length(p_dis_cumprob)])
# Check |F - G| both at the jump value and at its left limit
max(
abs(p_con_cumprob - p_dis_cumprob),
abs(p_con_cumprob - p_dis_left_cumprob)
)
}
distance_ks_two_con <- function(p_f, p_g) {
  # For two "continuous" p-functions the K-S statistic is attained at the
  # K-S separation point
  x_sep <- separation_ks_two_con(p_f, p_g)
  abs(p_f(x_sep) - p_g(x_sep))
}
# Method "totvar" ---------------------------------------------------------
# **Notes**. Set (of finite values for "discrete" and of intervals for
# "continuous"), at which total variation distance is achieved, can be expressed
# as `A = {x | f(x) > g(x)}` (`f` and `g` are d-functions) or `B = {x | f(x) <
# g(x)}`. However, absolute differences in probabilities for `A` and `B` are
# equal. This is because:
# `0 = 1 - 1 = (P_f(A) + P_f(B) + P_f(C)) - (P_g(A) + P_g(B) + P_g(C))`, where
# `P_f` and `P_g` are probability measures of `f` and `g`;
# `C = {x | f(x) = g(x)}`.
# By definitions: `abs(P_f(A) - P_g(A)) = P_f(A) - P_g(A)`;
# `abs(P_f(B) - P_g(B)) = P_g(B) - P_f(B)`; `P_f(C) = P_g(C)`.
# Therefore: `abs(P_f(A) - P_g(A)) = abs(P_f(B) - P_g(B))`.
distance_totvar <- function(f, g) {
  d_f <- as_d(f)
  d_g <- as_d(g)
  f_is_dis <- meta_type(f) == "discrete"
  g_is_dis <- meta_type(g) == "discrete"
  if (f_is_dis && g_is_dis) {
    distance_totvar_two_dis(d_f, d_g)
  } else if (f_is_dis || g_is_dis) {
    # Mixed types: a target set is all `x` values of the "discrete"
    # pdqr-function. Its probability under "discrete" is 1 and under
    # "continuous" is zero because it is countable.
    1
  } else {
    distance_totvar_two_con(d_f, d_g)
  }
}
# Total variation distance between two "continuous" pdqr-functions (inputs
# are d-functions): the total probability difference on the set
# `{x | d_f(x) > d_g(x)}`.
distance_totvar_two_con <- function(d_f, d_g) {
# `{x | d_f(x) > d_g(x)}` is a union of intervals where `d_f(x) - d_g(x)` has
# constant positive sign. `d_f(x) - d_g(x)` can change sign in two cases:
# - When `d_f` and `d_g` intersect.
# - When either `d_f` or `d_g` shows discontinuity on edges.
x_inters <- compute_density_crossings(d_f, d_g)
# This might introduce duplicate elements on the edges (if `d_f` and `d_g`
# intersect on any support edge) but they will introduce "interval" with zero
# "sign" which will later be ignored.
x_lim <- sort(c(x_inters, meta_support(d_f), meta_support(d_g)))
# Sign of `d_f - d_g` is constant inside each interval, so probing the
# midpoint of every interval is enough to classify it
interval_center <- (x_lim[-1] + x_lim[-length(x_lim)]) / 2
pos_sign_inds <- which(d_f(interval_center) > d_g(interval_center))
# Note: if `pos_sign_inds` is empty, then `f` and `g` are identical. In that
# case both `x_lim_left` and `x_lim_right` are empty and `sum()` later will
# return 0, which is correct answer.
x_lim_left <- x_lim[pos_sign_inds]
x_lim_right <- x_lim[pos_sign_inds + 1]
p_f <- as_p(d_f)
p_g <- as_p(d_g)
# Output distance is total difference in probabilities of intervals where `f`
# is greater than `g`.
sum(
(p_f(x_lim_right) - p_f(x_lim_left)) - (p_g(x_lim_right) - p_g(x_lim_left))
)
}
distance_totvar_two_dis <- function(d_f, d_g) {
  # Evaluate both "discrete" d-functions on the union of their "x" values
  # and accumulate only the positive parts of the probability differences;
  # this is the total variation distance for two "discrete" distributions.
  x_all <- union_x(d_f, d_g)
  sum(pmax(d_f(x_all) - d_g(x_all), 0))
}
# Method "compare" --------------------------------------------------------
# This is basically `max(P(f > g) + 0.5*P(f == g), P(g > f) + 0.5*P(f == g))`,
# normalized to return values from 0 to 1 (`P(x)` is `summ_prob_true(x)`).
# Addition of `0.5*P(f == g)` is to ensure that 0.5 is returned when `f` and `g`
# are identical (useful to think about this as maximum between two "symmetric"
# ROCAUCs computed with "expected" method). This also means zero distance for
# identical inputs.
# Here equation `prob_geq()` is used for performance reasons and based on the
# following equation: `max(P(f>g), P(f<g)) + 0.5*P(f==g) =
# max(P(f>=g), P(f<=g)) - P(f==g) + 0.5*P(f==g)`. After `y = 2*x-1`
# normalization, this is the output.
distance_compare <- function(f, g) {
  # Only two probabilities are needed here because
  # `prob_geq(g, f) = 1 - prob_geq(f, g) + prob_equal(f, g)`.
  p_eq <- prob_equal(f, g)
  p_geq <- prob_geq(f, g)
  bigger <- max(p_geq, 1 - p_geq + p_eq)
  # Map `bigger - 0.5*p_eq` (always inside [0.5, 1]) linearly onto [0, 1]
  2 * bigger - p_eq - 1
}
# Method "wass" -----------------------------------------------------------
distance_wass <- function(f, g) {
  # 1-Wasserstein distance: integral of `|F - G|` over the union support
  integrate_cdf_absdiff(as_p(f), as_p(g), power = 1)
}
# Method "cramer" ---------------------------------------------------------
distance_cramer <- function(f, g) {
  # Cramer distance: integral of `(F - G)^2` over the union support
  integrate_cdf_absdiff(as_p(f), as_p(g), power = 2)
}
# Method "align" ----------------------------------------------------------
# Method "align" distance: absolute value of the shift `delta` that makes
# `f + delta` and `g` "aligned", i.e. `prob_geq(f + delta, g)` equal to 0.5.
# The root is found numerically with `stats::uniroot()`, so a small
# numerical error is possible.
distance_align <- function(f, g) {
f_supp <- meta_support(f)
g_supp <- meta_support(g)
f_geq_g <- prob_geq(f, g) >= 0.5
g_geq_f <- prob_geq(g, f) >= 0.5
# Handle edge case of identical "discrete" pdqr-functions
if (f_geq_g && g_geq_f) {
return(0)
}
if (f_geq_g) {
# Moving `f` to the left
search_interval <- c(g_supp[1] - f_supp[2], 0)
} else {
# Moving `f` to the right
search_interval <- c(0, g_supp[2] - f_supp[1])
}
target_fun <- function(delta) {
prob_geq(f + delta, g) - 0.5
}
# `extendInt = "yes"` lets uniroot widen the bracket if the root happens to
# lie slightly outside the initial search interval
res <- stats::uniroot(
target_fun, interval = search_interval, extendInt = "yes"
)[["root"]]
abs(res)
}
# Method "avgdist" --------------------------------------------------------
distance_avgdist <- function(f, g) {
  # Replace possibly "continuous" inputs with their discrete approximations
  f_tbl <- meta_x_tbl(approx_discrete(f))
  g_tbl <- meta_x_tbl(approx_discrete(g))
  # Expected absolute difference between the two discrete distributions:
  # probability-weighted double sum over the pairwise-distance matrix
  dist_mat <- abs(outer(f_tbl[["x"]], g_tbl[["x"]], "-"))
  sum(f_tbl[["prob"]] * as.vector(dist_mat %*% g_tbl[["prob"]]))
}
# Method "entropy" --------------------------------------------------------
distance_entropy <- function(f, g) {
  # Symmetric Kullback-Leibler divergence `KL(f, g) + KL(g, f)`, where each
  # term mirrors `summ_entropy2(*, *, method = "relative")` without the extra
  # `assert_*()` checks.
  kl_f_g <- cross_entropy(f, g) - cross_entropy(f, f)
  kl_g_f <- cross_entropy(g, f) - cross_entropy(g, g)
  # Clip at zero to account for numerical representation issues
  max(kl_f_g + kl_g_f, 0)
}
# Helpers -----------------------------------------------------------------
# Integral of `|F - G|^power` over the union support of p-functions `p_f`
# and `p_g`. Used by methods "wass" (power = 1) and "cramer" (power = 2).
integrate_cdf_absdiff <- function(p_f, p_g, power) {
if ((meta_type(p_f) == "discrete") && (meta_type(p_g) == "discrete")) {
# Both CDFs are step functions that change only at points of `union_x`, so
# the integral is an exact finite sum of rectangle areas
union_x <- union_x(p_f, p_g)
abs_diff_cumprob <- abs(p_f(union_x) - p_g(union_x))
sum(diff(union_x) * abs_diff_cumprob[-length(union_x)]^power)
} else {
# At least one input is "continuous": integrate numerically (relatively
# small numerical errors can happen)
integr_range <- union_support(p_f, p_g)
stats::integrate(
f = function(x) {
abs(p_f(x) - p_g(x))^power
},
lower = integr_range[1],
upper = integr_range[2],
subdivisions = 1e3
)[["value"]]
}
}
# ---------------------------------------------------------------------------
#################################################
## R analysis script for Experiment 3 of
## Faulkenberry, Cruise, Lavro, & Shaki (in press),
## to appear in Acta Psychologica
####################################################
library(ggplot2)
# Load left-hand trajectory data
rawData<-read.table("leftTrajectoriesExp3.csv",sep=",",header=TRUE)
# clean up data
dataStep3<-subset(rawData,subset=error!=1) # remove errors
meanRT<-mean(dataStep3$RT)
sdRT<-sd(dataStep3$RT)
dataLeft<-subset(dataStep3,subset=RT<meanRT+3*sdRT & RT>meanRT-3*sdRT) # remove 3 SD outliers
# NOTE(review): attach() masks columns in the global environment and the two
# attached data frames share column names; the later code appears to use
# explicit dataLeft$/dataRight$ references, so this may be unnecessary --
# confirm before removing.
attach(dataLeft)
# Load right-hand trajectory data (same cleaning steps)
rawData<-read.table("rightTrajectoriesExp3.csv",sep=",",header=TRUE)
# clean up data
dataStep3<-subset(rawData,subset=error!=1) # remove errors
meanRT<-mean(dataStep3$RT)
sdRT<-sd(dataStep3$RT)
dataRight<-subset(dataStep3,subset=RT<meanRT+3*sdRT & RT>meanRT-3*sdRT) # remove 3 SD outliers
attach(dataRight)
# plot hand trajectories
# Split each side by congruency condition (1 = congruent, 2 = incongruent)
dataLeftCongruent<-subset(dataLeft,condition==1)
dataLeftIncongruent<-subset(dataLeft,condition==2)
dataRightCongruent<-subset(dataRight,condition==1)
dataRightIncongruent<-subset(dataRight,condition==2)
# Average the time-normalized trajectories for each of the 4 cells
# (side x condition); each cell contributes 101 timesteps, 4*101 = 404 rows.
# Columns 23:123 hold the 101 x-coordinates and columns 124:224 the
# matching y-coordinates of each trial's normalized trajectory.
xCoords=rep(0,404)
yCoords=rep(0,404)
side=rep(0,404)
condition=rep(0,404)
for (i in 1:101){
  xCoords[i]=mean(dataLeftCongruent[,i+22])
  yCoords[i]=mean(dataLeftCongruent[,i+123])
  side[i]="left"
  condition[i]="congruent"
  xCoords[i+101]=mean(dataLeftIncongruent[,i+22])
  yCoords[i+101]=mean(dataLeftIncongruent[,i+123])
  side[i+101]="left"
  condition[i+101]="incongruent"
  xCoords[i+202]=mean(dataRightCongruent[,i+22])
  yCoords[i+202]=mean(dataRightCongruent[,i+123])
  side[i+202]="right"
  condition[i+202]="congruent"
  xCoords[i+303]=mean(dataRightIncongruent[,i+22])
  # BUG FIX: y-coordinates for the right/incongruent cell were previously
  # averaged from dataRightCongruent, duplicating the congruent trace.
  yCoords[i+303]=mean(dataRightIncongruent[,i+123])
  side[i+303]="right"
  condition[i+303]="incongruent"
}
# Build the averaged-trajectory figure: one panel per side, line type by
# condition
library("ggplot2")
trajectoryData=data.frame(xCoords,yCoords,side,condition)
plot=ggplot(trajectoryData,aes(x=xCoords,y=yCoords,group=condition))+xlim(-1,1)+ylim(0,1.5)
paths=geom_path(aes(linetype=condition),size=1.3)
labels=labs(x="x-coordinates",y="y-coordinates")
faceting=facet_grid(.~side)
stripFormat=theme(strip.text=element_text(face="bold",size=rel(1.5)))
legendFormat=theme(legend.title=element_text(face="bold",size=rel(1.5)),legend.text=element_text(size=rel(1.5)))
axesFormat=theme(axis.title=element_text(size=rel(1.4)))
basePlot=plot+paths+labels+faceting+stripFormat+legendFormat+axesFormat
basePlot+labs(colour="Condition")+theme(legend.position=c(0.5,0.5))+theme(legend.background=element_rect(fill="white",colour="black"))
# notes: export as 954 x 461
# find out when x-coordinates differ significantly
# x variables go from 23rd column to 123rd column (101 timesteps)
# left trajectories
for (i in 23:123){
test=t.test(dataLeftCongruent[,i],dataLeftIncongruent[,i])
cat(sprintf('X_%i, p=%f \n',i-22,test$p.value))
}
# differed from 26th to 84th timestep
# right trajectories
for (i in 23:123){
test=t.test(dataRightCongruent[,i],dataRightIncongruent[,i])
cat(sprintf('X_%i, p=%f \n',i-22,test$p.value))
}
# differed from 26th to 90th timestep
library(reshape)
# PERFORMANCE MEASURES
# For each measure and side: paired t-test (congruent vs incongruent),
# condition means, and effect size m/s (mean of paired differences divided
# by their SD, i.e. Cohen's d for paired samples)
# RT
# left side
agg=aggregate(RT~subject+condition,data=dataLeft,FUN="mean") # RT performance data aggregated by subject
t.test(agg$RT[agg$condition==1],agg$RT[agg$condition==2],paired=TRUE)
mean(agg$RT[agg$condition==1])
mean(agg$RT[agg$condition==2])
m=mean(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
s=sd(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
m/s
# right side
agg=aggregate(RT~subject+condition,data=dataRight,FUN="mean") # RT performance data aggregated by subject
t.test(agg$RT[agg$condition==1],agg$RT[agg$condition==2],paired=TRUE)
mean(agg$RT[agg$condition==1])
mean(agg$RT[agg$condition==2])
m=mean(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
s=sd(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
m/s
# init
# left side
agg=aggregate(init.time~subject+condition,data=dataLeft,FUN="mean") # RT performance data aggregated by subject
t.test(agg$init.time[agg$condition==1],agg$init.time[agg$condition==2],paired=TRUE)
mean(agg$init.time[agg$condition==1])
mean(agg$init.time[agg$condition==2])
m=mean(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
s=sd(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
m/s
# init
# right side
agg=aggregate(init.time~subject+condition,data=dataRight,FUN="mean") # RT performance data aggregated by subject
t.test(agg$init.time[agg$condition==1],agg$init.time[agg$condition==2],paired=TRUE)
mean(agg$init.time[agg$condition==1])
mean(agg$init.time[agg$condition==2])
m=mean(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
s=sd(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
m/s
# movement duration (RT minus initiation time)
# left side
agg=aggregate(RT-init.time~subject+condition,data=dataLeft,FUN="mean") # RT performance data aggregated by subject
names(agg)<-c("subject","condition","duration")
t.test(agg$duration[agg$condition==1],agg$duration[agg$condition==2],paired=TRUE)
mean(agg$duration[agg$condition==1])
mean(agg$duration[agg$condition==2])
m=mean(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
s=sd(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
m/s
# right side
agg=aggregate(RT-init.time~subject+condition,data=dataRight,FUN="mean") # RT performance data aggregated by subject
names(agg)<-c("subject","condition","duration")
t.test(agg$duration[agg$condition==1],agg$duration[agg$condition==2],paired=TRUE)
mean(agg$duration[agg$condition==1])
mean(agg$duration[agg$condition==2])
m=mean(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
s=sd(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
m/s
# AUC
# left side
agg=aggregate(AUC~subject+condition,data=dataLeft,FUN="mean") # RT performance data aggregated by subject
t.test(agg$AUC[agg$condition==1],agg$AUC[agg$condition==2],paired=TRUE)
mean(agg$AUC[agg$condition==1])
mean(agg$AUC[agg$condition==2])
m=mean(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
s=sd(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
m/s
# right side
agg=aggregate(AUC~subject+condition,data=dataRight,FUN="mean") # RT performance data aggregated by subject
t.test(agg$AUC[agg$condition==1],agg$AUC[agg$condition==2],paired=TRUE)
mean(agg$AUC[agg$condition==1])
mean(agg$AUC[agg$condition==2])
m=mean(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
s=sd(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
m/s
# Per-subject analyses (left side). Subject 42 is skipped (loops run 1:41
# then 43:52, storing at index i-1), leaving 51 subjects -- presumably an
# excluded participant; confirm against the paper's exclusion criteria.
RTcongruentLeft=rep(0,51)
RTincongruentLeft=rep(0,51)
for (i in 1:41){
RTcongruentLeft[i]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==1])
RTincongruentLeft[i]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
RTcongruentLeft[i-1]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==1])
RTincongruentLeft[i-1]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==2])
}
mean(RTcongruentLeft)
mean(RTincongruentLeft)
t.test(RTcongruentLeft,RTincongruentLeft,paired=TRUE)
m=mean(RTincongruentLeft-RTcongruentLeft)
s=sd(RTincongruentLeft-RTcongruentLeft)
m/s
# Initiation time per subject. NOTE(review): `dataLeft$init` relies on
# partial matching to reach `init.time` -- verify no other column name
# starts with "init".
InitcongruentLeft=rep(0,51)
InitincongruentLeft=rep(0,51)
for (i in 1:41){
InitcongruentLeft[i]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==1])
InitincongruentLeft[i]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
InitcongruentLeft[i-1]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==1])
InitincongruentLeft[i-1]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==2])
}
m=mean(InitincongruentLeft-InitcongruentLeft)
s=sd(InitincongruentLeft-InitcongruentLeft)
m/s
mean(InitcongruentLeft)
mean(InitincongruentLeft)
t.test(InitcongruentLeft,InitincongruentLeft,paired=TRUE)
# Area under the curve per subject (left side uses column AUC_2) -- confirm
# the AUC_1/AUC_2 mapping to sides against the data codebook.
AUCcongruentLeft=rep(0,51)
AUCincongruentLeft=rep(0,51)
for (i in 1:41){
AUCcongruentLeft[i]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==1])
AUCincongruentLeft[i]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
AUCcongruentLeft[i-1]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==1])
AUCincongruentLeft[i-1]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==2])
}
m=mean(AUCincongruentLeft-AUCcongruentLeft)
s=sd(AUCincongruentLeft-AUCcongruentLeft)
m/s
t.test(AUCcongruentLeft,AUCincongruentLeft,paired=TRUE)
# right side
# Per-subject analyses (right side). Subject 42 is skipped (loops run 1:41
# then 43:52, storing at index i-1), leaving 51 subjects.
RTcongruentRight=rep(0,51)
RTincongruentRight=rep(0,51)
for (i in 1:41){
RTcongruentRight[i]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==1])
RTincongruentRight[i]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
RTcongruentRight[i-1]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==1])
RTincongruentRight[i-1]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==2])
}
m=mean(RTincongruentRight-RTcongruentRight)
s=sd(RTincongruentRight-RTcongruentRight)
m/s
mean(RTcongruentRight)
mean(RTincongruentRight)
t.test(RTcongruentRight,RTincongruentRight,paired=TRUE)
# Initiation time per subject. NOTE(review): `dataRight$init` relies on
# partial matching to reach `init.time` -- verify no other column name
# starts with "init".
InitcongruentRight=rep(0,51)
InitincongruentRight=rep(0,51)
for (i in 1:41){
InitcongruentRight[i]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==1])
InitincongruentRight[i]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
InitcongruentRight[i-1]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==1])
InitincongruentRight[i-1]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==2])
}
m=mean(InitincongruentRight-InitcongruentRight)
s=sd(InitincongruentRight-InitcongruentRight)
m/s
mean(InitcongruentRight)
mean(InitincongruentRight)
t.test(InitcongruentRight,InitincongruentRight,paired=TRUE)
# Area under the curve per subject (right side uses column AUC_1) -- confirm
# the AUC_1/AUC_2 mapping to sides against the data codebook.
AUCcongruentRight=rep(0,51)
AUCincongruentRight=rep(0,51)
for (i in 1:41){
AUCcongruentRight[i]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==1])
AUCincongruentRight[i]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
AUCcongruentRight[i-1]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==1])
AUCincongruentRight[i-1]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==2])
}
m=mean(AUCincongruentRight-AUCcongruentRight)
s=sd(AUCincongruentRight-AUCcongruentRight)
m/s
mean(AUCcongruentRight)
mean(AUCincongruentRight)
t.test(AUCcongruentRight,AUCincongruentRight,paired=TRUE)
#################################################
## R analysis script for Experiment 3 of
## Faulkenberry, Cruise, Lavro, & Shaki (in press),
## to appear in Acta Psychologica
####################################################
library(ggplot2)
# Load left-hand trajectory data
rawData<-read.table("leftTrajectoriesExp3.csv",sep=",",header=TRUE)
# clean up data
dataStep3<-subset(rawData,subset=error!=1) # remove errors
meanRT<-mean(dataStep3$RT)
sdRT<-sd(dataStep3$RT)
dataLeft<-subset(dataStep3,subset=RT<meanRT+3*sdRT & RT>meanRT-3*sdRT) # remove 3 SD outliers
# NOTE(review): attach() masks columns in the global environment and the two
# attached data frames share column names; the later code appears to use
# explicit dataLeft$/dataRight$ references, so this may be unnecessary --
# confirm before removing.
attach(dataLeft)
# Load right-hand trajectory data (same cleaning steps)
rawData<-read.table("rightTrajectoriesExp3.csv",sep=",",header=TRUE)
# clean up data
dataStep3<-subset(rawData,subset=error!=1) # remove errors
meanRT<-mean(dataStep3$RT)
sdRT<-sd(dataStep3$RT)
dataRight<-subset(dataStep3,subset=RT<meanRT+3*sdRT & RT>meanRT-3*sdRT) # remove 3 SD outliers
attach(dataRight)
# plot hand trajectories
# Split each side by congruency condition (1 = congruent, 2 = incongruent)
dataLeftCongruent<-subset(dataLeft,condition==1)
dataLeftIncongruent<-subset(dataLeft,condition==2)
dataRightCongruent<-subset(dataRight,condition==1)
dataRightIncongruent<-subset(dataRight,condition==2)
# Average the time-normalized trajectories for each of the 4 cells
# (side x condition); each cell contributes 101 timesteps, 4*101 = 404 rows.
# Columns 23:123 hold the 101 x-coordinates and columns 124:224 the
# matching y-coordinates of each trial's normalized trajectory.
xCoords=rep(0,404)
yCoords=rep(0,404)
side=rep(0,404)
condition=rep(0,404)
for (i in 1:101){
  xCoords[i]=mean(dataLeftCongruent[,i+22])
  yCoords[i]=mean(dataLeftCongruent[,i+123])
  side[i]="left"
  condition[i]="congruent"
  xCoords[i+101]=mean(dataLeftIncongruent[,i+22])
  yCoords[i+101]=mean(dataLeftIncongruent[,i+123])
  side[i+101]="left"
  condition[i+101]="incongruent"
  xCoords[i+202]=mean(dataRightCongruent[,i+22])
  yCoords[i+202]=mean(dataRightCongruent[,i+123])
  side[i+202]="right"
  condition[i+202]="congruent"
  xCoords[i+303]=mean(dataRightIncongruent[,i+22])
  # BUG FIX: y-coordinates for the right/incongruent cell were previously
  # averaged from dataRightCongruent, duplicating the congruent trace.
  yCoords[i+303]=mean(dataRightIncongruent[,i+123])
  side[i+303]="right"
  condition[i+303]="incongruent"
}
# Build the averaged-trajectory figure: one panel per side, line type by
# condition
library("ggplot2")
trajectoryData=data.frame(xCoords,yCoords,side,condition)
plot=ggplot(trajectoryData,aes(x=xCoords,y=yCoords,group=condition))+xlim(-1,1)+ylim(0,1.5)
paths=geom_path(aes(linetype=condition),size=1.3)
labels=labs(x="x-coordinates",y="y-coordinates")
faceting=facet_grid(.~side)
stripFormat=theme(strip.text=element_text(face="bold",size=rel(1.5)))
legendFormat=theme(legend.title=element_text(face="bold",size=rel(1.5)),legend.text=element_text(size=rel(1.5)))
axesFormat=theme(axis.title=element_text(size=rel(1.4)))
basePlot=plot+paths+labels+faceting+stripFormat+legendFormat+axesFormat
basePlot+labs(colour="Condition")+theme(legend.position=c(0.5,0.5))+theme(legend.background=element_rect(fill="white",colour="black"))
# notes: export as 954 x 461
# find out when x-coordinates differ significantly
# x variables go from 23rd column to 123rd column (101 timesteps)
# left trajectories
for (i in 23:123){
test=t.test(dataLeftCongruent[,i],dataLeftIncongruent[,i])
cat(sprintf('X_%i, p=%f \n',i-22,test$p.value))
}
# differed from 26th to 84th timestep
# right trajectories
for (i in 23:123){
test=t.test(dataRightCongruent[,i],dataRightIncongruent[,i])
cat(sprintf('X_%i, p=%f \n',i-22,test$p.value))
}
# differed from 26th to 90th timestep
library(reshape)
# PERFORMANCE MEASURES
# RT
# left side
agg=aggregate(RT~subject+condition,data=dataLeft,FUN="mean") # RT performance data aggregated by subject
t.test(agg$RT[agg$condition==1],agg$RT[agg$condition==2],paired=TRUE)
mean(agg$RT[agg$condition==1])
mean(agg$RT[agg$condition==2])
m=mean(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
s=sd(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
m/s
# right side
agg=aggregate(RT~subject+condition,data=dataRight,FUN="mean") # RT performance data aggregated by subject
t.test(agg$RT[agg$condition==1],agg$RT[agg$condition==2],paired=TRUE)
mean(agg$RT[agg$condition==1])
mean(agg$RT[agg$condition==2])
m=mean(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
s=sd(agg$RT[agg$condition==2]-agg$RT[agg$condition==1])
m/s
# init (movement initiation time), same recipe as the RT section above:
# aggregate by subject x condition, paired t-test, means, paired Cohen's d.
# left side
agg=aggregate(init.time~subject+condition,data=dataLeft,FUN="mean") # init.time aggregated by subject and condition
t.test(agg$init.time[agg$condition==1],agg$init.time[agg$condition==2],paired=TRUE)
mean(agg$init.time[agg$condition==1])
mean(agg$init.time[agg$condition==2])
# Effect size (Cohen's d for paired samples).
m=mean(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
s=sd(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
m/s
# init
# right side
agg=aggregate(init.time~subject+condition,data=dataRight,FUN="mean") # init.time aggregated by subject and condition
t.test(agg$init.time[agg$condition==1],agg$init.time[agg$condition==2],paired=TRUE)
mean(agg$init.time[agg$condition==1])
mean(agg$init.time[agg$condition==2])
m=mean(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
s=sd(agg$init.time[agg$condition==2]-agg$init.time[agg$condition==1])
m/s
# movement duration = RT - init.time, same recipe as the sections above.
# aggregate() accepts the arithmetic expression on the formula LHS; the
# resulting column gets an awkward auto-generated name, so it is renamed
# to "duration" immediately after.
# left side
agg=aggregate(RT-init.time~subject+condition,data=dataLeft,FUN="mean") # duration aggregated by subject and condition
names(agg)<-c("subject","condition","duration")
t.test(agg$duration[agg$condition==1],agg$duration[agg$condition==2],paired=TRUE)
mean(agg$duration[agg$condition==1])
mean(agg$duration[agg$condition==2])
# Effect size (Cohen's d for paired samples).
m=mean(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
s=sd(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
m/s
# right side
agg=aggregate(RT-init.time~subject+condition,data=dataRight,FUN="mean") # duration aggregated by subject and condition
names(agg)<-c("subject","condition","duration")
t.test(agg$duration[agg$condition==1],agg$duration[agg$condition==2],paired=TRUE)
mean(agg$duration[agg$condition==1])
mean(agg$duration[agg$condition==2])
m=mean(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
s=sd(agg$duration[agg$condition==2]-agg$duration[agg$condition==1])
m/s
# AUC (area under the trajectory curve), same recipe as the sections above.
# left side
agg=aggregate(AUC~subject+condition,data=dataLeft,FUN="mean") # AUC aggregated by subject and condition
t.test(agg$AUC[agg$condition==1],agg$AUC[agg$condition==2],paired=TRUE)
mean(agg$AUC[agg$condition==1])
mean(agg$AUC[agg$condition==2])
# Effect size (Cohen's d for paired samples).
m=mean(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
s=sd(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
m/s
# right side
agg=aggregate(AUC~subject+condition,data=dataRight,FUN="mean") # AUC aggregated by subject and condition
t.test(agg$AUC[agg$condition==1],agg$AUC[agg$condition==2],paired=TRUE)
mean(agg$AUC[agg$condition==1])
mean(agg$AUC[agg$condition==2])
m=mean(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
s=sd(agg$AUC[agg$condition==2]-agg$AUC[agg$condition==1])
m/s
# Per-subject means for the left side, computed manually instead of via
# aggregate(). Subjects are numbered 1-52 but subject 42 is excluded
# (presumably dropped from the sample — TODO confirm), leaving 51 entries:
# the first loop fills slots 1-41, the second fills slots 42-51 with
# subjects 43-52 (index i-1).
# RT per subject, by condition (1 = congruent, 2 = incongruent).
RTcongruentLeft=rep(0,51)
RTincongruentLeft=rep(0,51)
for (i in 1:41){
RTcongruentLeft[i]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==1])
RTincongruentLeft[i]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
RTcongruentLeft[i-1]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==1])
RTincongruentLeft[i-1]<-mean(dataLeft$RT[dataLeft$subject==i & dataLeft$condition==2])
}
mean(RTcongruentLeft)
mean(RTincongruentLeft)
t.test(RTcongruentLeft,RTincongruentLeft,paired=TRUE)
# Paired Cohen's d: mean(difference) / sd(difference).
m=mean(RTincongruentLeft-RTcongruentLeft)
s=sd(RTincongruentLeft-RTcongruentLeft)
m/s
# Initiation time per subject. NOTE(review): `dataLeft$init` appears to rely
# on R's partial matching of `$` to reach the `init.time` column used
# elsewhere in this script — confirm there is no separate `init` column.
InitcongruentLeft=rep(0,51)
InitincongruentLeft=rep(0,51)
for (i in 1:41){
InitcongruentLeft[i]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==1])
InitincongruentLeft[i]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
InitcongruentLeft[i-1]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==1])
InitincongruentLeft[i-1]<-mean(dataLeft$init[dataLeft$subject==i & dataLeft$condition==2])
}
m=mean(InitincongruentLeft-InitcongruentLeft)
s=sd(InitincongruentLeft-InitcongruentLeft)
m/s
mean(InitcongruentLeft)
mean(InitincongruentLeft)
t.test(InitcongruentLeft,InitincongruentLeft,paired=TRUE)
# AUC per subject; the left side uses the AUC_2 column (the right side below
# uses AUC_1 — presumably side-specific AUC variants, TODO confirm).
AUCcongruentLeft=rep(0,51)
AUCincongruentLeft=rep(0,51)
for (i in 1:41){
AUCcongruentLeft[i]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==1])
AUCincongruentLeft[i]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==2])
}
for (i in 43:52){
AUCcongruentLeft[i-1]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==1])
AUCincongruentLeft[i-1]<-mean(dataLeft$AUC_2[dataLeft$subject==i & dataLeft$condition==2])
}
m=mean(AUCincongruentLeft-AUCcongruentLeft)
s=sd(AUCincongruentLeft-AUCcongruentLeft)
m/s
t.test(AUCcongruentLeft,AUCincongruentLeft,paired=TRUE)
# right side: same per-subject means as the left-side block above (51
# subjects; subject 42 excluded, subjects 43-52 shifted down by one slot).
# RT per subject, by condition (1 = congruent, 2 = incongruent).
RTcongruentRight=rep(0,51)
RTincongruentRight=rep(0,51)
for (i in 1:41){
RTcongruentRight[i]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==1])
RTincongruentRight[i]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
RTcongruentRight[i-1]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==1])
RTincongruentRight[i-1]<-mean(dataRight$RT[dataRight$subject==i & dataRight$condition==2])
}
# Paired Cohen's d: mean(difference) / sd(difference).
m=mean(RTincongruentRight-RTcongruentRight)
s=sd(RTincongruentRight-RTcongruentRight)
m/s
mean(RTcongruentRight)
mean(RTincongruentRight)
t.test(RTcongruentRight,RTincongruentRight,paired=TRUE)
# Initiation time per subject. NOTE(review): `dataRight$init` appears to rely
# on partial matching of `$` to the `init.time` column — confirm.
InitcongruentRight=rep(0,51)
InitincongruentRight=rep(0,51)
for (i in 1:41){
InitcongruentRight[i]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==1])
InitincongruentRight[i]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
InitcongruentRight[i-1]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==1])
InitincongruentRight[i-1]<-mean(dataRight$init[dataRight$subject==i & dataRight$condition==2])
}
m=mean(InitincongruentRight-InitcongruentRight)
s=sd(InitincongruentRight-InitcongruentRight)
m/s
mean(InitcongruentRight)
mean(InitincongruentRight)
t.test(InitcongruentRight,InitincongruentRight,paired=TRUE)
# AUC per subject; the right side uses the AUC_1 column (left used AUC_2).
AUCcongruentRight=rep(0,51)
AUCincongruentRight=rep(0,51)
for (i in 1:41){
AUCcongruentRight[i]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==1])
AUCincongruentRight[i]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==2])
}
for (i in 43:52){
AUCcongruentRight[i-1]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==1])
AUCincongruentRight[i-1]<-mean(dataRight$AUC_1[dataRight$subject==i & dataRight$condition==2])
}
m=mean(AUCincongruentRight-AUCcongruentRight)
s=sd(AUCincongruentRight-AUCcongruentRight)
m/s
mean(AUCcongruentRight)
mean(AUCincongruentRight)
t.test(AUCcongruentRight,AUCincongruentRight,paired=TRUE)
|
#' @title Combine SKAT-O analyses from one or more studies.
#'
#' @description Takes as input `seqMeta` objects (from e.g.
#' \code{\link{prepScores}}), and meta analyzes them, using SKAT-O. See the
#' package vignette for more extensive documentation.
#'
#' @inheritParams singlesnpMeta
#' @inheritParams burdenMeta
#' @param skat.wts Either a function to calculate testing weights for SKAT, or a
#' character specifying a vector of weights in the SNPInfo file. For skatOMeta
#' the default are the `beta' weights.
#' @param burden.wts Either a function to calculate weights for the burden test,
#' or a character specifying a vector of weights in the SNPInfo file. For
#' skatOMeta the default are the T1 weights.
#' @param rho A sequence of values that specify combinations of SKAT and a burden test to be considered. Default is c(0,1), which considers SKAT and a burden test.
#' @param method p-value calculation method. Should be one of 'saddlepoint', 'integration', or 'liu'.
#'
#' @details \code{skatOMeta()} implements the SKAT-Optimal test, which picks the
#' `best' combination of SKAT and a burden test, and then corrects for the
#' flexibility afforded by this choice. Specifically, if the SKAT statistic is
#' Q1, and the squared score for a burden test is Q2, SKAT-O considers tests
#' of the form (1-rho)*Q1 + rho*Q2, where rho between 0 and 1. The values of
#' rho are specified by the user using the argument \code{rho}. In the
#' simplest form, which is the default, SKAT-O computes a SKAT test and a T1
#' test, and reports the minimum p-value, corrected for multiple testing. See
#' the vignette or the accompanying references for more details.
#'
#' If there is a single variant in the gene, or the burden test is undefined
#' (e.g. there are no rare alleles for the T1 test), SKAT is reported (i.e.
#' rho=0).
#'
#' Note 1: the SKAT package uses the same weights for both SKAT and the burden
#' test, which this function does not.
#'
#' Note 2: all studies must use coordinated SNP Info files - that is, the SNP
#' names and gene definitions must be the same.
#'
#' Note 3: The method of p-value calculation is much more important here than
#' in SKAT. The `integration' method is fast and typically accurate for
#' p-values larger than 1e-9. The saddlepoint method is slower, but has higher
#' relative accuracy.
#'
#' Note 4: Since p-value calculation can be slow for SKAT-O, and less accurate
#' for small p-values, a reasonable alternative would be to first calculate
#' SKAT and a burden test, and record the minimum p-value, which is a lower
#' bound for the SKAT-O p-value. This can be done quickly and accurately.
#' Then, one would only need to perform SKAT-O on the small subset of genes
#' that are potentially interesting.
#'
#' Please see the package vignette for more details.
#'
#' @return a data frame with the following columns:
#' \item{gene}{Name of the gene or unit of aggregation being meta analyzed}
#' \item{p}{p-value of the SKAT-O test.}
#' \item{pmin}{The minimum of the p-values considered by SKAT-O (not corrected for multiple testing!).}
#' \item{rho}{The value of rho which gave the smallest p-value.}
#' \item{cmaf}{The cumulative minor allele frequency.}
#' \item{nmiss}{The number of `missing` SNPs. For a gene with a single SNP
#' this is the number of individuals which do not contribute to the analysis,
#' due to studies that did not report results for that SNP. For a gene with
#' multiple SNPs, is totalled over the gene. }
#' \item{nsnps}{The number of SNPs in the gene.}
#' \item{errflag}{An indicator of possible error: 0 suggests no error, > 0
#' indicates probable loss of accuracy.}
#'
#' @references Wu, M.C., Lee, S., Cai, T., Li, Y., Boehnke, M., and Lin, X.
#' (2011) Rare Variant Association Testing for Sequencing Data Using the
#' Sequence Kernel Association Test (SKAT). American Journal of Human
#' Genetics.
#'
#' Lee, S. and Wu, M.C. and Lin, X. (2012) Optimal tests for rare variant
#' effects in sequencing association studies. Biostatistics.
#'
#' @author Arie Voorman, Jennifer Brody
#' @seealso
#' \code{\link{skatOMeta}}
#' \code{\link{prepScores}}
#' \code{\link{burdenMeta}}
#' \code{\link{singlesnpMeta}}
#'
#' @examples
#' \dontrun{
#' ### load example data for 2 studies
#' data(seqMetaExample)
#'
#' ####run on each study:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo = SNPInfo, data =pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo = SNPInfo, kins=kins, data=pheno2)
#'
#' #### combine results:
#' ##skat-O with default settings:
#' out1 <- skatOMeta(cohort1, cohort2, SNPInfo = SNPInfo, method = "int")
#' head(out1)
#'
#' ##skat-O, using a large number of combinations between SKAT and T1 tests:
#' out2 <- skatOMeta(cohort1, cohort2, rho=seq(0,1,length=11), SNPInfo=SNPInfo, method="int")
#' head(out2)
#'
#' #rho = 0 indicates SKAT gave the smaller p-value (or the T1 is undefined)
#' #rho=1 indicates the burden test was chosen
#' # 0 < rho < 1 indicates some other value was chosen
#' #notice that most of the time either the SKAT or T1 is chosen
#' table(out2$rho)
#'
#' ##skat-O with beta-weights used in the burden test:
#' out3 <- skatOMeta(cohort1,cohort2, burden.wts = function(maf){dbeta(maf,1,25) },
#' rho=seq(0,1,length=11),SNPInfo = SNPInfo, method="int")
#' head(out3)
#' table(out3$rho)
#'
#' ########################
#' ####binary data
#' cohort1 <- prepScores(Z=Z1, ybin~1, family=binomial(), SNPInfo=SNPInfo, data=pheno1)
#' out.bin <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.bin)
#'
#' ####################
#' ####survival data
#' cohort1 <- prepCox(Z=Z1, Surv(time,status)~strata(sex)+bmi, SNPInfo=SNPInfo,
#' data=pheno1)
#' out.surv <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.surv)
#'
#' ##########################################
#' ###Compare with SKAT and T1 tests on their own:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo=SNPInfo, data=pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo=SNPInfo, kins=kins, data=pheno2)
#'
#' out.skat <- skatMeta(cohort1,cohort2,SNPInfo=SNPInfo)
#' out.t1 <- burdenMeta(cohort1,cohort2, wts= function(maf){as.numeric(maf <= 0.01)},
#' SNPInfo=SNPInfo)
#'
#' #plot results
#' #We compare the minimum p-value of SKAT and T1, adjusting for multiple tests
#' #using the Sidak correction, to that of SKAT-O.
#'
#' par(mfrow=c(1,3))
#' pseq <- seq(0,1,length=100)
#' plot(y=out.skat$p, x=out1$p,xlab="SKAT-O p-value", ylab="SKAT p-value", main ="SKAT-O vs SKAT")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=out.t1$p, x=out1$p,xlab="SKAT-O p-value", ylab="T1 p-value", main ="SKAT-O vs T1")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=pmin(out.t1$p, out.skat$p,na.rm=T), x=out1$p,xlab="SKAT-O p-value",
#' ylab="min(T1,SKAT) p-value", main ="min(T1,SKAT) vs SKAT-O")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#' legend("bottomright", lwd=2,lty=2,col=2,legend="Sidak correction")
#' }
#'
#' @export
skatOMeta <- function(..., SNPInfo=NULL, skat.wts=function(maf){dbeta(maf,1,25)}, burden.wts=function(maf){as.numeric(maf <= 0.01) }, rho=c(0,1), method=c("integration", "saddlepoint", "liu"), snpNames="Name", aggregateBy="gene", mafRange=c(0,0.5), verbose=FALSE) {
# Capture the unevaluated call: the ... arguments are seqMeta objects which
# are recovered *by name* from the caller's environment further down (non-
# standard evaluation; the objects are never passed by value).
cl <- match.call(expand.dots = FALSE)
if(is.null(SNPInfo)){
# No SNPInfo supplied: fall back to the SNPInfo data set shipped with the
# seqMeta package, which aggregates by the "SKATgene" column.
warning("No SNP Info file provided: loading the Illumina HumanExome BeadChip. See ?SNPInfo for more details")
load(paste(find.package("seqMeta"), "data", "SNPInfo.rda",sep = "/"))
aggregateBy = "SKATgene"
} else {
# Validate/normalize the user-supplied SNPInfo, including any weight columns.
SNPInfo <- prepSNPInfo(SNPInfo, snpNames, aggregateBy, wt1=skat.wts, wt2=burden.wts)
}
if(any(rho >1 | rho < 0 ) ) stop("rho must be between 0 and 1")
method <- match.arg(method)
#if( !(method %in% c("davies","farebrother","imhof","liu")) ) stop("Method specified is not valid! See documentation")
genelist <- na.omit(unique(SNPInfo[,aggregateBy]))
# Names of the seqMeta objects passed through ..., taken from the call.
cohortNames <- lapply(cl[[2]],as.character)
ncohort <- length(cohortNames)
ev <- parent.frame()
# Every ... argument must be a seqMeta (or legacy skatCohort) object.
classes <- unlist(lapply(cohortNames,function(name){class(get(name,envir=ev))}))
if(!all(classes == "seqMeta" | classes == "skatCohort") ){
stop("an argument to ... is not a seqMeta object!")
}
# One output row per gene: gene names plus the numeric result columns.
res.strings <- data.frame("gene"=genelist,stringsAsFactors=F)
res.numeric <- matrix(NA, nrow= nrow(res.strings),ncol = length(c("p","pmin","rho","cmaf","nmiss", "nsnps", "errflag")))
colnames(res.numeric) <- c("p","pmin","rho","cmaf","nmiss", "nsnps","errflag")
if(verbose){
cat("\n Meta Analyzing... Progress:\n")
pb <- txtProgressBar(min = 0, max = length(genelist), style = 3)
pb.i <- 0
}
ri <- 0
# SNP names grouped by gene, in SNPInfo order.
snp.names.list <- split(SNPInfo[,snpNames],SNPInfo[,aggregateBy])
for(gene in genelist){
ri <- ri+1
# Accumulators across cohorts: meta score vector, allele-count-weighted
# maf, pooled covariance, and per-SNP sample counts.
nsnps.sub <- length(snp.names.list[[gene]])
mscores <- maf <- numeric(nsnps.sub)
big.cov <- Matrix(0, nsnps.sub,nsnps.sub)
n.total <- numeric(nsnps.sub)
n.miss <- numeric(nsnps.sub)
vary.ave <- 0
for(cohort.k in 1:ncohort){
# Fetch this cohort's per-gene element by name from the caller's frame.
cohort.gene <- get(cohortNames[[cohort.k]],envir=ev)[[gene]]
if(!is.null(cohort.gene)){
# Align the cohort's SNPs to the SNPInfo ordering; SNPs the cohort did
# not report get zero score/covariance and maf flagged as -1 (missing).
sub <- match(snp.names.list[[gene]],colnames(cohort.gene$cov))
if(any(is.na(sub)) | any(sub != 1:length(sub), na.rm=TRUE) | length(cohort.gene$maf) > nsnps.sub){
#if(any(is.na(sub))) warning("Some SNPs were not in SNPInfo file for gene ", gene," and cohort ",names(cohorts)[cohort.k])
cohort.gene$cov <- as.matrix(cohort.gene$cov)[sub,sub,drop=FALSE]
cohort.gene$cov[is.na(sub),] <- cohort.gene$cov[,is.na(sub)] <- 0
cohort.gene$maf <- cohort.gene$maf[sub]
cohort.gene$maf[is.na(sub)] <- -1
cohort.gene$scores <- cohort.gene$scores[sub]
cohort.gene$scores[is.na(sub)] <- 0
}
# Track per-SNP sample sizes: contributing vs missing individuals.
n.total[cohort.gene$maf >= 0] <- n.total[cohort.gene$maf >= 0]+cohort.gene$n
n.miss[cohort.gene$maf < 0] <- n.miss[cohort.gene$maf < 0] + cohort.gene$n
cohort.gene$maf[cohort.gene$maf < 0] <- 0
# Inverse-variance (1/sey^2) weighted accumulation of scores and
# covariance; maf is accumulated as allele counts (2 * maf * n).
mscores <- mscores + cohort.gene$scores/cohort.gene$sey^2
maf <- maf + 2*cohort.gene$maf*(cohort.gene$n)
big.cov <- big.cov + cohort.gene$cov/cohort.gene$sey^2
vary.ave <- vary.ave + max(cohort.gene$n,na.rm=T)*cohort.gene$sey^2
}else{
# Cohort has no entry for this gene: all its individuals count as missing.
n.miss <- n.miss + get(cohortNames[[cohort.k]],envir=parent.frame())[[1]]$n
}
}
if(any(maf >0)){
# Convert pooled allele counts back to frequencies and fold to minor
# allele frequency; optionally restrict to the requested maf window.
maf <- maf/(2*n.total)
maf[is.nan(maf)] <- 0
maf <- sapply(maf, function(x){min(x,1-x)})
if( !all(mafRange == c(0,0.5))){
keep <- (maf >= min(mafRange)) & (maf <= max(mafRange))
big.cov <- big.cov[keep,keep]
mscores <- mscores[keep]
maf <- maf[keep]
}
}
if(length(maf)> 0){
# Resolve SKAT weights (w1): user function of maf, a SNPInfo column name,
# or flat weights.
if(is.function(skat.wts)){
w1 <- skat.wts(maf)
} else if(is.character(skat.wts)){
w1 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,skat.wts])
} else {
w1 <- rep(1,length(maf))
}
# Resolve burden weights (w2) in the same way.
if(is.function(burden.wts)){
w2 <- burden.wts(maf)
} else if(is.character(burden.wts)){
w2 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,burden.wts])
} else {
w2 <- rep(1,length(maf))
}
# Monomorphic SNPs (maf == 0) contribute nothing.
w1 <- ifelse(maf >0, w1,0)
w2 <- ifelse(maf >0, w2,0)
##
# SKAT statistic and weighted covariance; burden statistic and its variance.
Q.skat <- sum((w1*mscores)^2, na.rm=TRUE)
V.skat <- (w1)*t(t(big.cov)*as.vector(w1))
Q.burden <- sum(w2*mscores, na.rm=TRUE)^2
V.burden <- as.numeric(t(w2)%*%big.cov%*%w2)
#If burden test is 0, or only 1 SNP in the gene, do SKAT:
if(sum(maf > 0) ==1 | V.burden ==0){
# SKAT-only path: p-value of the quadratic form via its eigenvalues.
lambda <- eigen(zapsmall(V.skat), symmetric = TRUE)$values
if(any(lambda > 0) & length(lambda) >1) {
tmpP <- pchisqsum2(Q.skat,lambda=lambda,method=method, acc=1e-7)
if(tmpP$errflag !=0 ){
res.numeric[ri,"errflag"] = 1
} else {
res.numeric[ri,"errflag"] = 0
}
p <- tmpP$p
} else {
# Single positive eigenvalue: scaled 1-df chi-square; otherwise p = 1.
p <- ifelse(length(lambda) == 1 & all(lambda > 0), pchisq(Q.skat/lambda,df=1,lower.tail=FALSE),1)
res.numeric[ri,"errflag"] = 0
}
res.numeric[ri,"pmin"] = res.numeric[ri,"p"] = p
res.numeric[ri,"rho"] = 0
#Else do SKAT-O
} else {
# Full SKAT-O: optimize over the rho grid and correct for the selection.
skato.res <- skatO_getp(mscores, big.cov, diag(w1), w2, rho, method= method, gene=gene)
res.numeric[ri,"p"] <- skato.res$actualp
res.numeric[ri,"pmin"] = skato.res$minp
res.numeric[ri,"rho"] = skato.res$rho
res.numeric[ri, "errflag"] = skato.res$errflag
}
} else {
# No SNPs left after filtering: report a null result for this gene.
res.numeric[ri,"p"] <- res.numeric[ri,"pmin"] <- 1
res.numeric[ri,"rho"] <- 0
res.numeric[ri, "errflag"] <- 0
}
# Summary columns: cumulative maf, number of polymorphic SNPs, and the
# total missing-individual count over the gene.
res.numeric[ri,"cmaf"] = sum(maf,na.rm=TRUE)
res.numeric[ri,"nsnps"] = sum(maf!= 0, na.rm =T)
res.numeric[ri,"nmiss"] = sum(n.miss, na.rm =T)
if(verbose){
pb.i <- pb.i+1
setTxtProgressBar(pb, pb.i)
}
}
if(verbose) close(pb)
return(cbind(res.strings,res.numeric))
}
skatO_getp <- function(U,V, R, w, rho,method = "davies", gene=NULL){
## Compute the SKAT-O p-value: combine the SKAT and burden statistics over
## a grid of rho values, take the minimum p-value, and correct for that
## selection via a one-dimensional conditional integral.
##Input:
#U: score vector (length p)
#V: p x p covariance matrix of the scores
#R: p x p weight matrix for skat
#w: burden weights (length p)
#rho: vector of rhos in [0,1]
#method: method for calculating Normal quadratic form distribution
#gene: The name of the region - used for error reporting
##Output: a list with elements
#minp: the minimum p-value over the rho grid (uncorrected)
#actualp: the selection-corrected SKAT-O p-value
#rho: the value of rho which gave the minp
#ps: the whole vector of per-rho p-values
#errflag: 0 = no problem; 1 = per-rho p-value fell back to Liu; 2 = quantile
#  search failed or integration lost accuracy; 3 = integration fell back to Liu
# Satterthwaite fit: match a scaled chi-square to the eigenvalue mixture `a`
# with multiplicities `df` (used only to bracket the quantile search below).
satterthwaite <- function(a, df) {
if (any(df > 1)) {
a <- rep(a, df)
}
tr <- mean(a)
tr2 <- mean(a^2)/(tr^2)
list(scale = tr * tr2, df = length(a)/tr2)
}
errflag <- 0
# Q statistics. as.numeric() collapses the 1x1 matrix results so that the
# elementwise combination with the rho vector yields a plain numeric vector
# regardless of length(rho).
Q.skat <- as.numeric(crossprod(R%*%U)) # SKAT
Q.burden <- as.numeric((t(w)%*%U)^2) # burden
Qs <- (1-rho)*Q.skat + rho*Q.burden
# Per-rho eigenvalues of the mixture kernel and the corresponding p-values.
lambdas <- NULL
ps <- numeric(length(rho))
for(i in seq_along(rho)){
PC <- eigen((1-rho[i])*crossprod(R)+ rho[i]*outer(w,w),symmetric=TRUE)
# Symmetric square root of the (PSD-truncated) kernel, so the statistic's
# null distribution is a chi-square mixture with eigenvalues of K^{1/2} V K^{1/2}.
v.sqrt <- with(PC,{ values[values < 0] <- 0; (vectors)%*%diag(sqrt(values))%*%t(vectors) })
lam <- eigen( zapsmall(v.sqrt%*%V%*%v.sqrt),only.values=TRUE,symmetric=TRUE)$values
lam <- lam[lam != 0]
lambdas <- c(lambdas, list( lam ))
tmpP <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method=method, acc=1e-7)
if(tmpP$errflag != 0){
# Requested method lost accuracy: flag it and fall back to Liu's
# moment-matching approximation for this rho.
errflag <- 1
ps[i] <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method="liu")$p
} else {
ps[i] <- tmpP$p
}
}
minp <- min(ps)
# For each rho, invert the per-rho distribution at minp: Ts[i] is the
# quantile with tail probability minp, defining the acceptance region used
# in the conditional integral below.
Ts <- numeric(length(rho))
for(i in seq_along(rho)){
sat <- satterthwaite(lambdas[[i]],rep(1,length(lambdas[[i]])))
# Generous upper bracket for the root search, from the Satterthwaite fit.
upper <- qchisq(minp/20,df=sat$df,lower.tail=FALSE)*sat$scale
tmpT <- try(uniroot(function(x){pchisqsum2(x,lambda=lambdas[[i]],method=method,acc=1e-5)$p- minp }, interval=c(1e-10,upper))$root, silent = TRUE)
# inherits() rather than class(x) == "try-error": robust to multi-class objects.
if(inherits(tmpT, "try-error")){
#warning(paste0("Problem finding quantiles in gene ", gene, ", p-value may not be accurate"))
Ts[i] <- Qs[i]
errflag <- 2
} else {
Ts[i] <- tmpT
}
}
# Decompose the SKAT statistic into the burden direction and its orthogonal
# complement: conditional on the burden component, the remainder is a
# (noncentral) quadratic form with covariance V.cond.
v11 <- R%*%V%*%R
v12 <- R%*%V%*%w
v22 <- as.numeric(t(w)%*%V%*%w)
V.cond <- v11 - outer( v12, v12 )/v22
# One eigendecomposition provides both the eigenvalues and the basis
# (the original computed eigen(V.cond) twice).
EDec <- eigen(V.cond,symmetric=TRUE)
lambda.cond <- EDec$values
# D: pseudo-inverse square root of the eigenvalue matrix (zero eigenvalues
# stay zero), used to build the noncentrality direction.
D <- zapsmall(diag(EDec$values))
diag(D)[zapsmall(diag(D)) > 0] <- 1/sqrt(diag(D)[zapsmall(diag(D)) > 0])
diag(D)[diag(D) <= 0 ] <- 0
#meanvec <- t(EDec$vectors)%*%D%*%(EDec$vectors)%*%(v12)/c(v22)
meanvec <- as.numeric(D%*%t(EDec$vectors)%*%(v12)/c(v22))
# Fcond(x): probability that, given burden component x, every rho-statistic
# stays below its threshold Ts (i.e. none of the grid tests rejects).
Fcond <- function(x,method){
pp <- qmax <- numeric(length(x))
for(i in seq_along(x)){
# Tightest bound over the rho < 1 grid points; rho == 1 (pure burden)
# is handled separately as a direct comparison with x.
qmax[i] <- min( ( (Ts[rho !=1 ] - rho[rho != 1]*x[i])/(1-rho)[rho !=1]) )
if(any(x[i] > Ts[rho == 1]) ){
pp[i] <- 1
} else {
p.tmp <- pchisqsum2(qmax[i], lambda=lambda.cond, delta = meanvec^2*x[i], method = method, acc=min(minp,1e-5) )
# NOTE(review): this stops rather than falling back to Liu, despite the
# message text; the outer tryCatch-free integrate() relies on this error
# to trigger its own Liu fallback - confirm that is intended.
if(p.tmp$errflag != 0) stop("Error in integration! using Liu p-value")
pp[i] <- p.tmp$p
}
}
return(pp)
}
if(any(lambda.cond > 0)){
# Integrate Fcond over the 1-df chi-square density of the burden component
# to get the probability that no test rejects; actualp is its complement's
# value as returned by integrate().
integrand <- function(x){dchisq(x,1)*Fcond(x*v22,method=method)}
integral <- try(integrate(Vectorize(integrand),lower=0,upper=Inf, subdivisions = 200L, rel.tol=min(minp/100,1e-4)), silent = TRUE)
if (inherits(integral, "try-error")) {
# Primary method failed somewhere inside Fcond: redo with Liu and flag.
integrand <- function(x){dchisq(x,1)*Fcond(x*v22,method="liu")}
integral <- integrate(Vectorize(integrand),lower=0,upper=Inf)
errflag <- 3
} else {
if(integral$message != "OK") errflag <- 2
}
actualp <- integral$value
} else {
# Conditional covariance is degenerate: no correction possible, report minp.
#cat(".")
actualp <- minp
}
return(list("actualp"= actualp, "minp" = minp, "rho" = rho[which.min(ps)], "ps" = ps, "errflag" = errflag))
}
| /R/skatOMeta.R | no_license | izhbannikov/seqMetaPlus | R | false | false | 17,310 | r |
#' @title Combine SKAT-O analyses from one or more studies.
#'
#' @description Takes as input `seqMeta` objects (from e.g.
#' \code{\link{prepScores}}), and meta analyzes them, using SKAT-O. See the
#' package vignette for more extensive documentation.
#'
#' @inheritParams singlesnpMeta
#' @inheritParams burdenMeta
#' @param skat.wts Either a function to calculate testing weights for SKAT, or a
#' character specifying a vector of weights in the SNPInfo file. For skatOMeta
#' the default are the `beta' weights.
#' @param burden.wts Either a function to calculate weights for the burden test,
#' or a character specifying a vector of weights in the SNPInfo file. For
#' skatOMeta the default are the T1 weights.
#' @param rho A sequence of values that specify combinations of SKAT and a burden test to be considered. Default is c(0,1), which considers SKAT and a burden test.
#' @param method p-value calculation method. Should be one of 'saddlepoint', 'integration', or 'liu'.
#'
#' @details \code{skatOMeta()} implements the SKAT-Optimal test, which picks the
#' `best' combination of SKAT and a burden test, and then corrects for the
#' flexibility afforded by this choice. Specifically, if the SKAT statistic is
#' Q1, and the squared score for a burden test is Q2, SKAT-O considers tests
#' of the form (1-rho)*Q1 + rho*Q2, where rho between 0 and 1. The values of
#' rho are specified by the user using the argument \code{rho}. In the
#' simplest form, which is the default, SKAT-O computes a SKAT test and a T1
#' test, and reports the minimum p-value, corrected for multiple testing. See
#' the vignette or the accompanying references for more details.
#'
#' If there is a single variant in the gene, or the burden test is undefined
#' (e.g. there are no rare alleles for the T1 test), SKAT is reported (i.e.
#' rho=0).
#'
#' Note 1: the SKAT package uses the same weights for both SKAT and the burden
#' test, which this function does not.
#'
#' Note 2: all studies must use coordinated SNP Info files - that is, the SNP
#' names and gene definitions must be the same.
#'
#' Note 3: The method of p-value calculation is much more important here than
#' in SKAT. The `integration' method is fast and typically accurate for
#' p-values larger than 1e-9. The saddlepoint method is slower, but has higher
#' relative accuracy.
#'
#' Note 4: Since p-value calculation can be slow for SKAT-O, and less accurate
#' for small p-values, a reasonable alternative would be to first calculate
#' SKAT and a burden test, and record the minimum p-value, which is a lower
#' bound for the SKAT-O p-value. This can be done quickly and accurately.
#' Then, one would only need to perform SKAT-O on the small subset of genes
#' that are potentially interesting.
#'
#' Please see the package vignette for more details.
#'
#' @return a data frame with the following columns:
#' \item{gene}{Name of the gene or unit of aggregation being meta analyzed}
#' \item{p}{p-value of the SKAT-O test.}
#' \item{pmin}{The minimum of the p-values considered by SKAT-O (not corrected for multiple testing!).}
#' \item{rho}{The value of rho which gave the smallest p-value.}
#' \item{cmaf}{The cumulative minor allele frequency.}
#' \item{nmiss}{The number of `missing` SNPs. For a gene with a single SNP
#' this is the number of individuals which do not contribute to the analysis,
#' due to studies that did not report results for that SNP. For a gene with
#' multiple SNPs, is totalled over the gene. }
#' \item{nsnps}{The number of SNPs in the gene.}
#' \item{errflag}{An indicator of possible error: 0 suggests no error, > 0
#' indicates probable loss of accuracy.}
#'
#' @references Wu, M.C., Lee, S., Cai, T., Li, Y., Boehnke, M., and Lin, X.
#' (2011) Rare Variant Association Testing for Sequencing Data Using the
#' Sequence Kernel Association Test (SKAT). American Journal of Human
#' Genetics.
#'
#' Lee, S. and Wu, M.C. and Lin, X. (2012) Optimal tests for rare variant
#' effects in sequencing association studies. Biostatistics.
#'
#' @author Arie Voorman, Jennifer Brody
#' @seealso
#' \code{\link{skatOMeta}}
#' \code{\link{prepScores}}
#' \code{\link{burdenMeta}}
#' \code{\link{singlesnpMeta}}
#'
#' @examples
#' \dontrun{
#' ### load example data for 2 studies
#' data(seqMetaExample)
#'
#' ####run on each study:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo = SNPInfo, data =pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo = SNPInfo, kins=kins, data=pheno2)
#'
#' #### combine results:
#' ##skat-O with default settings:
#' out1 <- skatOMeta(cohort1, cohort2, SNPInfo = SNPInfo, method = "int")
#' head(out1)
#'
#' ##skat-O, using a large number of combinations between SKAT and T1 tests:
#' out2 <- skatOMeta(cohort1, cohort2, rho=seq(0,1,length=11), SNPInfo=SNPInfo, method="int")
#' head(out2)
#'
#' #rho = 0 indicates SKAT gave the smaller p-value (or the T1 is undefined)
#' #rho=1 indicates the burden test was chosen
#' # 0 < rho < 1 indicates some other value was chosen
#' #notice that most of the time either the SKAT or T1 is chosen
#' table(out2$rho)
#'
#' ##skat-O with beta-weights used in the burden test:
#' out3 <- skatOMeta(cohort1,cohort2, burden.wts = function(maf){dbeta(maf,1,25) },
#' rho=seq(0,1,length=11),SNPInfo = SNPInfo, method="int")
#' head(out3)
#' table(out3$rho)
#'
#' ########################
#' ####binary data
#' cohort1 <- prepScores(Z=Z1, ybin~1, family=binomial(), SNPInfo=SNPInfo, data=pheno1)
#' out.bin <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.bin)
#'
#' ####################
#' ####survival data
#' cohort1 <- prepCox(Z=Z1, Surv(time,status)~strata(sex)+bmi, SNPInfo=SNPInfo,
#' data=pheno1)
#' out.surv <- skatOMeta(cohort1, SNPInfo = SNPInfo, method="int")
#' head(out.surv)
#'
#' ##########################################
#' ###Compare with SKAT and T1 tests on their own:
#' cohort1 <- prepScores(Z=Z1, y~sex+bmi, SNPInfo=SNPInfo, data=pheno1)
#' cohort2 <- prepScores(Z=Z2, y~sex+bmi, SNPInfo=SNPInfo, kins=kins, data=pheno2)
#'
#' out.skat <- skatMeta(cohort1,cohort2,SNPInfo=SNPInfo)
#' out.t1 <- burdenMeta(cohort1,cohort2, wts= function(maf){as.numeric(maf <= 0.01)},
#' SNPInfo=SNPInfo)
#'
#' #plot results
#' #We compare the minimum p-value of SKAT and T1, adjusting for multiple tests
#' #using the Sidak correction, to that of SKAT-O.
#'
#' par(mfrow=c(1,3))
#' pseq <- seq(0,1,length=100)
#' plot(y=out.skat$p, x=out1$p,xlab="SKAT-O p-value", ylab="SKAT p-value", main ="SKAT-O vs SKAT")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=out.t1$p, x=out1$p,xlab="SKAT-O p-value", ylab="T1 p-value", main ="SKAT-O vs T1")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#'
#' plot(y=pmin(out.t1$p, out.skat$p,na.rm=T), x=out1$p,xlab="SKAT-O p-value",
#' ylab="min(T1,SKAT) p-value", main ="min(T1,SKAT) vs SKAT-O")
#' lines(y=pseq,x=1-(1-pseq)^2,col=2,lty=2, lwd=2)
#' abline(0,1)
#' legend("bottomright", lwd=2,lty=2,col=2,legend="Sidak correction")
#' }
#'
#' @export
# Meta-analysis SKAT-O test.
# Combines per-cohort score objects (class "seqMeta" or "skatCohort",
# passed unevaluated via ...) into gene-level SKAT-O results.
#   SNPInfo    - SNP-to-gene map; if NULL the packaged Illumina HumanExome
#                SNPInfo is loaded and aggregation switches to "SKATgene".
#   skat.wts   - SKAT weights: a function of MAF, a SNPInfo column name,
#                or anything else for flat weights.
#   burden.wts - burden weights; same conventions as skat.wts.
#   rho        - grid of SKAT/burden mixing parameters, each in [0, 1].
#   method     - algorithm for chi-square mixture tail probabilities
#                (passed through to pchisqsum2 / skatO_getp).
#   mafRange   - keep only SNPs whose pooled MAF lies in this range.
# Returns a data.frame with one row per gene: gene name plus the numeric
# columns p, pmin, rho, cmaf, nmiss, nsnps, errflag.
skatOMeta <- function(..., SNPInfo=NULL, skat.wts=function(maf){dbeta(maf,1,25)}, burden.wts=function(maf){as.numeric(maf <= 0.01) }, rho=c(0,1), method=c("integration", "saddlepoint", "liu"), snpNames="Name", aggregateBy="gene", mafRange=c(0,0.5), verbose=FALSE) {
# cl[[2]] below recovers the *names* of the ... arguments, so the cohort
# objects can be fetched lazily from the caller's environment.
cl <- match.call(expand.dots = FALSE)
if(is.null(SNPInfo)){
warning("No SNP Info file provided: loading the Illumina HumanExome BeadChip. See ?SNPInfo for more details")
load(paste(find.package("seqMeta"), "data", "SNPInfo.rda",sep = "/"))
aggregateBy = "SKATgene"
} else {
SNPInfo <- prepSNPInfo(SNPInfo, snpNames, aggregateBy, wt1=skat.wts, wt2=burden.wts)
}
if(any(rho >1 | rho < 0 ) ) stop("rho must be between 0 and 1")
method <- match.arg(method)
#if( !(method %in% c("davies","farebrother","imhof","liu")) ) stop("Method specified is not valid! See documentation")
genelist <- na.omit(unique(SNPInfo[,aggregateBy]))
cohortNames <- lapply(cl[[2]],as.character)
ncohort <- length(cohortNames)
ev <- parent.frame()
# Validate that every ... argument is a per-cohort score object.
classes <- unlist(lapply(cohortNames,function(name){class(get(name,envir=ev))}))
if(!all(classes == "seqMeta" | classes == "skatCohort") ){
stop("an argument to ... is not a seqMeta object!")
}
# Result containers: one row per gene.
res.strings <- data.frame("gene"=genelist,stringsAsFactors=F)
res.numeric <- matrix(NA, nrow= nrow(res.strings),ncol = length(c("p","pmin","rho","cmaf","nmiss", "nsnps", "errflag")))
colnames(res.numeric) <- c("p","pmin","rho","cmaf","nmiss", "nsnps","errflag")
if(verbose){
cat("\n Meta Analyzing... Progress:\n")
pb <- txtProgressBar(min = 0, max = length(genelist), style = 3)
pb.i <- 0
}
ri <- 0
snp.names.list <- split(SNPInfo[,snpNames],SNPInfo[,aggregateBy])
for(gene in genelist){
ri <- ri+1
nsnps.sub <- length(snp.names.list[[gene]])
# Accumulators for meta-analyzed scores, MAFs and covariance across cohorts.
mscores <- maf <- numeric(nsnps.sub)
big.cov <- Matrix(0, nsnps.sub,nsnps.sub)
n.total <- numeric(nsnps.sub)
n.miss <- numeric(nsnps.sub)
vary.ave <- 0
for(cohort.k in 1:ncohort){
cohort.gene <- get(cohortNames[[cohort.k]],envir=ev)[[gene]]
if(!is.null(cohort.gene)){
# Re-align this cohort's SNPs with the SNPInfo ordering; SNPs missing
# from the cohort get maf = -1 (sentinel) and zero scores/covariance.
sub <- match(snp.names.list[[gene]],colnames(cohort.gene$cov))
if(any(is.na(sub)) | any(sub != 1:length(sub), na.rm=TRUE) | length(cohort.gene$maf) > nsnps.sub){
#if(any(is.na(sub))) warning("Some SNPs were not in SNPInfo file for gene ", gene," and cohort ",names(cohorts)[cohort.k])
cohort.gene$cov <- as.matrix(cohort.gene$cov)[sub,sub,drop=FALSE]
cohort.gene$cov[is.na(sub),] <- cohort.gene$cov[,is.na(sub)] <- 0
cohort.gene$maf <- cohort.gene$maf[sub]
cohort.gene$maf[is.na(sub)] <- -1
cohort.gene$scores <- cohort.gene$scores[sub]
cohort.gene$scores[is.na(sub)] <- 0
}
# Tally observed vs. missing sample counts per SNP; sentinel maf < 0
# marks SNPs absent from this cohort.
n.total[cohort.gene$maf >= 0] <- n.total[cohort.gene$maf >= 0]+cohort.gene$n
n.miss[cohort.gene$maf < 0] <- n.miss[cohort.gene$maf < 0] + cohort.gene$n
cohort.gene$maf[cohort.gene$maf < 0] <- 0
# Inverse-variance weighting by the cohort's residual SE (sey).
mscores <- mscores + cohort.gene$scores/cohort.gene$sey^2
maf <- maf + 2*cohort.gene$maf*(cohort.gene$n)
big.cov <- big.cov + cohort.gene$cov/cohort.gene$sey^2
vary.ave <- vary.ave + max(cohort.gene$n,na.rm=T)*cohort.gene$sey^2
}else{
# Gene entirely absent from this cohort: all its samples count as missing.
n.miss <- n.miss + get(cohortNames[[cohort.k]],envir=parent.frame())[[1]]$n
}
}
if(any(maf >0)){
# Convert allele-count sums back to frequencies, fold to minor allele.
maf <- maf/(2*n.total)
maf[is.nan(maf)] <- 0
maf <- sapply(maf, function(x){min(x,1-x)})
if( !all(mafRange == c(0,0.5))){
keep <- (maf >= min(mafRange)) & (maf <= max(mafRange))
big.cov <- big.cov[keep,keep]
mscores <- mscores[keep]
maf <- maf[keep]
}
}
if(length(maf)> 0){
# Resolve SKAT weights: function of MAF, SNPInfo column, or flat.
if(is.function(skat.wts)){
w1 <- skat.wts(maf)
} else if(is.character(skat.wts)){
w1 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,skat.wts])
} else {
w1 <- rep(1,length(maf))
}
# Resolve burden weights analogously.
if(is.function(burden.wts)){
w2 <- burden.wts(maf)
} else if(is.character(burden.wts)){
w2 <- as.numeric(SNPInfo[SNPInfo[,aggregateBy]==gene,burden.wts])
} else {
w2 <- rep(1,length(maf))
}
# Monomorphic SNPs (maf == 0) contribute nothing.
w1 <- ifelse(maf >0, w1,0)
w2 <- ifelse(maf >0, w2,0)
##
# SKAT and burden statistics with their variance terms.
Q.skat <- sum((w1*mscores)^2, na.rm=TRUE)
V.skat <- (w1)*t(t(big.cov)*as.vector(w1))
Q.burden <- sum(w2*mscores, na.rm=TRUE)^2
V.burden <- as.numeric(t(w2)%*%big.cov%*%w2)
#If burden test is 0, or only 1 SNP in the gene, do SKAT:
if(sum(maf > 0) ==1 | V.burden ==0){
lambda <- eigen(zapsmall(V.skat), symmetric = TRUE)$values
if(any(lambda > 0) & length(lambda) >1) {
tmpP <- pchisqsum2(Q.skat,lambda=lambda,method=method, acc=1e-7)
if(tmpP$errflag !=0 ){
res.numeric[ri,"errflag"] = 1
} else {
res.numeric[ri,"errflag"] = 0
}
p <- tmpP$p
} else {
# Single eigenvalue: exact 1-df chi-square; degenerate case gives p = 1.
p <- ifelse(length(lambda) == 1 & all(lambda > 0), pchisq(Q.skat/lambda,df=1,lower.tail=FALSE),1)
res.numeric[ri,"errflag"] = 0
}
res.numeric[ri,"pmin"] = res.numeric[ri,"p"] = p
res.numeric[ri,"rho"] = 0
#Else do SKAT-O
} else {
skato.res <- skatO_getp(mscores, big.cov, diag(w1), w2, rho, method= method, gene=gene)
res.numeric[ri,"p"] <- skato.res$actualp
res.numeric[ri,"pmin"] = skato.res$minp
res.numeric[ri,"rho"] = skato.res$rho
res.numeric[ri, "errflag"] = skato.res$errflag
}
} else {
# No polymorphic SNPs left for this gene.
res.numeric[ri,"p"] <- res.numeric[ri,"pmin"] <- 1
res.numeric[ri,"rho"] <- 0
res.numeric[ri, "errflag"] <- 0
}
res.numeric[ri,"cmaf"] = sum(maf,na.rm=TRUE)
res.numeric[ri,"nsnps"] = sum(maf!= 0, na.rm =T)
res.numeric[ri,"nmiss"] = sum(n.miss, na.rm =T)
if(verbose){
pb.i <- pb.i+1
setTxtProgressBar(pb, pb.i)
}
}
if(verbose) close(pb)
return(cbind(res.strings,res.numeric))
}
# Compute the SKAT-O p-value for one gene by minimizing over the rho grid
# and then evaluating the minimum-p statistic's null distribution via a
# one-dimensional conditional integral.
skatO_getp <- function(U,V, R, w, rho,method = "davies", gene=NULL){
##Input:
#U: score vector (length p)
#R: p x p weight matrix for skat
#w: burden weights
#rho: vector of rhos in [0,1]
#method: method for calculating Normal quadratic form distribution
#gene: The name of the region - used for error reporting
##Output: a list with elements
#minp: the minimum p-value
#actualp: the actual p-value
#rho: the value of rho which gave the minp
#ps: the whole vector of p-values
#errflag: 0 if no problem, 1 if quantile issue, 2 if integration issue
# Satterthwaite approximation of a chi-square mixture by a scaled
# chi-square; used only to pick a search bracket for the quantiles.
satterthwaite <- function(a, df) {
if (any(df > 1)) {
a <- rep(a, df)
}
tr <- mean(a)
tr2 <- mean(a^2)/(tr^2)
list(scale = tr * tr2, df = length(a)/tr2)
}
errflag = 0
Q.skat <- crossprod(R%*%U) # SKAT
Q.burden <- (t(w)%*%U)^2 # burden
# Family of statistics interpolating between SKAT (rho=0) and burden (rho=1).
Qs <- (1-rho)*Q.skat + rho*Q.burden
lambdas <- ps <- NULL
ps <- numeric(length(rho))
for(i in 1:length(rho)){
# Eigenvalues of the rho-mixture kernel; a matrix square root maps the
# statistic to a chi-square mixture with weights 'lam'.
PC <- eigen((1-rho[i])*crossprod(R)+ rho[i]*outer(w,w),symmetric=TRUE)
v.sqrt <- with(PC,{ values[values < 0] <- 0; (vectors)%*%diag(sqrt(values))%*%t(vectors) })
lam <- eigen( zapsmall(v.sqrt%*%V%*%v.sqrt),only.values=TRUE,symmetric=TRUE)$values
lam <- lam[lam != 0]
lambdas <- c(lambdas, list( lam ))
tmpP <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method=method, acc=1e-7)
if(tmpP$errflag != 0){
# Fall back to the (cruder but robust) Liu approximation.
errflag <- 1
ps[i] <- pchisqsum2(Qs[i],lambda=lambdas[[i]],method="liu")$p
} else {
ps[i] <- tmpP$p
}
}
minp <- min(ps)
# For each rho, invert the p-value to the statistic threshold T_rho such
# that P(Q_rho > T_rho) = minp.
Ts <- numeric(length(rho))
for(i in 1:length(rho)){
sat <- satterthwaite(lambdas[[i]],rep(1,length(lambdas[[i]])))
upper <- qchisq(minp/20,df=sat$df,lower.tail=FALSE)*sat$scale
tmpT <- try(uniroot(function(x){pchisqsum2(x,lambda=lambdas[[i]],method=method,acc=1e-5)$p- minp }, interval=c(1e-10,upper))$root, silent = TRUE)
# FIX: test try() failures with inherits() instead of class(x) == "...",
# which is fragile when an object carries more than one class.
if(inherits(tmpT, "try-error")){
#warning(paste0("Problem finding quantiles in gene ", gene, ", p-value may not be accurate"))
Ts[i] <- Qs[i]
errflag <- 2
} else {
Ts[i] <- tmpT
}
}
# Decompose the SKAT statistic conditional on the burden component.
v11 <- R%*%V%*%R
v12 <- R%*%V%*%w
v22 <- as.numeric(t(w)%*%V%*%w)
V.cond <- v11 - outer( v12, v12 )/v22
lambda.cond <- eigen(V.cond,only.values=TRUE,symmetric=TRUE)$values
EDec <- eigen(V.cond,symmetric=TRUE)
# D holds 1/sqrt of the positive eigenvalues (pseudo-inverse square root).
D <- zapsmall(diag(EDec$values))
diag(D)[zapsmall(diag(D)) > 0] <- 1/sqrt(diag(D)[zapsmall(diag(D)) > 0])
diag(D)[diag(D) <= 0 ] <- 0
#meanvec <- t(EDec$vectors)%*%D%*%(EDec$vectors)%*%(v12)/c(v22)
meanvec <- as.numeric(D%*%t(EDec$vectors)%*%(v12)/c(v22))
# Conditional survival function of min-p event given the burden value x.
Fcond <- function(x,method){
pp <- qmax <- numeric(length(x))
for(i in 1:length(x)){
qmax[i] <- min( ( (Ts[rho !=1 ] - rho[rho != 1]*x[i])/(1-rho)[rho !=1]) )
if(any(x[i] > Ts[rho == 1]) ){
pp[i] <- 1
} else {
p.tmp <- pchisqsum2(qmax[i], lambda=lambda.cond, delta = meanvec^2*x[i], method = method, acc=min(minp,1e-5) )
if(p.tmp$errflag != 0) stop("Error in integration! using Liu p-value")
pp[i] = p.tmp$p
}
}
return(pp)
}
if(any(lambda.cond > 0)){
# Integrate the conditional probability over the 1-df burden chi-square.
integrand <- function(x){dchisq(x,1)*Fcond(x*v22,method=method)}
integral <- try(integrate(Vectorize(integrand),lower=0,upper=Inf, subdivisions = 200L, rel.tol=min(minp/100,1e-4)), silent = TRUE)
# FIX: inherits() instead of class(...) == "try-error" (see above).
if (inherits(integral, "try-error")) {
integrand <- function(x){dchisq(x,1)*Fcond(x*v22,method="liu")}
integral <- integrate(Vectorize(integrand),lower=0,upper=Inf)
errflag <- 3
} else {
if(integral$message != "OK") errflag <- 2
}
actualp <- integral[1]$value
} else {
#cat(".")
# Degenerate conditional distribution: min-p is already the answer.
actualp = minp
}
return(list("actualp"= actualp, "minp" = minp, "rho" = rho[which.min(ps)], "ps" = ps, "errflag" = errflag))
}
|
#' Search Stackoverflow
#' @description Search Stackoverflow directly from the R console to speed up
#'   your workflow.
#' @param search_terms Search terms encapsulated in " ".
#' @keywords web workflow stackoverflow
#' @examples
#' stackoverflow("r date conversion")
#' so("r ggplot2 geom_smooth()")
#' @export
stackoverflow <- function(search_terms) {
  # Build the search URL up front, then report and open it.
  search_url <- paste0("https://stackoverflow.com/search?q=", URLencode(search_terms))
  message("Opening Stackoverflow search for \"", search_terms, "\" in browser")
  browseURL(search_url)
}
#' @export
#' @rdname stackoverflow
so <- stackoverflow
| /R/stackoverflow.R | no_license | cran/websearchr | R | false | false | 572 | r | #' Search Stackoverflow
#' @description Improve your workflow by searching Stackoverflow directly from R console.
#' @param search_terms Search terms encapsulated in " ".
#' @keywords web workflow stackoverflow
#' @examples
#' stackoverflow("r date conversion")
#' so("r ggplot2 geom_smooth()")
#' @export
stackoverflow <- function(search_terms) {
message("Opening Stackoverflow search for \"", search_terms, "\" in browser")
browseURL(paste0("https://stackoverflow.com/search?q=", URLencode(search_terms)))
}
#' @export
#' @rdname stackoverflow
so <- stackoverflow
|
##MY FINAL ASSIGMENT IN GETTING AND CLEANING DATA COURSE PROJECT #
# NOTE(review): hard-coded absolute path; the UCI HAR text files are assumed
# to live in this directory -- adjust before running elsewhere.
setwd("C:/Users/JOvissnoel/Documents/Rproyectos/Cleaning Data/FinalProy")
library(dplyr)
### 1 Merges the training and the test sets to create one data set.####
# sep = "" makes read.csv whitespace-delimited (like read.table), which
# matches the layout of the UCI HAR .txt files.
features<-read.csv("features.txt",sep = "",col.names = c("id_feature","functions"),header = FALSE)
xtrain<-read.csv("X_train.txt",sep = "",header = FALSE,col.names = features$functions)
ytrain<-read.csv("y_train.txt",sep = "",header = FALSE, col.names = "id_activity")
xtest<-read.csv("X_test.txt",sep = "",header = FALSE, col.names = features$functions)
ytest<-read.csv("y_test.txt",sep = "",header = FALSE, col.names = "id_activity")
activ <- read.csv("activity_labels.txt",header = FALSE,sep = "",col.names = c("id", "activity"))
subject_test <- read.csv("subject_test.txt", sep = "",header = FALSE,col.names = "subject")
subject_train<- read.csv("subject_train.txt",sep = "",header = FALSE,col.names = "subject")
# Stack train on top of test, keeping rows aligned across the three pieces,
# then bind subject ids, activity ids and measurements side by side.
X<-rbind(xtrain, xtest)
Y<-rbind(ytrain, ytest)
subject<-rbind(subject_train,subject_test)
merged_data<-cbind(subject, Y, X)
### 2 Extracts only the measurements on the mean and standard deviation for each measuremen####
extracted<-merged_data%>%select(subject,id_activity, contains(c("mean","std")))
### 3 Uses descriptive activity names to name the activities in the data set ####
# Index into the activity label lookup table by activity id.
extracted$id_activity <-activ[extracted$id_activity, 2]
extracted<-extracted%>%rename(activity=id_activity)
### 4. Appropriately labels the data set with descriptive variable names ####
# Expand the abbreviated feature-name fragments into readable words.
names(extracted)<-gsub("Acc", "Accelerometer", names(extracted))
names(extracted)<-gsub("Gyro", "Gyroscope", names(extracted))
names(extracted)<-gsub("BodyBody", "Body", names(extracted))
names(extracted)<-gsub("Mag", "Magnitude", names(extracted))
names(extracted)<-gsub("^t", "Time", names(extracted))
names(extracted)<-gsub("^f", "Frequency", names(extracted))
names(extracted)<-gsub("tBody", "TimeBody", names(extracted))
names(extracted)<-gsub("-mean()", "Mean", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("-std()", "STD", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("-freq()", "Frequency", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("angle", "Angle", names(extracted))
names(extracted)<-gsub("gravity", "Gravity", names(extracted))
### 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
extracted_average<-extracted%>%group_by(activity,subject)%>%summarise_all(mean)
write.table(extracted_average, "FinalDataBase.txt", row.name=FALSE)
| /run_analysis.R | no_license | hnperez/getting-and-cleaning-data-Final-Project | R | false | false | 2,669 | r | ##MY FINAL ASSIGMENT IN GETTING AND CLEANING DATA COURSE PROJECT #
setwd("C:/Users/JOvissnoel/Documents/Rproyectos/Cleaning Data/FinalProy")
library(dplyr)
### 1 Merges the training and the test sets to create one data set.####
features<-read.csv("features.txt",sep = "",col.names = c("id_feature","functions"),header = FALSE)
xtrain<-read.csv("X_train.txt",sep = "",header = FALSE,col.names = features$functions)
ytrain<-read.csv("y_train.txt",sep = "",header = FALSE, col.names = "id_activity")
xtest<-read.csv("X_test.txt",sep = "",header = FALSE, col.names = features$functions)
ytest<-read.csv("y_test.txt",sep = "",header = FALSE, col.names = "id_activity")
activ <- read.csv("activity_labels.txt",header = FALSE,sep = "",col.names = c("id", "activity"))
subject_test <- read.csv("subject_test.txt", sep = "",header = FALSE,col.names = "subject")
subject_train<- read.csv("subject_train.txt",sep = "",header = FALSE,col.names = "subject")
X<-rbind(xtrain, xtest)
Y<-rbind(ytrain, ytest)
subject<-rbind(subject_train,subject_test)
merged_data<-cbind(subject, Y, X)
### 2 Extracts only the measurements on the mean and standard deviation for each measuremen####
extracted<-merged_data%>%select(subject,id_activity, contains(c("mean","std")))
### 3 Uses descriptive activity names to name the activities in the data set ####
extracted$id_activity <-activ[extracted$id_activity, 2]
extracted<-extracted%>%rename(activity=id_activity)
### 4. Appropriately labels the data set with descriptive variable names ####
names(extracted)<-gsub("Acc", "Accelerometer", names(extracted))
names(extracted)<-gsub("Gyro", "Gyroscope", names(extracted))
names(extracted)<-gsub("BodyBody", "Body", names(extracted))
names(extracted)<-gsub("Mag", "Magnitude", names(extracted))
names(extracted)<-gsub("^t", "Time", names(extracted))
names(extracted)<-gsub("^f", "Frequency", names(extracted))
names(extracted)<-gsub("tBody", "TimeBody", names(extracted))
names(extracted)<-gsub("-mean()", "Mean", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("-std()", "STD", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("-freq()", "Frequency", names(extracted), ignore.case = TRUE)
names(extracted)<-gsub("angle", "Angle", names(extracted))
names(extracted)<-gsub("gravity", "Gravity", names(extracted))
### 5: From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
extracted_average<-extracted%>%group_by(activity,subject)%>%summarise_all(mean)
write.table(extracted_average, "FinalDataBase.txt", row.name=FALSE)
|
# Modified water::loadImageSR() with correct filename pattern
# Load Landsat surface-reflectance bands 1-7 from 'path', crop them to the
# area of interest 'aoi', and return a named raster stack.
loadSR = function (path = getwd(), aoi)
{
  # FIX: the old pattern "_SR_B+[1-7].TIF$" used an unescaped '.' (any
  # character) and a redundant 'B+'; escape the dot and match B1..B7 exactly.
  files <- list.files(path = path, pattern = "_SR_B[1-7]\\.TIF$",
                      full.names = TRUE)
  # FIX: the old loop indexed files[1:7] unconditionally, producing a cryptic
  # raster(NA) error when fewer bands were present; fail with a clear message
  # and iterate over whatever was actually found.
  if (length(files) == 0) {
    stop("No '_SR_B[1-7].TIF' surface reflectance bands found in ", path)
  }
  stack1 <- lapply(files, raster)
  image_SR <- do.call(stack, stack1)
  image_SR <- water:::aoiCrop(image_SR, aoi)
  # image_SR <- image_SR/10000   # (scaling intentionally disabled upstream)
  bandnames <- c("SB", "B", "G", "R", "NIR",
                 "SWIR1", "SWIR2")
  image_SR <- water:::saveLoadClean(imagestack = image_SR, stack.names = bandnames,
                                    file = "image_SR", overwrite = TRUE)
  return(image_SR)
}
# Load the Landsat surface-temperature band (ST_B10) from 'path' and crop it
# to the area of interest 'aoi'. Returns a single RasterLayer (not a stack).
loadST = function(path = getwd(), aoi)
{
files <- list.files(path = path, pattern = "_ST_B10.TIF$",
full.names = T)
# Only the first match is used; assumes exactly one ST_B10 file per scene --
# TODO confirm, raster(NA) will error cryptically if no file matches.
image_ST = raster(files[1])
# stack1 <- list()
# for (i in 1:7) {
# stack1[i] <- raster(files[i])
# }
# image_SR <- do.call(stack, stack1)
image_ST <- water:::aoiCrop(image_ST, aoi)
# image_SR <- image_SR/10000
# bandnames <- c("SB", "B", "G", "R", "NIR",
# "SWIR1", "SWIR2")
# image_ST <- water:::saveLoadClean(imagestack = image_ST, stack.names = bandnames,
# file = "image_SR", overwrite = TRUE)
return(image_ST)
}
| /loadSR.R | no_license | pramitghosh/RS_ET | R | false | false | 1,307 | r | # Modified water::loadImageSR() with correct filename pattern
loadSR = function (path = getwd(), aoi)
{
files <- list.files(path = path, pattern = "_SR_B+[1-7].TIF$",
full.names = T)
stack1 <- list()
for (i in 1:7) {
stack1[i] <- raster(files[i])
}
image_SR <- do.call(stack, stack1)
image_SR <- water:::aoiCrop(image_SR, aoi)
# image_SR <- image_SR/10000
bandnames <- c("SB", "B", "G", "R", "NIR",
"SWIR1", "SWIR2")
image_SR <- water:::saveLoadClean(imagestack = image_SR, stack.names = bandnames,
file = "image_SR", overwrite = TRUE)
return(image_SR)
}
loadST = function(path = getwd(), aoi)
{
files <- list.files(path = path, pattern = "_ST_B10.TIF$",
full.names = T)
image_ST = raster(files[1])
# stack1 <- list()
# for (i in 1:7) {
# stack1[i] <- raster(files[i])
# }
# image_SR <- do.call(stack, stack1)
image_ST <- water:::aoiCrop(image_ST, aoi)
# image_SR <- image_SR/10000
# bandnames <- c("SB", "B", "G", "R", "NIR",
# "SWIR1", "SWIR2")
# image_ST <- water:::saveLoadClean(imagestack = image_ST, stack.names = bandnames,
# file = "image_SR", overwrite = TRUE)
return(image_ST)
}
|
# smart_hclust: takes a data frame with an arbitrary number of numeric
# variables and a number of clusters to extract via hierarchical clustering,
# and appends the cluster assignment to the data.
test_data <- read.csv("https://stepic.org/media/attachments/course/524/test_data_hclust.csv")
# Hierarchically cluster the rows of 'test_data' (Euclidean distance,
# default complete linkage) and append a 'cluster' column with the
# 'cluster_number'-group assignment.
smart_hclust <- function(test_data, cluster_number) {
  fit <- hclust(dist(test_data))
  cluster <- cutree(fit, cluster_number)
  # FIX: the original ended with an assignment, so the result was returned
  # invisibly; return the value directly instead.
  cbind(test_data, cluster)
}
# get_difference: takes two arguments:
# test_data - a data frame with an arbitrary number of numeric variables;
# n_cluster - the number of clusters to extract via hierarchical clustering.
# The function returns the names of the variables for which significant
# differences between the extracted clusters were found (p < 0.05).
# Cluster the rows of 'test_data' into 'n_cluster' groups (hierarchical
# clustering on Euclidean distance) and return the names of the variables
# whose one-way ANOVA against the cluster factor has p < 0.05.
get_difference <- function(test_data, n_cluster) {
  d <- dist(test_data)
  fit <- hclust(d)
  # BUG FIX: 'n_cluste' was a typo for the 'n_cluster' argument.
  test_data$cluster <- factor(cutree(fit, k = n_cluster))
  # Extract the p-value by its full column name instead of relying on
  # '$P' partial matching against "Pr(>F)".
  mod <- sapply(test_data[, colnames(test_data) != "cluster"],
                function(x) anova(aov(x ~ cluster, data = test_data))[["Pr(>F)"]][1])
  return(names(mod)[mod < 0.05])
}
# get_pc: takes a data frame with an arbitrary number of numeric variables.
# The function must run a principal component analysis and append two new
# columns with the scores of the first and second principal components.
# The new variables must be named "PC1" and "PC2" respectively.
test_data <- read.csv("https://stepic.org/media/attachments/course/524/pca_test.csv")
# Append the first two principal-component scores of 'd' to 'd'.
# prcomp() names the score columns "PC1" and "PC2", as required.
get_pc <- function(d) {
  # BUG FIX: the original body ignored the argument 'd' and read/overwrote a
  # global 'test_data'; operate on the input itself.
  cbind(d, prcomp(d)$x[, c(1, 2)])
}
# Let's make the previous task harder!
# get_pca2: takes a data frame with an arbitrary number of numeric
# variables. The function must work out the minimal number of principal
# components that explains more than 90% of the variance in the original
# data, and append the scores of those components to the input data frame
# as new variables.
# Append to 'data' the scores of the smallest number of principal
# components whose cumulative proportion of explained variance is >= 90%.
get_pca2 <- function(data){
  # BUG FIX: the original ran prcomp() on the built-in 'swiss' dataset
  # instead of the 'data' argument.
  pca <- prcomp(data)
  # Row 3 of the importance matrix is the cumulative proportion of variance.
  cum_prop <- summary(pca)$importance[3, ]
  # Smallest k with cumulative proportion >= 0.90 (same threshold as the
  # original accumulate-and-break loop; the last entry is always 1).
  n <- which(cum_prop >= 0.90)[1]
  # drop = FALSE keeps the "PC1" column name when only one component is kept.
  cbind(data, pca$x[, seq_len(n), drop = FALSE])
}
# A task for Chuck Norris.
test_data <- read.csv("https://stepic.org/media/attachments/course/524/Norris_2.csv")
# Report perfectly collinear variable pairs in 'test_data'.
# Prints (and invisibly returns) either the offending variable names or a
# message that no collinearity was found.
is_multicol <- function(test_data) {
  corr <- cor(test_data)
  # Zero the diagonal so self-correlations are not reported.
  diag(corr) <- 0
  # FIX: compare |cor| to 1 with a numeric tolerance instead of '==';
  # correlations of exactly collinear columns can differ from 1 by
  # floating-point rounding.
  perfect <- which(1 - abs(corr) < sqrt(.Machine$double.eps), arr.ind = TRUE)
  out <- row.names(perfect)
  if (length(out) == 0) {
    print("There is no collinearity in the data")
  } else {
    print(out)
  }
}
# complete the code to produce the plot
# NOTE(review): assumes a 'cluster' column has already been added to 'swiss'
# (e.g. via smart_hclust) -- it is not defined in this snippet; verify
# before running.
library(ggplot2)
# Scatter of Education vs Catholic colored by cluster, with per-cluster
# linear trend lines.
my_plot <- ggplot(swiss, aes(Education, Catholic, col = cluster))+
geom_point()+
geom_smooth(method = "lm")
| /PCA_clustering.R | no_license | zhukovanan/Stepik_ | R | false | false | 3,891 | r | #ะะฐะฟะธัะธัะต ััะฝะบัะธั smart_hclust, ะบะพัะพัะฐั ะฟะพะปััะฐะตั ะฝะฐ ะฒั
ะพะด dataframe ั ะฟัะพะธะทะฒะพะปัะฝัะผ
#ัะธัะปะพะผ ะบะพะปะธัะตััะฒะตะฝะฝัั
ะฟะตัะตะผะตะฝะฝัั
ะธ ัะธัะปะพ ะบะปะฐััะตัะพะฒ, ะบะพัะพัะพะต ะฝะตะพะฑั
ะพะดะธะผะพ ะฒัะดะตะปะธัั ะฟัะธ ะฟะพะผะพัะธ ะธะตัะฐัั
ะธัะตัะบะพะน ะบะปะฐััะตัะธะทะฐัะธะธ.
test_data <- read.csv("https://stepic.org/media/attachments/course/524/test_data_hclust.csv")
smart_hclust<- function(test_data, cluster_number){
fit <- hclust(dist(test_data))
cluster <- cutree(fit, cluster_number)
test_data <- cbind(test_data, cluster)
}
#ะะฐะฟะธัะธัะต ััะฝะบัะธั get_difference, ะบะพัะพัะฐั ะฟะพะปััะฐะตั ะฝะฐ ะฒั
ะพะด ะดะฒะฐ ะฐัะณัะผะตะฝัะฐ:
#test_data โ ะฝะฐะฑะพั ะดะฐะฝะฝัั
ั ะฟัะพะธะทะฒะพะปัะฝัะผ ัะธัะปะพะผ ะบะพะปะธัะตััะฒะตะฝะฝัั
ะฟะตัะตะผะตะฝะฝัั
.
#n_cluster โ ัะธัะปะพ ะบะปะฐััะตัะพะฒ, ะบะพัะพัะพะต ะฝัะถะฝะพ ะฒัะดะตะปะธัั ะฒ ะดะฐะฝะฝัั
ะฟัะธ ะฟะพะผะพัะธ ะธะตัะฐัั
ะธัะตัะบะพะน ะบะปะฐััะตัะธะทะฐัะธะธ.
#ะคัะฝะบัะธั ะดะพะปะถะฝะฐ ะฒะตัะฝััั ะฝะฐะทะฒะฐะฝะธั ะฟะตัะตะผะตะฝะฝัั
, ะฟะพ ะบะพัะพััะผ ะฑัะปะธ ะพะฑะฝะฐััะถะตะฝ ะทะฝะฐัะธะผัะต ัะฐะทะปะธัะธั ะผะตะถะดั ะฒัะดะตะปะตะฝะฝัะผะธ ะบะปะฐััะตัะฐะผะธ (p < 0.05)
# Cluster the rows of 'test_data' into 'n_cluster' groups (hierarchical
# clustering on Euclidean distance) and return the names of the variables
# whose one-way ANOVA against the cluster factor has p < 0.05.
get_difference <- function(test_data, n_cluster) {
  d <- dist(test_data)
  fit <- hclust(d)
  # BUG FIX: 'n_cluste' was a typo for the 'n_cluster' argument.
  test_data$cluster <- factor(cutree(fit, k = n_cluster))
  # Extract the p-value by its full column name instead of relying on
  # '$P' partial matching against "Pr(>F)".
  mod <- sapply(test_data[, colnames(test_data) != "cluster"],
                function(x) anova(aov(x ~ cluster, data = test_data))[["Pr(>F)"]][1])
  return(names(mod)[mod < 0.05])
}
#ะะฐะฟะธัะธัะต ััะฝะบัะธั get_pc, ะบะพัะพัะฐั ะฟะพะปััะฐะตั ะฝะฐ ะฒั
ะพะด dataframe ั ะฟัะพะธะทะฒะพะปัะฝัะผ ัะธัะปะพะผ ะบะพะปะธัะตััะฒะตะฝะฝัั
ะฟะตัะตะผะตะฝะฝัั
.
#ะคัะฝะบัะธั ะดะพะปะถะฝะฐ ะฒัะฟะพะปะฝััั ะฐะฝะฐะปะธะท ะณะปะฐะฒะฝัั
ะบะพะผะฟะพะฝะตะฝั ะธ ะดะพะฑะฐะฒะปััั ะฒ
#ะธัั
ะพะดะฝัะต ะดะฐะฝะฝัะต ะดะฒะต ะฝะพะฒัะต ะบะพะปะพะฝะบะธ ัะพ ะทะฝะฐัะตะฝะธัะผะธ ะฟะตัะฒะพะน ะธ ะฒัะพัะพะน
#ะณะปะฐะฒะฝะพะน ะบะพะผะฟะพะฝะตะฝัั. ะะพะฒัะต ะฟะตัะตะผะตะฝะฝัะต ะดะพะปะถะฝั ะฝะฐะทัะฒะฐัััั "PC1" ะธ "PC2" ัะพะพัะฒะตัััะฒะตะฝะฝะพ
test_data <- read.csv("https://stepic.org/media/attachments/course/524/pca_test.csv")
# Append the first two principal-component scores of 'd' to 'd'.
# prcomp() names the score columns "PC1" and "PC2", as required.
get_pc <- function(d) {
  # BUG FIX: the original body ignored the argument 'd' and read/overwrote a
  # global 'test_data'; operate on the input itself.
  cbind(d, prcomp(d)$x[, c(1, 2)])
}
#ะฃัะปะพะถะฝะธะผ ะฟัะตะดัะดัััั ะทะฐะดะฐัั!
#ะะฐะฟะธัะธัะต ััะฝะบัะธั get_pca2, ะบะพัะพัะฐั ะฟัะธะฝะธะผะฐะตั ะฝะฐ ะฒั
ะพะด dataframe ั ะฟัะพะธะทะฒะพะปัะฝัะผ ัะธัะปะพะผ
#ะบะพะปะธัะตััะฒะตะฝะฝัั
ะฟะตัะตะผะตะฝะฝัั
. ะคัะฝะบัะธั ะดะพะปะถะฝะฐ ัะฐัััะธัะฐัั, ะบะฐะบะพะต ะผะธะฝะธะผะฐะปัะฝะพะต ัะธัะปะพ ะณะปะฐะฒะฝัั
ะบะพะผะฟะพะฝะตะฝั ะพะฑัััะฝัะตั
#ะฑะพะปััะต 90% ะธะทะผะตะฝัะธะฒะพััะธ ะฒ ะธัั
ะพะดะฝัั
ะดะฐะฝะฝัั
ะธ ะดะพะฑะฐะฒะปััั ะทะฝะฐัะตะฝะธั ััะธั
ะบะพะผะฟะพะฝะตะฝั ะฒ ะธัั
ะพะดะฝัะน dataframe ะฒ ะฒะธะดะต ะฝะพะฒัั
ะฟะตัะตะผะตะฝะฝัั
# Append to 'data' the scores of the smallest number of principal
# components whose cumulative proportion of explained variance is >= 90%.
get_pca2 <- function(data){
  # BUG FIX: the original ran prcomp() on the built-in 'swiss' dataset
  # instead of the 'data' argument.
  pca <- prcomp(data)
  # Row 3 of the importance matrix is the cumulative proportion of variance.
  cum_prop <- summary(pca)$importance[3, ]
  # Smallest k with cumulative proportion >= 0.90 (same threshold as the
  # original accumulate-and-break loop; the last entry is always 1).
  n <- which(cum_prop >= 0.90)[1]
  # drop = FALSE keeps the "PC1" column name when only one component is kept.
  cbind(data, pca$x[, seq_len(n), drop = FALSE])
}
#ะะฐะดะฐัะฐ ะดะปั ะงะฐะบะฐ ะะพััะธัะฐ.
test_data <- read.csv("https://stepic.org/media/attachments/course/524/Norris_2.csv")
is_multicol <- function(test_data) {corr <- cor(test_data)
diag(corr) <- 0
out <- row.names(which(abs(corr) == 1, arr.ind=TRUE))
if (length(out) == 0) {
print("There is no collinearity in the data")
} else {print(out)
}
}
# ะดะพะฟะพะปะฝะธัะต ะบะพะด, ััะพะฑั ะฟะพะปััะธัั ะณัะฐัะธะบ
library(ggplot2)
my_plot <- ggplot(swiss, aes(Education, Catholic, col = cluster))+
geom_point()+
geom_smooth(method = "lm")
|
# Web-log / content-digest exploration script: joins SQLite lookup tables
# (articles, types, topics) with a parsed log data frame, plots topic/type
# frequencies, and writes the merged result to CSV.
library(sqldf)
#connecting to SQL db and joining the tables to fetch the type id and topic id
# NOTE(review): hard-coded absolute path to the SQLite file -- adjust before
# running elsewhere. dbConnect/SQLite come from RSQLite (loaded via sqldf).
db <- dbConnect(SQLite(), dbname=("C:\\Users\\Sandhya\\Documents\\correlationone\\content_digest.db\\contentDiscovery.db"))
dbListTables(db)
dbListFields(db, "articles")
p <- dbGetQuery(db,"select * from articles")
# NOTE(review): the ''' ... ''' fences below are NOT R comments; they parse
# as throwaway string expressions that happen to swallow the enclosed query
# text. They effectively disable these queries but are fragile (a stray
# quote inside would break parsing) -- prefer commenting the block out.
'''
query <- dbGetQuery(db,"select distinct u.user_id , u.email, em.content_id ,
em.email_id , a.article_id ,a.author_id , top.topic_id ,
t.type_id ,top.name,t.name typename from
users u ,
email_content em,
articles a,
types t,
topics top
where u.user_id = em.user_id
and em.article_id = a.article_id
and a.type_id = t.type_id
and a.topic_id = top.topic_id
")
'''
# Article lookup keyed by article_id with its topic and type ids.
article <- dbGetQuery(db," select distinct a.article_id,top.topic_id,t.type_id
from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id ")
# Same join but resolving the human-readable topic/type names.
topicname <- dbGetQuery(db," select distinct a.article_id,top.name topicname,t.name typename
from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id ")
type <- dbGetQuery(db,"select distinct type_id from types")
topic <- dbGetQuery(db,"select distinct topic_id from topics")
'''
f <- dbGetQuery(db,"select distinct a.article_id,top.name topicname,t.name typename from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id and article_id = 1")
'''
#converting the query output to df
articledetail <- data.frame(article)
topicdetail <- data.frame(topicname)
# NOTE(review): 'tempfile' is not defined in this script (in base R it is a
# function) -- presumably a log data frame created elsewhere in the session;
# verify before running.
colnames(tempfile)[2] <- "article_id" #changing the colnames into common names
#merging the two tables from db and log
tottypeartsent <- merge(tempfile,articledetail,by = "article_id")
dim(tottypeartsent)
summary(tottypeartsent)
#ploting the topics and types of article viewed
head(topicdetail)
typeplot <- merge (tempfile , topicdetail, by = "article_id")
head(typeplot)
#table(typeplot[,4])
barplot(table(typeplot[,6])) #topic plot
table(typeplot[,5])
barplot(table(typeplot[,7])) #type plot
#writing the merged output to csv
# NOTE(review): write.csv ignores/warns on 'col.names' and 'sep' -- these
# arguments have no effect here; write.table would honor them.
write.csv(tottypeartsent, file = "datasetlinks.csv",row.names=FALSE, na="",col.names=TRUE,sep = ",")
| /sqllitecor.R | no_license | sandhiyakothandan/Web-Log-Data-Analysis | R | false | false | 2,679 | r | library(sqldf)
#connecting to SQL db and joining the tables to fetch the type id and topic id
db <- dbConnect(SQLite(), dbname=("C:\\Users\\Sandhya\\Documents\\correlationone\\content_digest.db\\contentDiscovery.db"))
dbListTables(db)
dbListFields(db, "articles")
p <- dbGetQuery(db,"select * from articles")
'''
query <- dbGetQuery(db,"select distinct u.user_id , u.email, em.content_id ,
em.email_id , a.article_id ,a.author_id , top.topic_id ,
t.type_id ,top.name,t.name typename from
users u ,
email_content em,
articles a,
types t,
topics top
where u.user_id = em.user_id
and em.article_id = a.article_id
and a.type_id = t.type_id
and a.topic_id = top.topic_id
")
'''
article <- dbGetQuery(db," select distinct a.article_id,top.topic_id,t.type_id
from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id ")
topicname <- dbGetQuery(db," select distinct a.article_id,top.name topicname,t.name typename
from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id ")
type <- dbGetQuery(db,"select distinct type_id from types")
topic <- dbGetQuery(db,"select distinct topic_id from topics")
'''
f <- dbGetQuery(db,"select distinct a.article_id,top.name topicname,t.name typename from
articles a, types t, topics top
where a.type_id = t.type_id
and a.topic_id = top.topic_id and article_id = 1")
'''
#converting the query output to df
articledetail <- data.frame(article)
topicdetail <- data.frame(topicname)
colnames(tempfile)[2] <- "article_id" #changing the colnames into common names
#merging the two tables from db and log
tottypeartsent <- merge(tempfile,articledetail,by = "article_id")
dim(tottypeartsent)
summary(tottypeartsent)
#ploting the topics and types of article viewed
head(topicdetail)
typeplot <- merge (tempfile , topicdetail, by = "article_id")
head(typeplot)
#table(typeplot[,4])
barplot(table(typeplot[,6])) #topic plot
table(typeplot[,5])
barplot(table(typeplot[,7])) #type plot
#writing the merged output to csv
# write.csv() fixes 'col.names' and 'sep' itself; passing them is ignored
# with a warning, so only the arguments write.csv() actually honours are kept.
write.csv(tottypeartsent, file = "datasetlinks.csv", row.names = FALSE, na = "")
|
\name{a.mes2}
\alias{a.mes2}
\title{Mean Values from ANCOVA F-statistic with Pooled SD to Effect Size
}
\description{
Converts an ANCOVA F-statistic with a pooled standard deviation to an effect size of \eqn{d} (mean difference), \eqn{g} (unbiased estimate of \eqn{d}), \eqn{r} (correlation coefficient), \eqn{z'} (Fisher's \eqn{z}), and log odds ratio. The variances, confidence intervals and p-values of these estimates are also computed, along with NNT (number needed to treat), U3 (Cohen's \eqn{U_(3)} overlapping proportions of distributions), CLES (Common Language Effect Size) and Cliff's Delta.
}
\usage{
a.mes2(m.1.adj, m.2.adj, s.pooled, n.1, n.2, R, q,
level = 95, cer = 0.2, dig = 2, verbose = TRUE, id=NULL, data=NULL)
}
\arguments{
\item{m.1.adj}{Adjusted mean of treatment group from ANCOVA.
}
\item{m.2.adj}{Adjusted mean of comparison group from ANCOVA.
}
\item{s.pooled}{Pooled standard deviation.
}
\item{n.1}{Treatment group sample size.
}
\item{n.2}{Comparison group sample size.
}
\item{R}{Covariate outcome correlation or multiple correlation.
}
\item{q}{Number of covariates
}
\item{level}{Confidence level. Default is \code{95\%}.
}
\item{cer}{Control group Event Rate (e.g., proportion of cases showing recovery). Default is \code{0.2} (=20\% of cases showing recovery). \bold{CER is used exclusively for NNT output.} \emph{This argument can be ignored if input is not a mean difference effect size}. Note: NNT output (described below) will NOT be meaningful if based on anything other than input from mean difference effect sizes (i.e., input of Cohen's d, Hedges' g will produce meaningful output, while correlation coefficient input will NOT produce meaningful NNT output).
}
\item{dig}{Number of digits to display. Default is \code{2} digits.
}
\item{verbose}{Print output from scalar values? If yes, then verbose=TRUE; otherwise, verbose=FALSE. Default is TRUE.
}
\item{id}{Study identifier. Default is \code{NULL}, assuming a scalar is used as input. If input is a vector dataset (i.e., \code{data.frame}, with multiple values to be computed), enter the name of the study identifier here.
}
\item{data}{name of \code{data.frame}. Default is \code{NULL}, assuming a scalar is used as input. If input is a vector dataset (i.e., \code{data.frame}, with multiple values to be computed), enter the name of the \code{data.frame} here.
}
}
\value{
\item{d}{Standardized mean difference (\eqn{d}).}
\item{var.d }{Variance of \eqn{d}.}
\item{l.d }{lower confidence limits for \eqn{d}.}
\item{u.d }{upper confidence limits for \eqn{d}.}
\item{U3.d }{Cohen's \eqn{U_(3)}, for \eqn{d}.}
\item{cl.d }{ Common Language Effect Size for \eqn{d}.}
\item{cliffs.d }{Cliff's Delta for \eqn{d}.}
\item{p.d }{p-value for \eqn{d}.}
\item{g }{Unbiased estimate of \eqn{d}.}
\item{var.g }{Variance of \eqn{g}.}
\item{l.g }{lower confidence limits for \eqn{g}.}
\item{u.g }{upper confidence limits for \eqn{g}.}
\item{U3.g }{Cohen's \eqn{U_(3)}, for \eqn{g}.}
\item{cl.g }{ Common Language Effect Size for \eqn{g}.}
\item{p.g }{p-value for \eqn{g}.}
\item{r }{Correlation coefficient.}
\item{var.r }{Variance of \eqn{r}.}
\item{l.r }{lower confidence limits for \eqn{r}.}
\item{u.r }{upper confidence limits for \eqn{r}.}
\item{p.r }{p-value for \eqn{r}.}
\item{z }{Fisher's z (\eqn{z'}).}
\item{var.z }{Variance of \eqn{z'}.}
\item{l.z }{lower confidence limits for \eqn{z'}.}
\item{u.z }{upper confidence limits for \eqn{z'}.}
\item{p.z}{p-value for \eqn{z'}.}
\item{OR}{Odds ratio.}
\item{l.or }{lower confidence limits for \eqn{OR}.}
\item{u.or }{upper confidence limits for \eqn{OR}.}
\item{p.or}{p-value for \eqn{OR}.}
\item{lOR}{Log odds ratio.}
\item{var.lor}{Variance of log odds ratio.}
\item{l.lor }{lower confidence limits for \eqn{lOR}.}
\item{u.lor }{upper confidence limits for \eqn{lOR}.}
\item{p.lor}{p-value for \eqn{lOR}.}
\item{N.total}{Total sample size.}
\item{NNT}{Number needed to treat.}
}
\note{
\bold{Detailed information regarding output values of:}
(1) \emph{Cohen's \eqn{d}, Hedges' \eqn{g} (unbiased estimate of \eqn{d}) and variance}
(2) \emph{Correlation coefficient (\eqn{r}), Fisher's \eqn{z'}, and variance}
(3) \emph{Log odds and variance}
is provided below (followed by general information about NNT, U3, Common Language Effect Size, and Cliff's Delta):
\bold{Cohen's d, Hedges' g and Variance of g}:
This function will initially calculate Cohen's \eqn{d} from the independent groups adjusted mean ANCOVA values. Then, all other effect size estimates are derived from \eqn{d} and its variance. This parameter is calculated by
\deqn{d=%
\frac{\bar Y^A_{1}-\bar Y^A_{2}}%
{S_{pooled}}}{%
d=%
(Y^A_(1) bar-Y^A_(2) bar)/%
(S_(pooled))}
where \eqn{\bar Y^A_{1}}{Y^A_(1) bar} and \eqn{\bar Y^A_{2}}{Y^A_(2) bar} are the adjusted sample means in each group and \eqn{S_{pooled}}{S_(pooled)} is the pooled standard deviation for both groups.
The variance of \eqn{d} is derived from
\deqn{v_{d}=%
\frac{(n_{1}+n_{2})(1-R^2)}%
{n_{1}n_{2}}+%
\frac{d^2}%
{2(n_{1}+n_{2})}}{%
v_(d)=%
((n_(1)+n_(2))(1-R^2))/%
(n_(1)n_(2))+%
(d^2)/%
(2(n_(1)+n_(2)))}
The effect size estimate \eqn{d} has a small upward bias (overestimates the population parameter effect size) which can be removed using a correction formula to derive the unbiased estimate of Hedges' \eqn{g}. The correction factor, \eqn{j}, is defined as
\deqn{J=%
1-%
\frac{3}%
{4df-1}}{%
J=%
1-%
(3)/%
(4df-1)}
where \eqn{df}= degrees of freedom, which is \eqn{n_{1}+n_{2}-2}{n_(1)+n_(2)-2} for two independent groups. Then, to calculate \eqn{g}
\deqn{g=%
Jd}{%
g=%
Jd }
and the variance of \eqn{g}
\deqn{v_{g}=%
J^2v_{d}}{%
v_(g)=%
J^2v_(d)}
\bold{Correlation Coefficient r, Fisher's z, and Variances}:
In this particular formula \eqn{r} is calculated as follows
\deqn{r=%
\frac{d}%
{\sqrt{d^2+a}}}{%
r=%
(d)/%
(sqrt(d^2+a))}
where \eqn{a} corrects for imbalance in \eqn{n_{1}}{n_(1)} & \eqn{n_{2}}{n_(2)} and is defined as
\deqn{a=%
\frac{(n_{1}+n_{2})^2}%
{n_{1}n_{2}}}{%
a=%
((n_(1)+n_(2))^2)/%
(n_(1)n_(2))}
The variance of \eqn{r} is then defined as
\deqn{v_{r}=%
\frac{a^2v_{d}}%
{(d^2+a)^3}}{%
v_(r)=%
(a^2v_(d))/%
((d^2+a)^3)}
Often researchers are interested in transforming \eqn{r} to \eqn{z'} (Fisher's \eqn{z}) because \eqn{r} is not normally distributed, particularly at large values of \eqn{r}. Therefore, converting to \eqn{z'} will help to normally distribute the estimate. Converting from \eqn{r} to \eqn{z'} is defined as
\deqn{z=%
.5^*log(\frac{1+r}%
{1-r})}{%
z=%
.5^*log((1+r)/%
(1-r))}
and the variance of \eqn{z}
\deqn{v_{z}=%
\frac{1}%
{n-3}}{%
v_(z)=%
(1)/%
(n-3)}
where \eqn{n} is the total sample size for groups 1 and 2.
\bold{Log Odds Ratio & Variance of Log Odds}:
In this particular formula, log odds is calculated as follows
\deqn{\log(o)=%
\frac{\pi d}%
{\sqrt{3}}}{%
log(o)=%
(pi d)/%
(sqrt(3))}
where \eqn{pi} = 3.14159. The variance of log odds is defined as
\deqn{v_{log(o)}=%
\frac{\pi^2v_{d}}%
{3}}{%
v_(log(o))=%
(pi^2v_(d))/%
(3)}
\bold{General information about NNT, U3, Common Language Effect Size, and Cliff's Delta:}
\emph{Number needed to treat (NNT).} NNT is interpreted as the number of participants that would need to be treated in one group (e.g., intervention group) in order to have one additional positive outcome over that of the outcome of a randomly selected participant in the other group (e.g., control group). In the \code{compute.es} package, NNT is calculated directly from d (Furukawa & Leucht, 2011), assuming relative normality of distribution and equal variances across groups, as follows:
\deqn{NNT=%
\frac{1}%
{\Phi{(d-\Psi{(CER}))}-CER}
}{
NNT=%
1/(Phi(d-Psi(CER))-CER)
}
\emph{U3.} Cohen (1988) proposed a method for characterizing effect sizes by expressing them in terms of (normal) distribution overlap, called U3. This statistic describes the percentage of scores in one group that are exceeded by the mean score in another group. If the population means are equal then half of the scores in the treatment group exceed half the scores in the comparison group, and U3 = 50\%. As the population mean difference increases, U3 approaches 100\% (Valentine & Cooper, 2003).
\emph{Common Language Effect Size (CLES).} CLES (McGraw & Wong, 1992) expresses the probability that a randomly selected score from one population will be greater than a randomly sampled score from another population. CLES is computed as the percentage of the normal curve that falls between negative infinity and the effect size (Valentine & Cooper, 2003).
\emph{Cliff's Delta/success rate difference.} Cliff's delta (or success rate difference; Furukawa & Leucht (2011)) is a robust alternative to Cohen's d, when data are either non-normal or ordinal (with truncated/reduced variance). Cliff's Delta is a non-parametric procedure that provides the probability that individual observations in one group are likely to be greater than the observations in another group. It is the probability that a randomly selected participant of one population has a better outcome than a randomly selected participant of the second population (minus the reverse probability). Cliff's Delta of negative 1 or positive 1 indicates no overlap between the two groups, whereas a value of 0 indicates complete overlap and equal group distributions.
\deqn{\delta=%
2 * \Phi(\frac{d}%
{\sqrt{2}})-1
}{
Cliff's Delta=%
2*Phi(d/sqrt(2))-1
}
}
\author{ AC Del Re
Much appreciation to Dr. Jeffrey C. Valentine for his contributions in implementing \eqn{U3} and \eqn{CLES} procedures and related documentation.
Maintainer: AC Del Re \email{acdelre@gmail.com}
}
\references{Borenstein (2009). Effect sizes for continuous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta analysis} (pp. 279-293). New York: Russell Sage Foundation.
Cohen, J. (1988). \emph{Statistical power for the behavioral sciences (2nd ed.)}. Hillsdale, NJ: Erlbaum.
Furukawa, T. A., & Leucht, S. (2011). How to obtain NNT from Cohen's d: comparison of two methods. \emph{PloS one, 6}(4), e19070.
McGraw, K. O. & Wong, S. P. (1992). A common language effect size statistic. \emph{Psychological Bulletin, 111,} 361-365.
Valentine, J. C. & Cooper, H. (2003). \emph{Effect size substantive interpretation guidelines:
Issues in the interpretation of effect sizes.} Washington, DC: What Works Clearinghouse.
}
\seealso{
\code{\link{mes}},
\code{\link{a.mes2}},
\code{\link{a.mes}}
}
\examples{
# CALCULATE SEVERAL EFFECT SIZES BASED ON MEAN VALUES FROM ANCOVA F-STAT (WITH POOLED SD):
a.mes2(10, 12, 1, 30, 30, .2, 2)
}
\keyword{ arith } | /man/mean_anc_to_es2.Rd | no_license | cran/compute.es | R | false | false | 11,711 | rd | \name{a.mes2}
\alias{a.mes2}
\title{Mean Values from ANCOVA F-statistic with Pooled SD to Effect Size
}
\description{
Converts an ANCOVA F-statistic with a pooled standard deviation to an effect size of \eqn{d} (mean difference), \eqn{g} (unbiased estimate of \eqn{d}), \eqn{r} (correlation coefficient), \eqn{z'} (Fisher's \eqn{z}), and log odds ratio. The variances, confidence intervals and p-values of these estimates are also computed, along with NNT (number needed to treat), U3 (Cohen's \eqn{U_(3)} overlapping proportions of distributions), CLES (Common Language Effect Size) and Cliff's Delta.
}
\usage{
a.mes2(m.1.adj, m.2.adj, s.pooled, n.1, n.2, R, q,
level = 95, cer = 0.2, dig = 2, verbose = TRUE, id=NULL, data=NULL)
}
\arguments{
\item{m.1.adj}{Adjusted mean of treatment group from ANCOVA.
}
\item{m.2.adj}{Adjusted mean of comparison group from ANCOVA.
}
\item{s.pooled}{Pooled standard deviation.
}
\item{n.1}{Treatment group sample size.
}
\item{n.2}{Comparison group sample size.
}
\item{R}{Covariate outcome correlation or multiple correlation.
}
\item{q}{Number of covariates
}
\item{level}{Confidence level. Default is \code{95\%}.
}
\item{cer}{Control group Event Rate (e.g., proportion of cases showing recovery). Default is \code{0.2} (=20\% of cases showing recovery). \bold{CER is used exclusively for NNT output.} \emph{This argument can be ignored if input is not a mean difference effect size}. Note: NNT output (described below) will NOT be meaningful if based on anything other than input from mean difference effect sizes (i.e., input of Cohen's d, Hedges' g will produce meaningful output, while correlation coefficient input will NOT produce meaningful NNT output).
}
\item{dig}{Number of digits to display. Default is \code{2} digits.
}
\item{verbose}{Print output from scalar values? If yes, then verbose=TRUE; otherwise, verbose=FALSE. Default is TRUE.
}
\item{id}{Study identifier. Default is \code{NULL}, assuming a scalar is used as input. If input is a vector dataset (i.e., \code{data.frame}, with multiple values to be computed), enter the name of the study identifier here.
}
\item{data}{name of \code{data.frame}. Default is \code{NULL}, assuming a scalar is used as input. If input is a vector dataset (i.e., \code{data.frame}, with multiple values to be computed), enter the name of the \code{data.frame} here.
}
}
\value{
\item{d}{Standardized mean difference (\eqn{d}).}
\item{var.d }{Variance of \eqn{d}.}
\item{l.d }{lower confidence limits for \eqn{d}.}
\item{u.d }{upper confidence limits for \eqn{d}.}
\item{U3.d }{Cohen's \eqn{U_(3)}, for \eqn{d}.}
\item{cl.d }{ Common Language Effect Size for \eqn{d}.}
\item{cliffs.d }{Cliff's Delta for \eqn{d}.}
\item{p.d }{p-value for \eqn{d}.}
\item{g }{Unbiased estimate of \eqn{d}.}
\item{var.g }{Variance of \eqn{g}.}
\item{l.g }{lower confidence limits for \eqn{g}.}
\item{u.g }{upper confidence limits for \eqn{g}.}
\item{U3.g }{Cohen's \eqn{U_(3)}, for \eqn{g}.}
\item{cl.g }{ Common Language Effect Size for \eqn{g}.}
\item{p.g }{p-value for \eqn{g}.}
\item{r }{Correlation coefficient.}
\item{var.r }{Variance of \eqn{r}.}
\item{l.r }{lower confidence limits for \eqn{r}.}
\item{u.r }{upper confidence limits for \eqn{r}.}
\item{p.r }{p-value for \eqn{r}.}
\item{z }{Fisher's z (\eqn{z'}).}
\item{var.z }{Variance of \eqn{z'}.}
\item{l.z }{lower confidence limits for \eqn{z'}.}
\item{u.z }{upper confidence limits for \eqn{z'}.}
\item{p.z}{p-value for \eqn{z'}.}
\item{OR}{Odds ratio.}
\item{l.or }{lower confidence limits for \eqn{OR}.}
\item{u.or }{upper confidence limits for \eqn{OR}.}
\item{p.or}{p-value for \eqn{OR}.}
\item{lOR}{Log odds ratio.}
\item{var.lor}{Variance of log odds ratio.}
\item{l.lor }{lower confidence limits for \eqn{lOR}.}
\item{u.lor }{upper confidence limits for \eqn{lOR}.}
\item{p.lor}{p-value for \eqn{lOR}.}
\item{N.total}{Total sample size.}
\item{NNT}{Number needed to treat.}
}
\note{
\bold{Detailed information regarding output values of:}
(1) \emph{Cohen's \eqn{d}, Hedges' \eqn{g} (unbiased estimate of \eqn{d}) and variance}
(2) \emph{Correlation coefficient (\eqn{r}), Fisher's \eqn{z'}, and variance}
(3) \emph{Log odds and variance}
is provided below (followed by general information about NNT, U3, Common Language Effect Size, and Cliff's Delta):
\bold{Cohen's d, Hedges' g and Variance of g}:
This function will initially calculate Cohen's \eqn{d} from the independent groups adjusted mean ANCOVA values. Then, all other effect size estimates are derived from \eqn{d} and its variance. This parameter is calculated by
\deqn{d=%
\frac{\bar Y^A_{1}-\bar Y^A_{2}}%
{S_{pooled}}}{%
d=%
(Y^A_(1) bar-Y^A_(2) bar)/%
(S_(pooled))}
where \eqn{\bar Y^A_{1}}{Y^A_(1) bar} and \eqn{\bar Y^A_{2}}{Y^A_(2) bar} are the adjusted sample means in each group and \eqn{S_{pooled}}{S_(pooled)} is the pooled standard deviation for both groups.
The variance of \eqn{d} is derived from
\deqn{v_{d}=%
\frac{(n_{1}+n_{2})(1-R^2)}%
{n_{1}n_{2}}+%
\frac{d^2}%
{2(n_{1}+n_{2})}}{%
v_(d)=%
((n_(1)+n_(2))(1-R^2))/%
(n_(1)n_(2))+%
(d^2)/%
(2(n_(1)+n_(2)))}
The effect size estimate \eqn{d} has a small upward bias (overestimates the population parameter effect size) which can be removed using a correction formula to derive the unbiased estimate of Hedges' \eqn{g}. The correction factor, \eqn{j}, is defined as
\deqn{J=%
1-%
\frac{3}%
{4df-1}}{%
J=%
1-%
(3)/%
(4df-1)}
where \eqn{df}= degrees of freedom, which is \eqn{n_{1}+n_{2}-2}{n_(1)+n_(2)-2} for two independent groups. Then, to calculate \eqn{g}
\deqn{g=%
Jd}{%
g=%
Jd }
and the variance of \eqn{g}
\deqn{v_{g}=%
J^2v_{d}}{%
v_(g)=%
J^2v_(d)}
\bold{Correlation Coefficient r, Fisher's z, and Variances}:
In this particular formula \eqn{r} is calculated as follows
\deqn{r=%
\frac{d}%
{\sqrt{d^2+a}}}{%
r=%
(d)/%
(sqrt(d^2+a))}
where \eqn{a} corrects for inbalance in \eqn{n_{1}}{n_(1)} & \eqn{n_{2}}{n_(2)} and is defined as
\deqn{a=%
\frac{(n_{1}+n_{2})^2}%
{n_{1}n_{2}}}{%
a=%
((n_(1)+n_(2))^2)/%
(n_(1)n_(2))}
The variance of \eqn{r} is then defined as
\deqn{v_{r}=%
\frac{a^2v_{d}}%
{(d^2+a)^3}}{%
v_(r)=%
(a^2v_(d))/%
((d^2+a)^3)}
Often researchers are interested in transforming \eqn{r} to \eqn{z'} (Fisher's \eqn{z}) because \eqn{r} is not normally distributed, particularly at large values of \eqn{r}. Therefore, converting to \eqn{z'} will help to normally distribute the estimate. Converting from \eqn{r} to \eqn{z'} is defined as
\deqn{z=%
.5^*log(\frac{1+r}%
{1-r})}{%
z=%
.5^*log((1+r)/%
(1-r)}
and the variance of \eqn{z}
\deqn{v_{z}=%
\frac{1}%
{n-3}}{%
v_(z)=%
(1)/%
(n-3)}
where \eqn{n} is the total sample size for groups 1 and 2.
\bold{Log Odds Ratio & Variance of Log Odds}:
In this particular formula, log odds is calculated as follows
\deqn{\log(o)=%
\frac{\pi d}%
{\sqrt{3}}}{%
log(o)=%
(pi d)/%
(sqrt(3))}
where \eqn{pi} = 3.1459. The variance of log odds is defined as
\deqn{v_{log(o)}=%
\frac{\pi^2v_{d}}%
{3}}{%
v_(log(o))=%
(pi^2v_(d))/%
(3)}
\bold{General information about NNT, U3, Common Language Effect Size, and Cliff's Delta:}
\emph{Number needed to treat (NNT).} NNT is interpreted as the number of participants that would need to be treated in one group (e.g., intervention group) in order to have one additional positive outcome over that of the outcome of a randomly selected participant in the other group (e.g., control group). In the \code{compute.es} package, NNT is calculated directly from d (Furukawa & Leucht, 2011), assuming relative normality of distribution and equal variances across groups, as follows:
\deqn{NNT=%
\frac{1}%
{\Phi{(d-\Psi{(CER}))}-CER}
}{
NNT=%
1/(Phi(d-Psi(CER))-CER)
}
\emph{U3.} Cohen (1988) proposed a method for characterizing effect sizes by expressing them in terms of (normal) distribution overlap, called U3. This statistic describes the percentage of scores in one group that are exceeded by the mean score in another group. If the population means are equal then half of the scores in the treatment group exceed half the scores in the comparison group, and U3 = 50\%. As the population mean difference increases, U3 approaches 100\% (Valentine & Cooper, 2003).
\emph{Common Language Effect Size (CLES).} CLES (McGraw & Wong, 1992) expresses the probability that a randomly selected score from one population will be greater than a randomly sampled score from another population. CLES is computed as the percentage of the normal curve that falls between negative infinity and the effect size (Valentine & Cooper, 2003).
\emph{Cliff's Delta/success rate difference.} Cliff's delta (or success rate difference; Furukawa & Leucht (2011)) is a robust alternative to Cohen's d, when data are either non-normal or ordinal (with truncated/reduced variance). Cliff's Delta is a non-parametric procedure that provides the probability that individual observations in one group are likely to be greater than the observations in another group. It is the probability that a randomly selected participant of one population has a better outcome than a randomly selected participant of the second population (minus the reverse probability). Cliff's Delta of negative 1 or positive 1 indicates no overlap between the two groups, whereas a value of 0 indicates complete overlap and equal group distributions.
\deqn{\delta=%
2 * \Phi(\frac{d}%
{\sqrt{2}})-1
}{
Cliff's Delta=%
2*Phi(d/sqrt(2))-1
}
}
\author{ AC Del Re
Much appreciation to Dr. Jeffrey C. Valentine for his contributions in implementing \eqn{U3} and \eqn{CLES} procedures and related documentation.
Maintainer: AC Del Re \email{acdelre@gmail.com}
}
\references{Borenstein (2009). Effect sizes for continuous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta analysis} (pp. 279-293). New York: Russell Sage Foundation.
Cohen, J. (1988). \emph{Statistical power for the behavioral sciences (2nd ed.)}. Hillsdale, NJ: Erlbaum.
Furukawa, T. A., & Leucht, S. (2011). How to obtain NNT from Cohen's d: comparison of two methods. \emph{PloS one, 6}(4), e19070.
McGraw, K. O. & Wong, S. P. (1992). A common language effect size statistic. \emph{Psychological Bulletin, 111,} 361-365.
Valentine, J. C. & Cooper, H. (2003). \emph{Effect size substantive interpretation guidelines:
Issues in the interpretation of effect sizes.} Washington, DC: What Works Clearinghouse.
}
\seealso{
\code{\link{mes}},
\code{\link{a.mes2}},
\code{\link{a.mes}}
}
\examples{
# CALCULATE SEVERAL EFFECT SIZES BASED ON MEAN VALUES FROM ANCOVA F-STAT (WITH POOLED SD):
a.mes2(10, 12, 1, 30, 30, .2, 2)
}
\keyword{ arith } |
# Extract the parameter vector of a white-noise kernel.
#
# Args:
#   kern: white kernel structure; only its 'variance' element is used.
#   only.values: if FALSE, the returned vector carries the name "variance".
#   untransformed.values, matlabway: accepted for interface compatibility
#     with the other kernel extractors; not used by this kernel type.
#
# Returns: numeric vector holding the kernel variance.
whiteKernExtractParam <- function(kern, only.values = TRUE,
                                  untransformed.values = TRUE,
                                  matlabway = TRUE) {
  out <- c(kern$variance)
  if (!only.values) {
    names(out) <- "variance"
  }
  out
}
| /R/vargplvm/R/whiteKernExtractParam.R | no_license | shaohua0720/vargplvm | R | false | false | 266 | r | whiteKernExtractParam <-
function (kern, only.values=TRUE,
untransformed.values=TRUE,
matlabway = TRUE) {
params <- c(kern$variance)
if ( !only.values ) {
names(params) <- c("variance")
}
return (params)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solarproduction_function.R
\name{solarproduction}
\alias{solarproduction}
\title{takes an xml file of greenbutton format, gets the address and then estimates solar production}
\usage{
solarproduction(lat, lon, annual.consumption)
}
\arguments{
\item{lat}{Latitude of the house.}
\item{lon}{Longitude of the house.}
\item{annual.consumption}{Average annual energy consumption of the household.}
}
\value{
an array of average daily solar production
}
\description{
Takes the location of a house (latitude and longitude) and its average annual consumption, and returns the estimated average daily solar production.
}
\author{
Christina Machak
}
| /man/solarproduction.Rd | no_license | xmachak/rateanalyzer | R | false | true | 572 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solarproduction_function.R
\name{solarproduction}
\alias{solarproduction}
\title{takes an xml file of greenbutton format, gets the address and then estimates solar production}
\usage{
solarproduction(lat, lon, annual.consumption)
}
\arguments{
\item{the}{address of the house and the average annual consumption}
}
\value{
an array of average daily solar production
}
\description{
Takes greenbutton data,extracts hourly interval and returns daily mean consumption
}
\author{
Christina Machak
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01_AllMethods.R
\name{invExWAS}
\alias{invExWAS}
\title{Testing the association between an exposure and a phenotype of an
ExposomeSet (modelling the exposures as response)}
\usage{
invExWAS(object, formula, filter, tef = TRUE, verbose = FALSE, warnings = TRUE)
}
\arguments{
\item{object}{\code{ExposomeSet} that will be used for the ExWAS.}
\item{formula}{\code{formula} indicating the test to be done. If any
exposure is included it will be used as covariate. \code{exwas} method will
perform the test for each exposure.}
\item{filter}{\code{expression} to be used to filter the individuals
included into the test.}
\item{tef}{(default \code{TRUE}) If \code{TRUE} it computed the
effective number of tests and the threhold for the effective
number of tests. Usually it needs imputed data.}
\item{verbose}{(default \code{FALSE}) If set o true messages along the
tests are shown.}
\item{warnings}{(default \code{TRUE}) If set to \code{FALSE} warnings will
not be displayed.}
}
\value{
An \code{ExWAS} object with the result of the association study
}
\description{
The \code{invExWAS} method performs an "Exposome-Wide Association Study" (ExWAS)
using the exposures in \link{ExposomeSet} and one of its phenotype. (modelling the exposures as response)
}
\examples{
data(exposome)
w1 <- invExWAS(expo, ~BMI)
w2 <- invExWAS(expo, ~BMI + sex)
plotExwas(w1, w2)
}
\seealso{
\link{extract} to obtain a table with the result of the ExWAS,
\link{plotExwas} to plot the results of the association
}
| /man/invExWAS-methods.Rd | permissive | isglobal-brge/rexposome | R | false | true | 1,572 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01_AllMethods.R
\name{invExWAS}
\alias{invExWAS}
\title{Testing the association between an exposure and a phenotype of an
ExposomeSet (modelling the exposures as response)}
\usage{
invExWAS(object, formula, filter, tef = TRUE, verbose = FALSE, warnings = TRUE)
}
\arguments{
\item{object}{\code{ExposomeSet} that will be used for the ExWAS.}
\item{formula}{\code{formula} indicating the test to be done. If any
exposure is included it will be used as covariate. \code{exwas} metho will
perform the test for each exposure.}
\item{filter}{\code{expression} to be used to filter the individuals
included into the test.}
\item{tef}{(default \code{TRUE}) If \code{TRUE} it computed the
effective number of tests and the threhold for the effective
number of tests. Usually it needs imputed data.}
\item{verbose}{(default \code{FALSE}) If set o true messages along the
tests are shown.}
\item{warnings}{(default \code{TRUE}) If set to \code{FALSE} warnings will
not be displayed.}
}
\value{
An code{ExWAS} object with the result of the association study
}
\description{
The \code{invExWAS} method performs an "Exposome-Wide Association Study" (ExWAS)
using the exposures in \link{ExposomeSet} and one of its phenotype. (modelling the exposures as response)
}
\examples{
data(exposome)
w1 <- invExWAS(expo, ~BMI)
w2 <- invExWAS(expo, ~BMI + sex)
plotExwas(w1, w2)
}
\seealso{
\link{extract} to obtain a table with the result of the ExWAS,
\link{plotExwas} to plot the results of the association
}
|
## These functions calculate the inverse of a matrix and cache the
## value. Because of lexical scoping in R, the entire makeCacheMatrix()
## environment as defined at the design stage stays in memory, and can
## be assigned to an S3 object that retains a complete copy of this environment.
## makeCacheMatrix() first initialises two objects: the formal argument 'x'(a matrix)
## and 'inverse', which is used later on in the code. Four functions are then defined,
## accessing values in the parent environment by lexical scoping. Then set()
## is defined, allowing new values to be assigned to the x argument and ensuring
## a cached value of inverse is cleared. get() simply retrieves x from the parent
## environment. setinverse() assigns a new value to inverse. getinverse()
## retrieves the value of inverse. Finally makeCacheMatrix() assigns each of these as a
## named element within a list, allowing them to be accessed by the $ extract operator.
## The list is then returned to the parent environment.
## Build a matrix wrapper that can memoise its inverse. The returned list
## exposes four closures sharing this function's environment:
##   set(y)        -- replace the stored matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solved) {
    cached <<- solved
  }
  getinverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## CacheInverse() returns the inverse of the special "matrix" created by
## makeCacheMatrix(). If a cached inverse exists it is returned directly
## (with a message); otherwise the inverse is computed with solve(),
## stored via setinverse(), and returned. Extra arguments in '...' are
## now forwarded to solve() -- previously they were accepted but silently
## ignored.
CacheInverse <- function(x, ...) {
  inverse <- x$getinverse()
  # Cache hit: reuse the stored inverse.
  if (!is.null(inverse)) {
    message("getting inverse from cache")
    return(inverse)
  }
  # Cache miss: compute, store, and return the inverse.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | keeshlarayne/ProgrammingAssignment2 | R | false | false | 2,039 | r |
## These functions calculate the inverse of a matrix and cache the
## value. Because of lexical scoping in R, the entire makeCacheMatrix()
## environment as defined at the design stage stays in memory, and can
## be assigned to an S3 object that retains a complete copy of this environment.
## makeCacheMatrix() first initialises two objects: the formal argument 'x'(a matrix)
## and 'inverse', which is used later on in the code. Four functions are then defined,
## accessing values in the parent environment by lexical scoping. Then set()
## is defined, allowing new values to be assigned to the x argument and ensuring
## a cached value of invese is cleared. get() simply retrieves x from the parent
## environment. setinverse() assigns a new value to inverse. getinverse()
## retrieves the value of inverse. Finally makeCacheMatrix() assigns each of these as a
## named element within a list, allowing them to be accessed by the $ extract operator.
## The list is then returned to the parent environment.
# Constructor for a cache-aware matrix wrapper; see the explanatory
# comments above for how lexical scoping keeps this environment alive.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL signals "not yet computed".
  inverse<-NULL
  # Replace the stored matrix and invalidate the cache.
  set<-function(y){
    x<<-y
    inverse<<-NULL
  }
  # Return the stored matrix.
  get<-function()x
  # Store / retrieve the cached inverse.
  setinverse<-function(solved_inverse) inverse <<-solved_inverse
  getinverse<-function() inverse
  # Named accessors, usable via the $ extract operator.
  list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## CacheInverse() initialises a single argument 'x', an object of type 'makeCacheMatrix()',
## and allows further arguments to be called with the ellipsis. First it retrieves
## the value of inverse from the argument x, and determines if there is a cached inverse
## value. If so, this value is printed. If not, the value of the inverse object is set to the
## inverse of the original makeCacheMatrix() argument and prints this value.
# NOTE(review): '...' is accepted but never forwarded to solve(); extra
# arguments are silently ignored -- confirm whether they should be passed on.
CacheInverse<-function(x,...){
  inverse<-x$getinverse()
  # Cache hit: reuse the stored inverse and say so.
  if(!is.null(inverse)){
    message("getting inverse from cache")
    return(inverse)
  }
  # Cache miss: compute, store, and return the inverse.
  inverse<-solve(x$get())
  x$setinverse(inverse)
  inverse
}
|
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' @importFrom methods is
#' @importFrom utils View installed.packages
# Spurious imports to satisfy R CMD check
#' @importFrom purrr map
NULL
# Symbols used via non-standard evaluation (dplyr/rlang/tidyr verbs); declared
# here so R CMD check does not flag them as undefined global variables.
utils::globalVariables(c(
".",
"inner_join",
"mutate",
"select",
"rename",
"quo",
"UQ",
"quo_name",
"from_row",
"from_col",
"to_row",
"to_col",
"type",
"value",
"everything",
"data_type",
"is_na",
".value",
".data_type",
"n",
":=",
".partition",
"ns_env",
"corner_row",
"corner_col",
".data",
".boundary"
))
# Concatenate lists into vectors, handling factors and NULLs, and coercing data
# types only when necessary.
#
# NOTE(review): this helper takes a single list through '...', so
# `dots <- (...)` captures that list directly (it is deliberately not
# `list(...)`) -- confirm against callers before changing the convention.
# combine_factors: combine factor elements (merging levels) via forcats::fct_c().
# fill_factor_na: replace NULL elements with an NA factor even when factors
# are not being combined, for consistency of the returned list.
concatenate <- function(..., combine_factors = TRUE, fill_factor_na = TRUE) {
  # Local c() method so POSIXct values survive concatenation with a fixed
  # UTC timezone instead of being demoted to plain numeric.
  c.POSIXct <- function(..., recursive = FALSE) {
    .POSIXct(c(unlist(lapply(list(...), unclass))), tz = "UTC")
  }
  dots <- (...)
  dots_is_null <- purrr::map_lgl(dots, rlang::is_null)
  # If all elements are NULL, return as-is
  if (all(dots_is_null)) {
    return(dots)
  }
  # If any non-NULL elements aren't scalars, return as-is
  dots_is_scalar_vector <- purrr::map_lgl(dots, rlang::is_scalar_vector)
  if (any(!dots_is_scalar_vector[!dots_is_null])) {
    return(dots)
  }
  classes <- purrr::map(dots, class)
  # It might be safe to use c() if all non-NA/NULLs are the same class.
  if (length(unique(classes[!dots_is_null])) == 1L) {
    # The first element of each class is the telling one
    all_classes <- classes[!dots_is_null][[1]]
    first_class <- all_classes[1]
    # If it's a factor, then forcats::fct_c() could combine the levels if so
    # desired.
    if (first_class %in% c("factor", "ordered")) {
      # If combining_factors then forcats::fct_c() needs all elements to be
      # factors, so replace them each with an NA factor. Or even if you're not
      # combining factors but still want some kind of consistency.
      if (combine_factors || fill_factor_na) {
        dots[dots_is_null] <- list(factor(NA_character_))
      }
      if (combine_factors) {
        return(forcats::fct_c(rlang::splice(dots)))
      }
      else {
        return(dots)
      }
    } else {
      # c() omits NULLs, so replace them with NA, which c() will promote when
      # necessary. c() demotes dates etc. when the first element is NA, so
      # replace the classes.
      NA_class_ <- NA
      if (is.list(dots)) { # e.g. dates POSIXct
        class(NA_class_) <- all_classes
        # without list() the POSIXct classes are stripped on assignment.
        dots[dots_is_null] <- list(NA_class_)
      } else {
        dots[dots_is_null] <- NA_class_
      }
      dots <- do.call(c, c(dots, use.names = FALSE))
      class(dots) <- all_classes
      return(dots)
    }
  }
  # Here, not every non-NA/NULL element is the same class, and c() isn't very
  # clever about homogenising things, so handle factors and dates manually.
  # c() ignores nulls, so replace them with NA.
  dots[dots_is_null] <- NA
  # Convert factors to strings before they're (potentially) coerced to integers
  factors <- purrr::map_lgl(classes, ~ .[1] %in% c("factor", "ordered"))
  dots[factors] <- purrr::map(dots[factors], as.character)
  # Convert dates to strings before they're (potentially) coerced to numbers
  dates <- purrr::map_lgl(classes, ~ .[1] %in% c("Date", "POSIXct", "POSIXlt"))
  dots[dates] <- purrr::map(dots[dates], format, justify = "none", trim = TRUE)
  # Finally go with c()'s default homogenising of remaining classes. Don't use
  # purrr::flatten(), because it strips classes from dates.
  do.call(c, c(dots, use.names = FALSE))
}
# Return a single NA of the same type (and class) as the given vector.
# An integer NA subscript is used rather than the logical `x[NA]`: a logical
# NA subscript is recycled to length(x), which would return length(x) NAs
# instead of the single NA this helper promises.
na_of_type <- function(x) x[NA_integer_]
# Format one element of a list-column created by pack(): look up a
# user-supplied formatter by the element's type name and apply it, falling
# back to returning the value unchanged when no formatter is registered
# for that type.
maybe_format_list_element <- function(x, name, functions) {
  formatter <- functions[[name]]
  if (is.null(formatter)) {
    x
  } else {
    formatter(x)
  }
}
# Translate any accepted spelling of a direction into its canonical form.
# Accepts the canonical names themselves ("up-left", "up", ..., "left-ish"),
# sixteen-point compass aliases (NNW, N, ...), and the ABOVE/RIGHT/BELOW/LEFT
# keywords for the "-ish" family. Unrecognised input is an error.
standardise_direction <- function(direction) {
  stopifnot(length(direction) == 1L)
  # Aliases, listed in the same order as their canonical equivalents.
  aliases <-
    c(NNW = "up-left", N = "up", NNE = "up-right",
      ENE = "right-up", E = "right", ESE = "right-down",
      SSE = "down-right", S = "down", SSW = "down-left",
      WSW = "left-down", W = "left", WNW = "left-up",
      ABOVE = "up-ish", RIGHT = "right-ish",
      BELOW = "down-ish", LEFT = "left-ish")
  # Canonical names map to themselves; aliases map to their canonical name.
  canonical <- unname(aliases)
  names(canonical) <- canonical
  dictionary <- c(canonical, aliases)
  hit <- match(direction, names(dictionary))
  if (is.na(hit)) {
    stop("The direction \"", direction, "\" is not recognised. See ?directions.")
  }
  unname(dictionary[[hit]])
}
| /R/utils.R | permissive | nacnudus/unpivotr | R | false | false | 4,967 | r | #' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
#' @importFrom methods is
#' @importFrom utils View installed.packages
# Spurious imports to satisfy R CMD check
#' @importFrom purrr map
NULL
# Declare the symbols used via non-standard evaluation (dplyr/tidyr/rlang
# column references and pronouns) so R CMD check does not flag them as
# undefined global variables.
utils::globalVariables(c(
  ".",
  "inner_join",
  "mutate",
  "select",
  "rename",
  "quo",
  "UQ",
  "quo_name",
  "from_row",
  "from_col",
  "to_row",
  "to_col",
  "type",
  "value",
  "everything",
  "data_type",
  "is_na",
  ".value",
  ".data_type",
  "n",
  ":=",
  ".partition",
  "ns_env",
  "corner_row",
  "corner_col",
  ".data",
  ".boundary"
))
# Concatenate list elements into a vector, handling factors and NULLs, and
# coercing data types only when necessary.
#
# combine_factors: if TRUE, factor elements are combined with
#   forcats::fct_c(), merging their level sets; if FALSE a list of factors is
#   returned unchanged.
# fill_factor_na: if TRUE, NULL elements alongside factors are replaced with a
#   length-1 NA factor so the result is consistent even when not combining.
# Returns a vector when the elements share (or can be homogenised to) a common
# class, otherwise the (possibly modified) list.
concatenate <- function(..., combine_factors = TRUE, fill_factor_na = TRUE) {
  # Local c() method so concatenating POSIXct keeps the class instead of being
  # demoted to numeric.
  # NOTE(review): hard-codes tz = "UTC", discarding the inputs' timezones --
  # confirm that is acceptable for callers.
  c.POSIXct <- function(..., recursive = FALSE) {
    .POSIXct(c(unlist(lapply(list(...), unclass))), tz = "UTC")
  }
  # NOTE(review): `(...)` only evaluates cleanly when exactly one argument is
  # supplied; callers appear to pass a single list of values -- confirm.
  dots <- (...)
  dots_is_null <- purrr::map_lgl(dots, rlang::is_null)
  # If all elements are NULL, return the list as-is
  if (all(dots_is_null)) {
    return(dots)
  }
  # If any non-NULL elements aren't scalars, return the list as-is
  dots_is_scalar_vector <- purrr::map_lgl(dots, rlang::is_scalar_vector)
  if (any(!dots_is_scalar_vector[!dots_is_null])) {
    return(dots)
  }
  classes <- purrr::map(dots, class)
  # It might be safe to use c() if all non-NA/NULL elements share one class.
  if (length(unique(classes[!dots_is_null])) == 1L) {
    # The first element of the class vector is the telling one
    all_classes <- classes[!dots_is_null][[1]]
    first_class <- all_classes[1]
    # Factors get special handling: forcats::fct_c() can merge their levels.
    if (first_class %in% c("factor", "ordered")) {
      # fct_c() needs every element to be a factor, so replace NULLs with a
      # length-1 NA factor; do the same if a consistent list is wanted even
      # without combining.
      if (combine_factors || fill_factor_na) {
        dots[dots_is_null] <- list(factor(NA_character_))
      }
      if (combine_factors) {
        return(forcats::fct_c(rlang::splice(dots)))
      }
      else {
        return(dots)
      }
    } else {
      # c() omits NULLs, so replace them with NA, which c() will promote when
      # necessary. c() demotes dates etc. when the first element is NA, so
      # the class is reapplied after concatenation.
      NA_class_ <- NA
      if (is.list(dots)) { # e.g. dates POSIXct
        class(NA_class_) <- all_classes
        # without list() the POSIXct classes are stripped on assignment.
        dots[dots_is_null] <- list(NA_class_)
      } else {
        dots[dots_is_null] <- NA_class_
      }
      dots <- do.call(c, c(dots, use.names = FALSE))
      class(dots) <- all_classes
      return(dots)
    }
  }
  # Here, not every non-NA/NULL element is the same class, and c() isn't very
  # clever about homogenising things, so handle factors and dates manually.
  # c() ignores NULLs, so replace them with NA.
  dots[dots_is_null] <- NA
  # Convert factors to strings before they're (potentially) coerced to integers
  factors <- purrr::map_lgl(classes, ~ .[1] %in% c("factor", "ordered"))
  dots[factors] <- purrr::map(dots[factors], as.character)
  # Convert dates to strings before they're (potentially) coerced to numbers
  dates <- purrr::map_lgl(classes, ~ .[1] %in% c("Date", "POSIXct", "POSIXlt"))
  dots[dates] <- purrr::map(dots[dates], format, justify = "none", trim = TRUE)
  # Finally go with c()'s default homogenising of remaining classes. Don't use
  # purrr::flatten(), because it strips classes from dates.
  do.call(c, c(dots, use.names = FALSE))
}
# Return NA of the same type as the given vector (subsetting with NA keeps
# the vector's class, e.g. factor or Date).
# NOTE(review): the logical NA index is recycled by `[`, so this yields
# length(x) NAs rather than a single NA when length(x) > 1 -- confirm callers
# only rely on this for the intended lengths.
na_of_type <- function(x) x[NA]
# Apply a user-supplied formatter to one list-element of a list-column created
# by pack(), looked up by the element's name. Elements with no matching
# formatter are returned untouched.
maybe_format_list_element <- function(x, name, functions) {
  formatter <- functions[[name]]
  if (is.null(formatter)) {
    return(x)
  }
  formatter(x)
}
# Translate any accepted spelling of a direction into its canonical form.
# Accepts the canonical names themselves ("up-left", "up", ..., "left-ish"),
# sixteen-point compass aliases (NNW, N, ...), and the ABOVE/RIGHT/BELOW/LEFT
# keywords for the "-ish" family. Unrecognised input is an error.
standardise_direction <- function(direction) {
  stopifnot(length(direction) == 1L)
  # Aliases, listed in the same order as their canonical equivalents.
  aliases <-
    c(NNW = "up-left", N = "up", NNE = "up-right",
      ENE = "right-up", E = "right", ESE = "right-down",
      SSE = "down-right", S = "down", SSW = "down-left",
      WSW = "left-down", W = "left", WNW = "left-up",
      ABOVE = "up-ish", RIGHT = "right-ish",
      BELOW = "down-ish", LEFT = "left-ish")
  # Canonical names map to themselves; aliases map to their canonical name.
  canonical <- unname(aliases)
  names(canonical) <- canonical
  dictionary <- c(canonical, aliases)
  hit <- match(direction, names(dictionary))
  if (is.na(hit)) {
    stop("The direction \"", direction, "\" is not recognised. See ?directions.")
  }
  unname(dictionary[[hit]])
}
|
#1
# Hypothesis 1: lonely students' scores differ from their peers'.
# Depends on `bsg` (TIMSS student background data) being loaded beforehand:
# bsbg16b is the "other students left me out" item; bsssci*/bsmmat* are the
# plausible values for science and maths achievement.
lone <- bsg %>%
  select(student = idstud, country = idcntry, left = bsbg16b, contains("bsssci"), contains("bsmmat")) %>%
  # average the plausible values into one overall score
  # NOTE(review): assumes columns 4:13 are exactly the ten plausible values --
  # confirm the column order produced by select() above.
  mutate(score = rowMeans(.[, 4:13])) %>%
  # drop missing/invalid response codes (>= 5)
  filter(left < 5) %>%
  # codes 1-2 = agrees they are left out ("yes"), 3-4 = disagrees ("no")
  mutate(alone = ifelse(left < 3, "yes", "no")) %>%
  select(student, country, alone, score)
# static density plot of score by loneliness
ggplot(lone, aes(x = score, fill = alone)) + geom_density(alpha = 0.4) + ggtitle("Density of score based on loneliness") +
  ylab("density") +
  xlab("score") +
  guides(fill = guide_legend(title = "lonely"))
group1 <- lone %>% filter(alone == "yes")
group2 <- lone %>% filter(alone == "no")
# interactive (highcharter) version of the same comparison
hchart(density(group1$score), type = "area", name = list("alone")) %>%
  hc_add_series(density(group2$score), type = "area", name = list("not alone")) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on loneliness", style = list(fontWeight = "bold")) %>%
  hc_add_theme(hc_theme_google())
# one-sided two-sample t-test; `alternative` is spelled out instead of the
# partially matched "alt".
# NOTE(review): with group order "no" < "yes", alternative = "greater" tests
# mean(no) > mean(yes) -- confirm this is the intended direction.
t.test(score ~ alone, data = lone, alternative = "greater")
#2
# Hypothesis 2: classes whose teachers use discussion get better results.
# Depends on `btg` (teacher background data) and `tchr_std_perf`
# (teacher-level mean student score), both loaded beforehand; btbg14d is the
# classroom-discussion frequency item.
tchr_inq <- btg %>%
  select(country = idcntry, teacher = idteach, discus = btbg14d) %>%
  filter(!is.na(country) & !is.na(teacher) & !is.na(discus)) %>%
  # drop omitted/invalid response codes (>= 5)
  filter(discus < 5)
# attach scores and label the 1-4 frequency codes
discus_res <- tchr_inq %>%
  inner_join(tchr_std_perf, by = c("country", "teacher")) %>%
  mutate(discussion = ifelse(discus == 1, "Every or almost every lesson",
                             ifelse(discus == 2, "About half the lessons",
                                    ifelse(discus == 3, "Some lessons", "Never"))))
# boxplot of score by discussion frequency ("disscusion" typo fixed in xlab)
discus_res %>% ggplot(mapping = aes(discussion, score, fill = discussion)) +
  geom_boxplot(notch = FALSE) +
  ylab("score") +
  xlab("discussion in class") +
  ggtitle("Density of score based on class discussion") +
  guides(fill = guide_legend(title = "class discussion"))
# interactive density per discussion level
hchart(density(filter(discus_res, discussion == "Every or almost every lesson")$score), name = list("Every or almost every lesson")) %>%
  hc_add_series(density(filter(discus_res, discussion == "About half the lessons")$score), name = list("About half the lessons")) %>%
  hc_add_series(density(filter(discus_res, discussion == "Some lessons")$score), name = list("Some lessons")) %>%
  hc_add_series(density(filter(discus_res, discussion == "Never")$score), name = list("Never")) %>%
  hc_add_theme(hc_theme_ft()) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on class discussion", style = list(fontWeight = "bold"))
# one-way ANOVA across the four discussion levels
summary(aov(score ~ discussion, data = discus_res))
# one-sided t-test: "every lesson" vs "never"; `alternative` spelled out
# instead of the partially matched "alt".
# NOTE(review): alternative = "less" tests mean(every lesson) < mean(never),
# the opposite of the stated hypothesis -- confirm the intended direction.
discus_all <- discus_res %>% filter(discussion == "Every or almost every lesson")
discuss_no <- discus_res %>% filter(discussion == "Never")
t.test(discus_all$score, discuss_no$score, alternative = "less")
#3
# Hypothesis 3: cellphone ownership is associated with worse performance.
# Depends on `bsg` (student background data) being loaded beforehand;
# bsbg06f is the "do you have a mobile phone" home-possessions item.
std_edu <- bsg %>%
  select(student = idstud, country = idcntry, cell = bsbg06f, contains("bsssci"), contains("bsmmat")) %>%
  # average the plausible values into one overall score
  # NOTE(review): assumes columns 4:13 are exactly the ten plausible values.
  mutate(score = rowMeans(.[, 4:13])) %>%
  # keep only valid yes/no codes (1 = yes, 2 = no)
  filter(cell < 3) %>%
  filter(!is.na(country) & !is.na(student)) %>%
  mutate(cellphone = ifelse(cell == 1, "yes", "no")) %>%
  select(student, country, cellphone, score)
# boxplot of score by cellphone possession ("possesion" typo fixed in labels)
std_edu %>% ggplot(mapping = aes(cellphone, score, fill = cellphone)) +
  geom_boxplot(notch = FALSE) +
  ylab("score") +
  xlab("having cellphone") +
  ggtitle("Density of score based on cellphone possession") +
  guides(fill = guide_legend(title = "cellphone possession"))
# interactive density per group
hchart(density(filter(std_edu, cellphone == "yes")$score), name = list("yes")) %>%
  hc_add_series(density(filter(std_edu, cellphone == "no")$score), name = list("no")) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on cellphone possession", style = list(fontWeight = "bold"))
# one-sided t-test with `alternative` spelled out (was partially matched "alt").
# NOTE(review): with group order "no" < "yes", alternative = "less" tests
# mean(no) < mean(yes), i.e. owners scoring HIGHER -- opposite of the stated
# hypothesis; confirm the intended direction.
t.test(score ~ cellphone, data = std_edu, alternative = "less")
| /hw_04/codes/A11.R | no_license | Minam7/Data_Analysis_HW | R | false | false | 3,887 | r | #1
# Hypothesis 1: lonely students' scores differ from their peers'.
# Depends on `bsg` (TIMSS student background data) being loaded beforehand:
# bsbg16b is the "other students left me out" item; bsssci*/bsmmat* are the
# plausible values for science and maths achievement.
lone <- bsg %>%
  select(student = idstud, country = idcntry, left = bsbg16b, contains("bsssci"), contains("bsmmat")) %>%
  # average the plausible values into one overall score
  # NOTE(review): assumes columns 4:13 are exactly the ten plausible values --
  # confirm the column order produced by select() above.
  mutate(score = rowMeans(.[, 4:13])) %>%
  # drop missing/invalid response codes (>= 5)
  filter(left < 5) %>%
  # codes 1-2 = agrees they are left out ("yes"), 3-4 = disagrees ("no")
  mutate(alone = ifelse(left < 3, "yes", "no")) %>%
  select(student, country, alone, score)
# static density plot of score by loneliness
ggplot(lone, aes(x = score, fill = alone)) + geom_density(alpha = 0.4) + ggtitle("Density of score based on loneliness") +
  ylab("density") +
  xlab("score") +
  guides(fill = guide_legend(title = "lonely"))
group1 <- lone %>% filter(alone == "yes")
group2 <- lone %>% filter(alone == "no")
# interactive (highcharter) version of the same comparison
hchart(density(group1$score), type = "area", name = list("alone")) %>%
  hc_add_series(density(group2$score), type = "area", name = list("not alone")) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on loneliness", style = list(fontWeight = "bold")) %>%
  hc_add_theme(hc_theme_google())
# one-sided two-sample t-test; `alternative` is spelled out instead of the
# partially matched "alt".
# NOTE(review): with group order "no" < "yes", alternative = "greater" tests
# mean(no) > mean(yes) -- confirm this is the intended direction.
t.test(score ~ alone, data = lone, alternative = "greater")
#2
# Hypothesis 2: classes whose teachers use discussion get better results.
# Depends on `btg` (teacher background data) and `tchr_std_perf`
# (teacher-level mean student score), both loaded beforehand; btbg14d is the
# classroom-discussion frequency item.
tchr_inq <- btg %>%
  select(country = idcntry, teacher = idteach, discus = btbg14d) %>%
  filter(!is.na(country) & !is.na(teacher) & !is.na(discus)) %>%
  # drop omitted/invalid response codes (>= 5)
  filter(discus < 5)
# attach scores and label the 1-4 frequency codes
discus_res <- tchr_inq %>%
  inner_join(tchr_std_perf, by = c("country", "teacher")) %>%
  mutate(discussion = ifelse(discus == 1, "Every or almost every lesson",
                             ifelse(discus == 2, "About half the lessons",
                                    ifelse(discus == 3, "Some lessons", "Never"))))
# boxplot of score by discussion frequency ("disscusion" typo fixed in xlab)
discus_res %>% ggplot(mapping = aes(discussion, score, fill = discussion)) +
  geom_boxplot(notch = FALSE) +
  ylab("score") +
  xlab("discussion in class") +
  ggtitle("Density of score based on class discussion") +
  guides(fill = guide_legend(title = "class discussion"))
# interactive density per discussion level
hchart(density(filter(discus_res, discussion == "Every or almost every lesson")$score), name = list("Every or almost every lesson")) %>%
  hc_add_series(density(filter(discus_res, discussion == "About half the lessons")$score), name = list("About half the lessons")) %>%
  hc_add_series(density(filter(discus_res, discussion == "Some lessons")$score), name = list("Some lessons")) %>%
  hc_add_series(density(filter(discus_res, discussion == "Never")$score), name = list("Never")) %>%
  hc_add_theme(hc_theme_ft()) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on class discussion", style = list(fontWeight = "bold"))
# one-way ANOVA across the four discussion levels
summary(aov(score ~ discussion, data = discus_res))
# one-sided t-test: "every lesson" vs "never"; `alternative` spelled out
# instead of the partially matched "alt".
# NOTE(review): alternative = "less" tests mean(every lesson) < mean(never),
# the opposite of the stated hypothesis -- confirm the intended direction.
discus_all <- discus_res %>% filter(discussion == "Every or almost every lesson")
discuss_no <- discus_res %>% filter(discussion == "Never")
t.test(discus_all$score, discuss_no$score, alternative = "less")
#3
# Hypothesis 3: cellphone ownership is associated with worse performance.
# Depends on `bsg` (student background data) being loaded beforehand;
# bsbg06f is the "do you have a mobile phone" home-possessions item.
std_edu <- bsg %>%
  select(student = idstud, country = idcntry, cell = bsbg06f, contains("bsssci"), contains("bsmmat")) %>%
  # average the plausible values into one overall score
  # NOTE(review): assumes columns 4:13 are exactly the ten plausible values.
  mutate(score = rowMeans(.[, 4:13])) %>%
  # keep only valid yes/no codes (1 = yes, 2 = no)
  filter(cell < 3) %>%
  filter(!is.na(country) & !is.na(student)) %>%
  mutate(cellphone = ifelse(cell == 1, "yes", "no")) %>%
  select(student, country, cellphone, score)
# boxplot of score by cellphone possession ("possesion" typo fixed in labels)
std_edu %>% ggplot(mapping = aes(cellphone, score, fill = cellphone)) +
  geom_boxplot(notch = FALSE) +
  ylab("score") +
  xlab("having cellphone") +
  ggtitle("Density of score based on cellphone possession") +
  guides(fill = guide_legend(title = "cellphone possession"))
# interactive density per group
hchart(density(filter(std_edu, cellphone == "yes")$score), name = list("yes")) %>%
  hc_add_series(density(filter(std_edu, cellphone == "no")$score), name = list("no")) %>%
  hc_yAxis(title = list(text = "density")) %>%
  hc_xAxis(title = list(text = "score")) %>%
  hc_title(text = "Density of score based on cellphone possession", style = list(fontWeight = "bold"))
# one-sided t-test with `alternative` spelled out (was partially matched "alt").
# NOTE(review): with group order "no" < "yes", alternative = "less" tests
# mean(no) < mean(yes), i.e. owners scoring HIGHER -- opposite of the stated
# hypothesis; confirm the intended direction.
t.test(score ~ cellphone, data = std_edu, alternative = "less")
|
#Checking for 30-70
# NOTE(review): the header above says 30-70, but the loops below use a
# 40-case training set and a 60-case test set (the filename says 40_60) --
# confirm which split was intended.
# Online perceptron run for 100 epochs. calculate_output.R provides
# calculate_output(); initialise.R provides theta, weights, x_train, y,
# x_test, y_test and the per-epoch error accumulators e_temp_in/e_temp_out.
# Each epoch counts errors against the weights frozen at the epoch start
# while updating the live weights online.
# NOTE(review): rm(list = ls(all = TRUE)) wipes the global environment -- an
# anti-pattern when this file is sourced from other code; kept as-is.
rm(list = ls(all = TRUE))
source("calculate_output.R")
source("initialise.R")
require(graphics)
for (epoch in 1:100) {
  # snapshot of the weights at the start of this epoch, used for error counts
  frozen_weights <- weights
  # print(frozen_weights)
  train_err_count <- 0
  test_err_count <- 0
  # training pass over the 40 training cases
  for (case in 1:40) {
    frozen_pred <- calculate_output(theta, frozen_weights, x_train[case, 1], x_train[case, 2], x_train[case, 3], x_train[case, 4])
    live_pred <- calculate_output(theta, weights, x_train[case, 1], x_train[case, 2], x_train[case, 3], x_train[case, 4])
    # signed error of the frozen prediction: +1, -1, or 0
    if (frozen_pred == -1 && y[case] == 1) {
      signed_err <- 1
    }
    if (frozen_pred == 1 && y[case] == -1) {
      signed_err <- -1
    }
    if (frozen_pred == y[case]) {
      signed_err <- 0
    }
    train_err_count <- train_err_count + abs(signed_err)
    # perceptron update of the live weights when they misclassify this case
    if (live_pred != y[case]) {
      if (live_pred == -1 && y[case] == 1) {
        signed_err <- 1
      }
      if (live_pred == 1 && y[case] == -1) {
        signed_err <- -1
      }
      weights[1] <- weights[1] + (signed_err * x_train[case, 1])
      weights[2] <- weights[2] + (signed_err * x_train[case, 2])
      weights[3] <- weights[3] + (signed_err * x_train[case, 3])
      weights[4] <- weights[4] + (signed_err * x_train[case, 4])
      weights[5] <- weights[5] + (signed_err)
    }
  }
  e_temp_in <- c(e_temp_in, train_err_count)
  # evaluation pass over the 60 test cases, still with the frozen weights
  for (case in 1:60) {
    frozen_pred <- calculate_output(theta, frozen_weights, x_test[case, 1], x_test[case, 2], x_test[case, 3], x_test[case, 4])
    if (frozen_pred == -1 && y_test[case] == 1) {
      signed_err <- 1
    }
    if (frozen_pred == 1 && y_test[case] == -1) {
      signed_err <- -1
    }
    if (frozen_pred == y_test[case]) {
      signed_err <- 0
    }
    test_err_count <- test_err_count + abs(signed_err)
  }
  e_temp_out <- c(e_temp_out, test_err_count)
}
# print(e_temp_in)
# print(".....\n")
# print(e_temp_out)
# plot(e_temp_out)
# plot(e_temp_in) | /assignment_2/perceptron_40_60.R | no_license | tyagi-iiitv/ai-lab | R | false | false | 1,672 | r | #Checking for 30-70
# NOTE(review): this script's header comment says 30-70, but the loops below
# use a 40-case training set and a 60-case test set (the filename says
# 40_60) -- confirm which split was intended.
# Online perceptron run for 100 epochs. calculate_output.R provides
# calculate_output(); initialise.R provides theta, weights, x_train, y,
# x_test, y_test and the per-epoch error accumulators e_temp_in/e_temp_out.
# Each epoch counts errors against the weights frozen at the epoch start
# while updating the live weights online.
# NOTE(review): rm(list = ls(all = TRUE)) wipes the global environment -- an
# anti-pattern when this file is sourced from other code; kept as-is.
rm(list = ls(all = TRUE))
source("calculate_output.R")
source("initialise.R")
require(graphics)
for (epoch in 1:100) {
  # snapshot of the weights at the start of this epoch, used for error counts
  frozen_weights <- weights
  # print(frozen_weights)
  train_err_count <- 0
  test_err_count <- 0
  # training pass over the 40 training cases
  for (case in 1:40) {
    frozen_pred <- calculate_output(theta, frozen_weights, x_train[case, 1], x_train[case, 2], x_train[case, 3], x_train[case, 4])
    live_pred <- calculate_output(theta, weights, x_train[case, 1], x_train[case, 2], x_train[case, 3], x_train[case, 4])
    # signed error of the frozen prediction: +1, -1, or 0
    if (frozen_pred == -1 && y[case] == 1) {
      signed_err <- 1
    }
    if (frozen_pred == 1 && y[case] == -1) {
      signed_err <- -1
    }
    if (frozen_pred == y[case]) {
      signed_err <- 0
    }
    train_err_count <- train_err_count + abs(signed_err)
    # perceptron update of the live weights when they misclassify this case
    if (live_pred != y[case]) {
      if (live_pred == -1 && y[case] == 1) {
        signed_err <- 1
      }
      if (live_pred == 1 && y[case] == -1) {
        signed_err <- -1
      }
      weights[1] <- weights[1] + (signed_err * x_train[case, 1])
      weights[2] <- weights[2] + (signed_err * x_train[case, 2])
      weights[3] <- weights[3] + (signed_err * x_train[case, 3])
      weights[4] <- weights[4] + (signed_err * x_train[case, 4])
      weights[5] <- weights[5] + (signed_err)
    }
  }
  e_temp_in <- c(e_temp_in, train_err_count)
  # evaluation pass over the 60 test cases, still with the frozen weights
  for (case in 1:60) {
    frozen_pred <- calculate_output(theta, frozen_weights, x_test[case, 1], x_test[case, 2], x_test[case, 3], x_test[case, 4])
    if (frozen_pred == -1 && y_test[case] == 1) {
      signed_err <- 1
    }
    if (frozen_pred == 1 && y_test[case] == -1) {
      signed_err <- -1
    }
    if (frozen_pred == y_test[case]) {
      signed_err <- 0
    }
    test_err_count <- test_err_count + abs(signed_err)
  }
  e_temp_out <- c(e_temp_out, test_err_count)
}
# print(e_temp_in)
# print(".....\n")
# print(e_temp_out)
# plot(e_temp_out)
# plot(e_temp_in) |
# Type-I-error simulation for a non-inferiority binomial comparison:
# scenario 9, Wald confidence interval, 20% dropout, single (non-imputed)
# analysis. Requires the project `nibinom` package (sim_cont, wald_ci, mpars,
# miss_gen_an), ss.bounds.rds from the sample-size step, and
# funs/h0.sing.sum.R for summarising.
library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
# one-sided significance level
alpha <- 0.025
method <- 'wald'
scenario <- 9
param <- 1
anal_type <- "sing"
# sample-size row for this method/scenario combination
ss <- ss.bounds %>%
  dplyr::filter(method == "wald", scenario.id == scenario)
# dropout rate
do_val <- 0.2
# simulate 10,000 trials in parallel; each worker returns the full-data CI
# together with the CIs computed after imposing missing data
x1 <- parallel::mclapply(
  X = 1:10000,
  mc.cores = parallel::detectCores() - 1,
  FUN = function(x) {
    library(tidyr, warn.conflicts = FALSE, quietly = TRUE)
    library(dplyr, warn.conflicts = FALSE, quietly = TRUE)
    library(purrr, warn.conflicts = FALSE, quietly = TRUE)
    library(reshape2, warn.conflicts = FALSE, quietly = TRUE)
    library(mice, warn.conflicts = FALSE, quietly = TRUE)
    library(MASS, warn.conflicts = FALSE, quietly = TRUE)
    library(nibinom)
    # reproducible per-replicate seed
    set.seed(10000 * scenario + x)
    # generate full data with the desired correlation structure
    dt0 <- sim_cont(
      p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
      mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3,
      b1 = 0.1, b2 = -0.01
    )
    ci.full <- dt0 %>% wald_ci(ss$M2, 'y')
    # define missingness parameters and dropout rates
    m.param <- mpars(do = do_val, atype = anal_type)
    # impose missing values and run the single analysis
    ci.miss <- m.param %>%
      dplyr::mutate(
        results = purrr::pmap(
          list(b_trt = bt, b_y = by, b_x1 = bx1, b_x2 = bx2, b_ty = b.ty),
          miss_gen_an, dt = dt0, do = do_val,
          ci_method = wald_ci,
          sing_anal = TRUE,
          mice_anal = FALSE,
          m2 = ss$M2, seed = 10000 * scenario + x,
          method = method,
          alpha = alpha
        )
      ) %>%
      dplyr::select(missing, results) %>%
      dplyr::mutate(
        scenario.id = ss$scenario.id,
        p_C = ss$p_C,
        M2 = ss$M2,
        type = 't.H0',
        do = do_val,
        sim.id = x
      )
    ci.all <- list(ci.full, ci.miss) %>% purrr::set_names(c("ci.full", "ci.miss"))
    return(ci.all)
  }
)
# summarise type-I error and mean relative bias across the replicates
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)
| /sim_pgms/wald/do20/2xcontH0_sc9_do20_sing.R | no_license | yuliasidi/nibinom_apply | R | false | false | 2,236 | r | library(dplyr)
# Type-I-error simulation for a non-inferiority binomial comparison:
# scenario 9, Wald confidence interval, 20% dropout, single (non-imputed)
# analysis. Requires the project `nibinom` package (sim_cont, wald_ci, mpars,
# miss_gen_an), ss.bounds.rds from the sample-size step, and
# funs/h0.sing.sum.R for summarising. dplyr is loaded above.
ss.bounds <- readRDS("ss.bounds.rds")
# one-sided significance level
alpha <- 0.025
method <- 'wald'
scenario <- 9
param <- 1
anal_type <- "sing"
# sample-size row for this method/scenario combination
ss <- ss.bounds %>%
  dplyr::filter(method == "wald", scenario.id == scenario)
# dropout rate
do_val <- 0.2
# simulate 10,000 trials in parallel; each worker returns the full-data CI
# together with the CIs computed after imposing missing data
x1 <- parallel::mclapply(
  X = 1:10000,
  mc.cores = parallel::detectCores() - 1,
  FUN = function(x) {
    library(tidyr, warn.conflicts = FALSE, quietly = TRUE)
    library(dplyr, warn.conflicts = FALSE, quietly = TRUE)
    library(purrr, warn.conflicts = FALSE, quietly = TRUE)
    library(reshape2, warn.conflicts = FALSE, quietly = TRUE)
    library(mice, warn.conflicts = FALSE, quietly = TRUE)
    library(MASS, warn.conflicts = FALSE, quietly = TRUE)
    library(nibinom)
    # reproducible per-replicate seed
    set.seed(10000 * scenario + x)
    # generate full data with the desired correlation structure
    dt0 <- sim_cont(
      p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
      mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3,
      b1 = 0.1, b2 = -0.01
    )
    ci.full <- dt0 %>% wald_ci(ss$M2, 'y')
    # define missingness parameters and dropout rates
    m.param <- mpars(do = do_val, atype = anal_type)
    # impose missing values and run the single analysis
    ci.miss <- m.param %>%
      dplyr::mutate(
        results = purrr::pmap(
          list(b_trt = bt, b_y = by, b_x1 = bx1, b_x2 = bx2, b_ty = b.ty),
          miss_gen_an, dt = dt0, do = do_val,
          ci_method = wald_ci,
          sing_anal = TRUE,
          mice_anal = FALSE,
          m2 = ss$M2, seed = 10000 * scenario + x,
          method = method,
          alpha = alpha
        )
      ) %>%
      dplyr::select(missing, results) %>%
      dplyr::mutate(
        scenario.id = ss$scenario.id,
        p_C = ss$p_C,
        M2 = ss$M2,
        type = 't.H0',
        do = do_val,
        sim.id = x
      )
    ci.all <- list(ci.full, ci.miss) %>% purrr::set_names(c("ci.full", "ci.miss"))
    return(ci.all)
  }
)
# summarise type-I error and mean relative bias across the replicates
source('funs/h0.sing.sum.R')
h0.sing.sum(x1)
|
# This is the script used to clean the completejourney data
library(tidyverse)
library(lubridate)
# transactions -----------------------------------------------------------------
# Build the one-year transactions table: slice days 285-649 of the raw data,
# convert the day index to real dates anchored at 2017-01-01, construct a
# POSIXct timestamp, normalise the discount columns, and tidy names/ordering.
transactions <- read_csv("../../Data sets/Complete_Journey_UV_Version/transaction_data.csv") %>%
  # select a one year slice of the data
  filter(day >= 285, day < 650) %>%
  # convert it to a real date variable
  mutate(day = as.Date('2017-01-01') + (day - 285)) %>%
  # re-index the week
  mutate(week = as.integer(week_no - 40)) %>%
  # remove one straggling transaction on Christmas Day; assume the store was closed
  filter(day != '2017-12-25') %>%
  # create the transaction timestamp; trans_time is HHMM, so a pseudo-seconds
  # component is derived from the last two digits of basket_id
  mutate(
    trans_time = as.integer(trans_time),
    hour = substr(sprintf('%04d', trans_time), 1, 2),
    min = substr(sprintf('%04d', trans_time), 3, 4),
    sec = sprintf('%02d', as.integer(as.numeric(str_sub(as.character(basket_id), start = -2)) * 60/100))
  ) %>%
  # handle weird daylight savings time cases: 02:xx on 2017-03-12 does not
  # exist in America/New_York, so shift those rows to 03:xx
  mutate(hour = ifelse((day == as.Date('2017-03-12') & hour == '02'), '03', hour)) %>%
  unite(time, hour, min, sec, sep = ":", remove = FALSE) %>%
  mutate(transaction_timestamp = as.POSIXct(paste(day, time),
                                            format="%Y-%m-%d %H:%M:%S",
                                            tz="America/New_York")) %>%
  # positive retail discounts are treated as data errors and zeroed
  mutate(retail_disc = ifelse(retail_disc > 0, 0, retail_disc)) %>%
  # make the discount variables positive
  mutate(
    retail_disc = abs(retail_disc),
    coupon_disc = abs(coupon_disc),
    coupon_match_disc = abs(coupon_match_disc)
  ) %>%
  # rename household_key to household_id
  rename(household_id = household_key) %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  # sort by transaction datetime
  arrange(transaction_timestamp) %>%
  # reorder the variables
  select(household_id, store_id, basket_id, product_id, quantity, sales_value,
         retail_disc, coupon_disc, coupon_match_disc, week, transaction_timestamp)
# save final data set
# NOTE(review): devtools::use_data() is deprecated in current devtools in
# favour of usethis::use_data() -- consider migrating (applies file-wide).
devtools::use_data(transactions, overwrite = TRUE)
# demographics -----------------------------------------------------------------
# Build the household demographics table: rename raw columns, run a sequence
# of deterministic imputations that make household_comp, kids_count and
# marital_status mutually consistent with household_size, then convert the
# categorical variables to ordered factors and recode "Unknown" to NA.
demographics <- read_csv("../../Data sets/Complete_Journey_UV_Version/hh_demographic.csv") %>%
  rename(
    household_id = household_key,
    age = age_desc,
    income = income_desc,
    home_ownership = homeowner_desc,
    household_size = household_size_desc,
    marital_status = marital_status_code,
    household_comp = hh_comp_desc,
    kids_count = kid_category_desc
  ) %>%
  mutate_at(vars(ends_with("_id")), as.character) %>%
  mutate(
    marital_status = recode(marital_status, `A` = 'Married', `B` = "Unmarried", `U` = "Unknown"),
    home_ownership = ifelse(home_ownership == "Probable Owner", "Probable Homeowner", home_ownership),
    household_size = factor(household_size, levels = c("1", "2", "3", "4", "5+"), ordered = TRUE)
  ) %>%
  # --- deterministic imputation: reconcile household_comp, kids_count and
  # --- marital_status with one another and with household_size.
  # NOTE(review): as.integer(household_size) below returns the factor level
  # index (1..5), which matches the numeric size only because the levels are
  # declared in that order -- brittle if the levels ever change.
  mutate(household_comp = ifelse((household_comp == "Single Male" |
                                    household_comp == "Single Female") &
                                   household_size == '1', "1 Adult No Kids",
                                 household_comp)) %>%
  mutate(household_comp = ifelse((household_comp == "Single Male" |
                                    household_comp == "Single Female") &
                                   as.integer(household_size) > 1,
                                 "1 Adult Kids",
                                 household_comp)) %>%
  mutate(kids_count = ifelse(household_comp == "1 Adult No Kids" |
                               household_comp == "2 Adults No Kids",
                             '0', kids_count)) %>%
  mutate(household_comp = ifelse(household_comp == "Unknown" & kids_count ==
                                   "Unknown" & household_size == '1',
                                 "1 Adult No Kids", household_comp)) %>%
  mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                                   '3' & kids_count == '1', "2 Adults Kids",
                                 household_comp)) %>%
  mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                                   '5+' & kids_count == '3+', "2 Adults Kids",
                                 household_comp)) %>%
  mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                                   '2' & kids_count == '1', "1 Adult Kids",
                                 household_comp)) %>%
  mutate(household_comp = ifelse(household_size == '1', "1 Adult No Kids",
                                 household_comp)) %>%
  mutate(household_comp = ifelse(household_comp == "Unknown" & marital_status ==
                                   "Married" & household_size == "2",
                                 "2 Adults No Kids", household_comp)) %>%
  mutate(kids_count = ifelse(kids_count == "Unknown" & household_comp ==
                               "1 Adult Kids" & household_size == '2', '1',
                             kids_count)) %>%
  mutate(kids_count = ifelse(kids_count == "Unknown" & marital_status ==
                               "Married" & household_size == "2", '0',
                             kids_count)) %>%
  mutate(kids_count = ifelse(household_size == '2' & household_comp ==
                               '1 Adult Kids', '1', kids_count)) %>%
  mutate(kids_count = ifelse(household_comp == "2 Adults No Kids", '0',
                             kids_count)) %>%
  mutate(kids_count = ifelse(household_size == '1', '0', kids_count)) %>%
  mutate(marital_status = ifelse(marital_status == "Unknown" &
                                   (household_comp == "1 Adult Kids" |
                                      household_comp == "1 Adult No Kids"),
                                 "Unmarried", marital_status)) %>%
  # --- convert the categorical variables to ordered factors.
  # NOTE(review): household_size was already converted to a factor above; the
  # repeat below is redundant (but harmless).
  mutate(household_comp = factor(household_comp,
                                 levels = c("1 Adult Kids", "1 Adult No Kids",
                                            "2 Adults Kids", "2 Adults No Kids",
                                            "Unknown"),
                                 ordered = TRUE)) %>%
  mutate(
    kids_count = factor(kids_count, levels = c("0", "1", "2", "3+", "Unknown"), ordered = TRUE),
    age = factor(age, levels = c("19-24", "25-34", "35-44", "45-54", "55-64", "65+"), ordered = TRUE),
    home_ownership = factor(home_ownership,
                            levels = c("Renter", "Probable Renter",
                                       "Homeowner", "Probable Homeowner", "Unknown"),
                            ordered = TRUE),
    household_size = factor(household_size, levels = c("1", "2", "3", "4", "5+"), ordered = TRUE),
    marital_status = factor(marital_status, levels = c("Married", "Unmarried", "Unknown"), ordered = TRUE),
    income = factor(income,
                    levels = c("Under 15K", "15-24K", "25-34K", "35-49K",
                               "50-74K", "75-99K", "100-124K", "125-149K",
                               "150-174K", "175-199K", "200-249K", "250K+"),
                    ordered = TRUE)
  ) %>%
  # recode remaining "Unknown" values to NA across the whole data frame
  na_if("Unknown") %>%
  arrange(household_id) %>%
  select(household_id, age, income, home_ownership, marital_status,
         household_size, household_comp, kids_count)
# save final data set
demographics <- read_csv  # (removed) -- see NOTE below
# products ---------------------------------------------------------------------
# Build the products table: rename raw columns, normalise department names and
# package-size descriptions with a cascade of regex substitutions, drop a few
# corporate-use placeholder rows, and recode "no description" values to NA.
products <- read_csv("../../Data sets/Complete_Journey_UV_Version/product.csv") %>%
  rename(
    manufacturer_id = manufacturer,
    package_size = curr_size_of_product,
    product_category = commodity_desc,
    product_type = sub_commodity_desc
  ) %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  mutate(
    brand = factor(brand, levels = c("National", "Private")),
    # standardize/collapse some departments
    department = gsub("MISC\\. TRANS\\.|MISC SALES TRAN", "MISCELLANEOUS", department),
    department = gsub("VIDEO RENTAL|VIDEO|PHOTO", "PHOTO & VIDEO", department),
    department = gsub("RX|PHARMACY SUPPLY", "DRUG GM", department),
    department = gsub("DAIRY DELI|DELI/SNACK BAR", "DELI", department),
    department = gsub("PORK|MEAT-WHSE", "MEAT", department),
    department = gsub("GRO BAKERY", "GROCERY", department),
    department = gsub("KIOSK-GAS", "FUEL", department),
    department = gsub("TRAVEL & LEISUR", "TRAVEL & LEISURE", department),
    department = gsub("COUP/STR & MFG", "COUPON", department),
    department = gsub("HBC", "DRUG GM", department),
    # fix as many product size descriptions as possible: unify unit spellings,
    # strip leading junk characters, and put a space between number and unit
    package_size = gsub("CANS", "CAN", package_size),
    package_size = gsub("COUNT", "CT", package_size),
    package_size = gsub("DOZEN", "DZ", package_size),
    package_size = gsub("FEET", "FT", package_size),
    package_size = gsub("FLOZ", "FL OZ", package_size),
    package_size = gsub("GALLON|GL", "GAL", package_size),
    package_size = gsub("GRAM", "G", package_size),
    package_size = gsub("INCH", "IN", package_size),
    package_size = gsub("LIT$|LITRE|LITERS|LITER|LTR", "L", package_size),
    package_size = gsub("OUNCE|OZ\\.", "OZ", package_size),
    package_size = gsub("PACK|PKT", "PK", package_size),
    package_size = gsub("PIECE", "PC", package_size),
    package_size = gsub("PINT", "PT", package_size),
    package_size = gsub("POUND|POUNDS|LBS|LB\\.", "LB", package_size),
    package_size = gsub("QUART", "QT", package_size),
    package_size = gsub("SQFT", "SQ FT", package_size),
    package_size = gsub("^(\\*|\\+|@|:|\\)|-)", "", package_size),
    package_size = gsub("([[:digit:]])([[:alpha:]])", "\\1 \\2", package_size),
    package_size = trimws(package_size)) %>%
  # repair OCR-style "*" artefacts in product_type
  mutate(
    product_type = gsub("\\*ATTERIES", "BATTERIES", product_type),
    product_type = gsub("\\*ATH", "BATH", product_type),
    product_type = gsub("^\\*", "", product_type)
  ) %>%
  # remove these strange cases
  filter(product_category != "(CORP USE ONLY)",
         product_category != "MISCELLANEOUS(CORP USE ONLY)",
         product_type != "CORPORATE DELETES (DO NOT USE") %>%
  # how can we deal with cases where product_category == "UNKNOWN",
  # but product_type != "UNKNOWN", and values of NA? (ignore for now)
  # NOTE(review): na_if() on the whole data frame recodes these sentinel
  # strings in EVERY character column, not just the category columns.
  na_if("UNKNOWN") %>%
  na_if("NO COMMODITY DESCRIPTION") %>%
  na_if("NO SUBCOMMODITY DESCRIPTION") %>%
  na_if("NO-NONSENSE") %>%
  select(product_id, manufacturer_id, department, brand, product_category, product_type, package_size)
# save final data set
devtools::use_data(products, overwrite = TRUE)
# promotions -----------------------------------------------------------------
# Build the promotions table: factorise display/mailer location codes,
# re-index the week, and keep only weeks that exist in the 2017 transactions.
promotions <- read_csv("../../Data sets/Complete_Journey_UV_Version/causal_data.csv") %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  # re-index the week
  mutate(
    display = as.factor(display),
    mailer = as.factor(mailer),
    week = as.integer(week_no - 40)
  ) %>%
  # only select data from 2017: semi_join keeps rows whose week appears in
  # the transactions table built above
  semi_join(., transactions, by = 'week') %>%
  # sort by week first, since that is helpful to understand
  arrange(week, product_id, store_id) %>%
  select(product_id, store_id, display_location = display, mailer_location = mailer, week)
# save final data set
devtools::use_data(promotions, overwrite = TRUE)
# campaign_descriptions --------------------------------------------------------
# Build the campaign descriptions table: convert start/end day indexes to real
# dates (same 2017-01-01 anchor as transactions) and keep only campaigns that
# touch 2017.
campaign_descriptions <- read_csv("../../Data sets/Complete_Journey_UV_Version/campaign_desc.csv") %>%
  rename(
    campaign_id = campaign,
    start_date = start_day,
    end_date = end_day
  ) %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  mutate(
    # insert a space into the raw "TypeA"/"TypeB"/"TypeC" labels
    description = gsub('(Type)(A|B|C)', '\\1 \\2', description),
    description = factor(description, levels = paste('Type', LETTERS[1:3]), ordered = TRUE),
    start_date = as.Date('2017-01-01') + (start_date - 285),
    end_date = as.Date('2017-01-01') + (end_date - 285)
  ) %>%
  # keep campaigns that start or end in 2017
  filter(year(start_date) == 2017 | year(end_date) == 2017) %>%
  # sort by date since that helps understand the timing of each campaign
  arrange(start_date) %>%
  select(campaign_id, campaign_type = description, start_date, end_date) %>%
  arrange(as.numeric(campaign_id))
# campaigns --------------------------------------------------------------------
# Build the campaign-household membership table, restricted to the 2017
# campaigns kept in campaign_descriptions above.
campaigns <- read_csv("../../Data sets/Complete_Journey_UV_Version/campaign_table.csv") %>%
  rename(
    campaign_id = campaign,
    household_id = household_key
  ) %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  # remove any campaigns that did not occur in 2017
  semi_join(., campaign_descriptions, by='campaign_id') %>%
  # arrange by campaign so we can see each together
  arrange(campaign_id, household_id) %>%
  select(campaign_id, household_id)
# coupons ----------------------------------------------------------------------
# Build the coupon-product-campaign table, restricted to the 2017 campaigns
# kept in campaign_descriptions above.
coupons <- read_csv("../../Data sets/Complete_Journey_UV_Version/coupon.csv") %>%
  rename(campaign_id = campaign) %>%
  mutate(coupon_upc = as.character(coupon_upc)) %>%
  # convert the id variables to characters
  mutate_at(vars(ends_with("_id")), as.character) %>%
  # remove any campaigns that did not occur in 2017
  semi_join(., campaign_descriptions, by='campaign_id') %>%
  arrange(coupon_upc, product_id) %>%
  select(coupon_upc, product_id, campaign_id)
# coupon_redemptions -----------------------------------------------------------
# Build the coupon-redemption events table: convert the day index to a real
# date, keep only 2017 redemptions, and restrict to the 2017 campaigns.
coupon_redemptions <- read_csv("../../Data sets/Complete_Journey_UV_Version/coupon_redempt.csv") %>%
  rename(
    household_id = household_key,
    campaign_id = campaign
  ) %>%
  # convert the id variables to characters and update dates
  mutate_at(vars(ends_with("_id")), as.character) %>%
  mutate(
    coupon_upc = as.character(coupon_upc),
    redemption_date = as.Date('2017-01-01') + (day - 285)
  ) %>%
  filter(year(redemption_date) == 2017) %>%
  # remove any campaigns that did not occur in 2017
  semi_join(., campaign_descriptions, by='campaign_id') %>%
  arrange(redemption_date) %>%
  select(household_id, coupon_upc, campaign_id, redemption_date)
# Reformat campaign ID so they are 1-27 -----------------------------------
# create campaign ID matching vector
# `new_id` is an integer lookup table (1, 2, 3, ...) whose names are the
# original campaign ids in ascending numeric order.
old_id <- sort(as.numeric(unique(campaign_descriptions$campaign_id)))
new_id <- seq_along(old_id)
names(new_id) <- old_id
# function that changes campaign ID
# Maps each original campaign id in `x` (a character vector) to its
# consecutive replacement using the global `new_id` lookup vector, and
# returns a character vector the same length as `x` (the id columns are
# character, and the old loop's integer assignment coerced to character).
# Vectorized with match() instead of an element-wise which() loop; an id
# with no mapping now fails with a clear message instead of the old loop's
# obscure "replacement has length zero" error.
switch_id <- function(x) {
  idx <- match(as.character(x), names(new_id))
  if (anyNA(idx)) {
    stop("unknown campaign id(s): ",
         paste(unique(x[is.na(idx)]), collapse = ", "),
         call. = FALSE)
  }
  as.character(new_id[idx])
}
# Apply the id remapping to every table that carries campaign_id
coupon_redemptions$campaign_id <- switch_id(coupon_redemptions$campaign_id)
campaign_descriptions$campaign_id <- switch_id(campaign_descriptions$campaign_id)
campaigns$campaign_id <- switch_id(campaigns$campaign_id)
coupons$campaign_id <- switch_id(coupons$campaign_id)
# Persist the cleaned tables as package data (data/*.rda)
# NOTE(review): devtools::use_data() has been superseded by usethis::use_data()
# in recent devtools releases -- confirm the devtools version this runs under.
devtools::use_data(coupon_redemptions, overwrite = TRUE)
devtools::use_data(campaign_descriptions, overwrite = TRUE)
devtools::use_data(campaigns, overwrite = TRUE)
devtools::use_data(coupons, overwrite = TRUE)
# data check summaries ---------------------------------------------------------
# Sanity checks: total sales value per calendar day, plotted over the year and
# averaged by day of week.
daily_sales <- transactions %>%
mutate(date = as.Date(transaction_timestamp, tz="America/New_York")) %>%
group_by(date) %>%
summarize(total_sales_value = sum(sales_value, na.rm = TRUE))
# daily revenue across the year
daily_sales %>%
ggplot() +
geom_line(mapping = aes(x = date, y = total_sales_value))
# average daily revenue by weekday (Monday first)
# NOTE(review): strftime(date, '%A') is locale-dependent; the factor levels
# below assume English day names -- confirm the script runs in an English locale.
daily_sales %>%
mutate(dow = strftime(date, '%A')) %>%
mutate(dow = factor(dow, levels=c("Monday", "Tuesday", "Wednesday",
                                  "Thursday", "Friday",
                                  "Saturday", "Sunday"), ordered=TRUE)) %>%
group_by(dow) %>%
summarize(avg_sales = mean(total_sales_value)) %>%
ggplot() +
geom_bar(aes(x=dow, y=avg_sales), stat = 'identity')
| /data-raw/prep-data.R | no_license | StevenMMortimer/completejourney | R | false | false | 16,161 | r | # This is the script used to clean the completejourney data
library(tidyverse)
library(lubridate)
# transactions -----------------------------------------------------------------
# Line-item purchase records for a one-year window. The raw integer `day`
# index (285 maps to 2017-01-01) is converted to a real Date, and a POSIXct
# timestamp is reconstructed from the HHMM `trans_time` plus a seconds
# component derived from the last basket_id digits (deterministic, not random).
transactions <- read_csv("../../Data sets/Complete_Journey_UV_Version/transaction_data.csv") %>%
# select a one year slice of the data
filter(day >= 285, day < 650) %>%
# convert it to a real date variable
mutate(day = as.Date('2017-01-01') + (day - 285)) %>%
# re-index the week
mutate(week = as.integer(week_no - 40)) %>%
# remove one straggling transaction on Christmas Day we will assume they were closed
filter(day != '2017-12-25') %>%
# create the transaction timestamp, add a random seconds component
mutate(
trans_time = as.integer(trans_time),
hour = substr(sprintf('%04d', trans_time), 1, 2),
min = substr(sprintf('%04d', trans_time), 3, 4),
sec = sprintf('%02d', as.integer(as.numeric(str_sub(as.character(basket_id), start = -2)) * 60/100))
) %>%
# handle weird daylight savings time cases
# (2017-03-12 02:00-03:00 does not exist in America/New_York; shift to 03)
mutate(hour = ifelse((day == as.Date('2017-03-12') & hour == '02'), '03', hour)) %>%
unite(time, hour, min, sec, sep = ":", remove = FALSE) %>%
mutate(transaction_timestamp = as.POSIXct(paste(day, time),
                                          format="%Y-%m-%d %H:%M:%S",
                                          tz="America/New_York")) %>%
# what should we do about retail discounts that are positive?
# here we convert them to zero
mutate(retail_disc = ifelse(retail_disc > 0, 0, retail_disc)) %>%
# make the discount variables positive
mutate(
retail_disc = abs(retail_disc),
coupon_disc = abs(coupon_disc),
coupon_match_disc = abs(coupon_match_disc)
) %>%
# rename household_key to household_id
rename(household_id = household_key) %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
# sort by transaction datetime
arrange(transaction_timestamp) %>%
# reorder the variables
select(household_id, store_id, basket_id, product_id, quantity, sales_value,
       retail_disc, coupon_disc, coupon_match_disc, week, transaction_timestamp)
# save final data set
devtools::use_data(transactions, overwrite = TRUE)
# demographics -----------------------------------------------------------------
# Household demographics: rename columns, recode categorical codes, then fill
# in Unknown values of household_comp / kids_count / marital_status from the
# other fields wherever the combination makes the answer unambiguous.
# The mutate() steps below are order-dependent: later imputations rely on
# values assigned by earlier ones.
demographics <- read_csv("../../Data sets/Complete_Journey_UV_Version/hh_demographic.csv") %>%
rename(
household_id = household_key,
age = age_desc,
income = income_desc,
home_ownership = homeowner_desc,
household_size = household_size_desc,
marital_status = marital_status_code,
household_comp = hh_comp_desc,
kids_count = kid_category_desc
) %>%
mutate_at(vars(ends_with("_id")), as.character) %>%
mutate(
marital_status = recode(marital_status, `A` = 'Married', `B` = "Unmarried", `U` = "Unknown"),
home_ownership = ifelse(home_ownership == "Probable Owner", "Probable Homeowner", home_ownership),
household_size = factor(household_size, levels = c("1", "2", "3", "4", "5+"), ordered = TRUE)
) %>%
# "Single Male"/"Single Female" with household size 1 means one adult, no kids
mutate(household_comp = ifelse((household_comp == "Single Male" |
                                household_comp == "Single Female") &
                               household_size == '1', "1 Adult No Kids",
                               household_comp)) %>%
# "Single" adult with household size > 1 implies the other members are kids
mutate(household_comp = ifelse((household_comp == "Single Male" |
                                household_comp == "Single Female") &
                               as.integer(household_size) > 1,
                               "1 Adult Kids",
                               household_comp)) %>%
mutate(kids_count = ifelse(household_comp == "1 Adult No Kids" |
                           household_comp == "2 Adults No Kids",
                           '0', kids_count)) %>%
mutate(household_comp = ifelse(household_comp == "Unknown" & kids_count ==
                               "Unknown" & household_size == '1',
                               "1 Adult No Kids", household_comp)) %>%
mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                               '3' & kids_count == '1', "2 Adults Kids",
                               household_comp)) %>%
mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                               '5+' & kids_count == '3+', "2 Adults Kids",
                               household_comp)) %>%
mutate(household_comp = ifelse(household_comp == "Unknown" & household_size ==
                               '2' & kids_count == '1', "1 Adult Kids",
                               household_comp)) %>%
mutate(household_comp = ifelse(household_size == '1', "1 Adult No Kids",
                               household_comp)) %>%
mutate(household_comp = ifelse(household_comp == "Unknown" & marital_status ==
                               "Married" & household_size == "2",
                               "2 Adults No Kids", household_comp)) %>%
mutate(kids_count = ifelse(kids_count == "Unknown" & household_comp ==
                           "1 Adult Kids" & household_size == '2', '1',
                           kids_count)) %>%
mutate(kids_count = ifelse(kids_count == "Unknown" & marital_status ==
                           "Married" & household_size == "2", '0',
                           kids_count)) %>%
mutate(kids_count = ifelse(household_size == '2' & household_comp ==
                           '1 Adult Kids', '1', kids_count)) %>%
mutate(kids_count = ifelse(household_comp == "2 Adults No Kids", '0',
                           kids_count)) %>%
mutate(kids_count = ifelse(household_size == '1', '0', kids_count)) %>%
mutate(marital_status = ifelse(marital_status == "Unknown" &
                               (household_comp == "1 Adult Kids" |
                                household_comp == "1 Adult No Kids"),
                               "Unmarried", marital_status)) %>%
# convert the cleaned categoricals to ordered factors
mutate(household_comp = factor(household_comp,
                               levels = c("1 Adult Kids", "1 Adult No Kids",
                                          "2 Adults Kids", "2 Adults No Kids",
                                          "Unknown"),
                               ordered = TRUE)) %>%
mutate(
kids_count = factor(kids_count, levels = c("0", "1", "2", "3+", "Unknown"), ordered = TRUE),
age = factor(age, levels = c("19-24", "25-34", "35-44", "45-54", "55-64", "65+"), ordered = TRUE),
home_ownership = factor(home_ownership,
                        levels = c("Renter", "Probable Renter",
                                   "Homeowner", "Probable Homeowner", "Unknown"),
                        ordered = TRUE),
household_size = factor(household_size, levels = c("1", "2", "3", "4", "5+"), ordered = TRUE),
marital_status = factor(marital_status, levels = c("Married", "Unmarried", "Unknown"), ordered = TRUE),
income = factor(income,
                levels = c("Under 15K", "15-24K", "25-34K", "35-49K",
                           "50-74K", "75-99K", "100-124K", "125-149K",
                           "150-174K", "175-199K", "200-249K", "250K+"),
                ordered = TRUE)
) %>%
# any remaining "Unknown" level becomes NA
na_if("Unknown") %>%
arrange(household_id) %>%
select(household_id, age, income, home_ownership, marital_status,
       household_size, household_comp, kids_count)
# save final data set
devtools::use_data(demographics, overwrite = TRUE)
# products ---------------------------------------------------------------------
# Product catalog: rename columns, collapse duplicate department labels,
# normalize the free-text package_size units, and convert the various
# "unknown" placeholder strings to NA.
products <- read_csv("../../Data sets/Complete_Journey_UV_Version/product.csv") %>%
rename(
manufacturer_id = manufacturer,
package_size = curr_size_of_product,
product_category = commodity_desc,
product_type = sub_commodity_desc
) %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
mutate(
brand = factor(brand, levels = c("National", "Private")),
# standardize/collapse some departments
department = gsub("MISC\\. TRANS\\.|MISC SALES TRAN", "MISCELLANEOUS", department),
department = gsub("VIDEO RENTAL|VIDEO|PHOTO", "PHOTO & VIDEO", department),
department = gsub("RX|PHARMACY SUPPLY", "DRUG GM", department),
department = gsub("DAIRY DELI|DELI/SNACK BAR", "DELI", department),
department = gsub("PORK|MEAT-WHSE", "MEAT", department),
department = gsub("GRO BAKERY", "GROCERY", department),
department = gsub("KIOSK-GAS", "FUEL", department),
department = gsub("TRAVEL & LEISUR", "TRAVEL & LEISURE", department),
department = gsub("COUP/STR & MFG", "COUPON", department),
department = gsub("HBC", "DRUG GM", department),
# fix as many product size descriptions as possible
package_size = gsub("CANS", "CAN", package_size),
package_size = gsub("COUNT", "CT", package_size),
package_size = gsub("DOZEN", "DZ", package_size),
package_size = gsub("FEET", "FT", package_size),
package_size = gsub("FLOZ", "FL OZ", package_size),
package_size = gsub("GALLON|GL", "GAL", package_size),
package_size = gsub("GRAM", "G", package_size),
package_size = gsub("INCH", "IN", package_size),
package_size = gsub("LIT$|LITRE|LITERS|LITER|LTR", "L", package_size),
package_size = gsub("OUNCE|OZ\\.", "OZ", package_size),
package_size = gsub("PACK|PKT", "PK", package_size),
package_size = gsub("PIECE", "PC", package_size),
package_size = gsub("PINT", "PT", package_size),
package_size = gsub("POUND|POUNDS|LBS|LB\\.", "LB", package_size),
package_size = gsub("QUART", "QT", package_size),
package_size = gsub("SQFT", "SQ FT", package_size),
# strip stray leading punctuation, then put a space between number and unit
package_size = gsub("^(\\*|\\+|@|:|\\)|-)", "", package_size),
package_size = gsub("([[:digit:]])([[:alpha:]])", "\\1 \\2", package_size),
package_size = trimws(package_size)) %>%
mutate(
product_type = gsub("\\*ATTERIES", "BATTERIES", product_type),
product_type = gsub("\\*ATH", "BATH", product_type),
product_type = gsub("^\\*", "", product_type)
) %>%
# remove these strange cases
filter(product_category != "(CORP USE ONLY)",
       product_category != "MISCELLANEOUS(CORP USE ONLY)",
       product_type != "CORPORATE DELETES (DO NOT USE") %>%
# how can we deal with cases where product_category == "UNKNOWN",
# but product_type != "UNKNOWN", and values of NA? (ignore for now)
na_if("UNKNOWN") %>%
na_if("NO COMMODITY DESCRIPTION") %>%
na_if("NO SUBCOMMODITY DESCRIPTION") %>%
na_if("NO-NONSENSE") %>%
select(product_id, manufacturer_id, department, brand, product_category, product_type, package_size)
# save final data set
devtools::use_data(products, overwrite = TRUE)
# promotions -----------------------------------------------------------------
# In-store display / mailer promotion placements by product, store, and week,
# restricted to the weeks present in the 2017 transactions slice.
promotions <- read_csv("../../Data sets/Complete_Journey_UV_Version/causal_data.csv") %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
# re-index the week
mutate(
display = as.factor(display),
mailer = as.factor(mailer),
week = as.integer(week_no - 40)
) %>%
# only select data from 2017 (weeks that appear in transactions)
semi_join(., transactions, by = 'week') %>%
# sort by week first, since that is helpful to understand
arrange(week, product_id, store_id) %>%
select(product_id, store_id, display_location = display, mailer_location = mailer, week)
# save final data set
devtools::use_data(promotions, overwrite = TRUE)
# campaign_descriptions --------------------------------------------------------
# One row per campaign: type (Type A/B/C) plus start/end dates re-based so
# day 285 maps to 2017-01-01. Campaigns touching 2017 at either end are kept.
campaign_descriptions <- read_csv("../../Data sets/Complete_Journey_UV_Version/campaign_desc.csv") %>%
rename(
campaign_id = campaign,
start_date = start_day,
end_date = end_day
) %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
mutate(
description = gsub('(Type)(A|B|C)', '\\1 \\2', description),
description = factor(description, levels = paste('Type', LETTERS[1:3]), ordered = TRUE),
start_date = as.Date('2017-01-01') + (start_date - 285),
end_date = as.Date('2017-01-01') + (end_date - 285)
) %>%
filter(year(start_date) == 2017 | year(end_date) == 2017) %>%
# sort by date since that helps understand the timing of each campaign
# NOTE(review): this arrange() is overridden by the arrange() two lines
# below, so the final ordering is by numeric campaign_id, not start_date.
arrange(start_date) %>%
select(campaign_id, campaign_type = description, start_date, end_date) %>%
arrange(as.numeric(campaign_id))
# campaigns --------------------------------------------------------------------
# Household-to-campaign membership table, restricted to campaigns retained in
# campaign_descriptions (i.e. those that ran in 2017).
campaigns <- read_csv("../../Data sets/Complete_Journey_UV_Version/campaign_table.csv") %>%
rename(
campaign_id = campaign,
household_id = household_key
) %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
# remove any campaigns that did not occur in 2017
# (semi_join keeps only rows whose campaign_id appears in campaign_descriptions)
semi_join(., campaign_descriptions, by='campaign_id') %>%
# arrange by campaign so we can see each together
arrange(campaign_id, household_id) %>%
select(campaign_id, household_id)
# coupons ----------------------------------------------------------------------
# Coupon catalog: which coupon UPC applies to which product under which campaign.
coupons <- read_csv("../../Data sets/Complete_Journey_UV_Version/coupon.csv") %>%
rename(campaign_id = campaign) %>%
# coupon_upc is an identifier, not a number, so store it as character
mutate(coupon_upc = as.character(coupon_upc)) %>%
# convert the id variables to characters
mutate_at(vars(ends_with("_id")), as.character) %>%
# remove any campaigns that did not occur in 2017
semi_join(., campaign_descriptions, by='campaign_id') %>%
arrange(coupon_upc, product_id) %>%
select(coupon_upc, product_id, campaign_id)
# coupon_redemptions -----------------------------------------------------------
# Individual coupon redemption events. The raw `day` index is re-based so that
# day 285 maps to 2017-01-01 (consistent with the transactions table), and
# anything outside calendar year 2017 is dropped.
coupon_redemptions <- read_csv("../../Data sets/Complete_Journey_UV_Version/coupon_redempt.csv") %>%
rename(
household_id = household_key,
campaign_id = campaign
) %>%
# convert the id variables to characters and update dates
mutate_at(vars(ends_with("_id")), as.character) %>%
mutate(
coupon_upc = as.character(coupon_upc),
redemption_date = as.Date('2017-01-01') + (day - 285)
) %>%
filter(year(redemption_date) == 2017) %>%
# remove any campaigns that did not occur in 2017
semi_join(., campaign_descriptions, by='campaign_id') %>%
arrange(redemption_date) %>%
select(household_id, coupon_upc, campaign_id, redemption_date)
# Reformat campaign ID so they are 1-27 -----------------------------------
# create campaign ID matching vector
# `new_id` is an integer lookup table (1, 2, 3, ...) whose names are the
# original campaign ids in ascending numeric order.
old_id <- sort(as.numeric(unique(campaign_descriptions$campaign_id)))
new_id <- seq_along(old_id)
names(new_id) <- old_id
# function that changes campaign ID
# Maps each original campaign id in `x` (a character vector) to its
# consecutive replacement using the global `new_id` lookup vector, and
# returns a character vector the same length as `x` (the id columns are
# character, and the old loop's integer assignment coerced to character).
# Vectorized with match() instead of an element-wise which() loop; an id
# with no mapping now fails with a clear message instead of the old loop's
# obscure "replacement has length zero" error.
switch_id <- function(x) {
  idx <- match(as.character(x), names(new_id))
  if (anyNA(idx)) {
    stop("unknown campaign id(s): ",
         paste(unique(x[is.na(idx)]), collapse = ", "),
         call. = FALSE)
  }
  as.character(new_id[idx])
}
# Apply the id remapping to every table that carries campaign_id
coupon_redemptions$campaign_id <- switch_id(coupon_redemptions$campaign_id)
campaign_descriptions$campaign_id <- switch_id(campaign_descriptions$campaign_id)
campaigns$campaign_id <- switch_id(campaigns$campaign_id)
coupons$campaign_id <- switch_id(coupons$campaign_id)
# Persist the cleaned tables as package data (data/*.rda)
# NOTE(review): devtools::use_data() has been superseded by usethis::use_data()
# in recent devtools releases -- confirm the devtools version this runs under.
devtools::use_data(coupon_redemptions, overwrite = TRUE)
devtools::use_data(campaign_descriptions, overwrite = TRUE)
devtools::use_data(campaigns, overwrite = TRUE)
devtools::use_data(coupons, overwrite = TRUE)
# data check summaries ---------------------------------------------------------
# Sanity checks: total sales value per calendar day, plotted over the year and
# averaged by day of week.
daily_sales <- transactions %>%
mutate(date = as.Date(transaction_timestamp, tz="America/New_York")) %>%
group_by(date) %>%
summarize(total_sales_value = sum(sales_value, na.rm = TRUE))
# daily revenue across the year
daily_sales %>%
ggplot() +
geom_line(mapping = aes(x = date, y = total_sales_value))
# average daily revenue by weekday (Monday first)
# NOTE(review): strftime(date, '%A') is locale-dependent; the factor levels
# below assume English day names -- confirm the script runs in an English locale.
daily_sales %>%
mutate(dow = strftime(date, '%A')) %>%
mutate(dow = factor(dow, levels=c("Monday", "Tuesday", "Wednesday",
                                  "Thursday", "Friday",
                                  "Saturday", "Sunday"), ordered=TRUE)) %>%
group_by(dow) %>%
summarize(avg_sales = mean(total_sales_value)) %>%
ggplot() +
geom_bar(aes(x=dow, y=avg_sales), stat = 'identity')
|
`detrend.series` <-
function(y, y.name = "", make.plot = TRUE,
method = c("Spline", "ModNegExp", "Mean", "Ar", "Friedman",
"ModHugershoff", "AgeDepSpline"),
nyrs = NULL, f = 0.5, pos.slope = FALSE,
constrain.nls = c("never", "when.fail", "always"),
verbose = FALSE, return.info = FALSE,
wt, span = "cv", bass = 0, difference = FALSE)
{
check.flags(make.plot, pos.slope, verbose, return.info)
if (length(y.name) == 0) {
y.name2 <- ""
} else {
y.name2 <- as.character(y.name)[1]
stopifnot(Encoding(y.name2) != "bytes")
}
known.methods <- c("Spline", "ModNegExp", "Mean", "Ar", "Friedman",
"ModHugershoff", "AgeDepSpline")
constrain2 <- match.arg(constrain.nls)
method2 <- match.arg(arg = method,
choices = known.methods,
several.ok = TRUE)
wt.missing <- missing(wt)
wt.description <- NULL
if (verbose) {
widthOpt <- getOption("width")
indentSize <- 1
indent <- function(x) {
paste0(paste0(rep.int(" ", indentSize), collapse = ""), x)
}
sepLine <-
indent(paste0(rep.int("~", max(1, widthOpt - 2 * indentSize)),
collapse = ""))
cat(sepLine,
gettext("Verbose output: ", domain="R-dplR"), y.name2,
sep = "\n")
wt.description <- if (wt.missing) "default" else deparse(wt)
opts <- c("make.plot" = make.plot,
"method(s)" = deparse(method2),
"nyrs" = if (is.null(nyrs)) "NULL" else nyrs,
"f" = f,
"pos.slope" = pos.slope,
"constrain.nls" = constrain2,
"verbose" = verbose,
"return.info" = return.info,
"wt" = wt.description,
"span" = span,
"bass" = bass,
"difference" = difference)
optNames <- names(opts)
optChar <- c(gettext("Options", domain="R-dplR"),
paste(str_pad(optNames,
width = max(nchar(optNames)),
side = "right"),
opts, sep = " "))
cat(sepLine, indent(optChar), sep = "\n")
}
## Remove NA from the data (they will be reinserted later)
good.y <- which(!is.na(y))
if(length(good.y) == 0) {
stop("all values are 'NA'")
} else if(any(diff(good.y) != 1)) {
stop("'NA's are not allowed in the middle of the series")
}
y2 <- y[good.y]
nY2 <- length(y2)
## Recode any zero values to 0.001
if (verbose || return.info) {
years <- names(y2)
if (is.null(years)) {
years <- good.y
}
zeroFun <- function(x) list(zero.years = years[is.finite(x) & x == 0])
nFun <- function(x) list(n.zeros = length(x[[1]]))
zero.years.data <- zeroFun(y2)
n.zeros.data <- nFun(zero.years.data)
dataStats <- c(n.zeros.data, zero.years.data)
if (verbose) {
cat("", sepLine, sep = "\n")
if (n.zeros.data[[1]] > 0){
if (is.character(years)) {
cat(indent(gettext("Zero years in input series:\n",
domain="R-dplR")))
} else {
cat(indent(gettext("Zero indices in input series:\n",
domain="R-dplR")))
}
cat(indent(paste(zero.years.data[[1]], collapse = " ")),
"\n", sep = "")
} else {
cat(indent(gettext("No zeros in input series.\n",
domain="R-dplR")))
}
}
}
y2[y2 == 0] <- 0.001
resids <- list()
curves <- list()
modelStats <- list()
################################################################################
################################################################################
# Ok. Let's start the methods
################################################################################
if("ModNegExp" %in% method2){
## Nec or lm
nec.func <- function(Y, constrain) {
nY <- length(Y)
a <- mean(Y[seq_len(max(1, floor(nY * 0.1)))])
b <- -0.01
k <- mean(Y[floor(nY * 0.9):nY])
nlsForm <- Y ~ I(a * exp(b * seq_along(Y)) + k)
nlsStart <- list(a=a, b=b, k=k)
checked <- FALSE
constrained <- FALSE
## Note: nls() may signal an error
if (constrain == "never") {
nec <- nls(formula = nlsForm, start = nlsStart)
} else if (constrain == "always") {
nec <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, k=0),
upper = c(a=Inf, b=0, k=Inf),
algorithm = "port")
constrained <- TRUE
} else {
nec <- nls(formula = nlsForm, start = nlsStart)
coefs <- coef(nec)
if (coefs[1] <= 0 || coefs[2] >= 0) {
stop()
}
fits <- predict(nec)
if (fits[nY] > 0) {
checked <- TRUE
} else {
nec <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, k=0),
upper = c(a=Inf, b=0, k=Inf),
algorithm = "port")
constrained <- TRUE
}
}
if (!checked) {
coefs <- coef(nec)
if (coefs[1] <= 0 || coefs[2] >= 0) {
stop()
}
fits <- predict(nec)
if (fits[nY] <= 0) {
## This error is a special case that needs to be
## detected (if only for giving a warning). Any
## smarter way to implement this?
return(NULL)
}
}
tmpFormula <- nlsForm
formEnv <- new.env(parent = environment(detrend.series))
formEnv[["Y"]] <- Y
formEnv[["a"]] <- coefs["a"]
formEnv[["b"]] <- coefs["b"]
formEnv[["k"]] <- coefs["k"]
environment(tmpFormula) <- formEnv
structure(fits, constrained = constrained,
formula = tmpFormula, summary = summary(nec))
}
ModNegExp <- try(nec.func(y2, constrain2), silent=TRUE)
mneNotPositive <- is.null(ModNegExp)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(gettext("Detrend by ModNegExp.\n", domain = "R-dplR")))
cat(indent(gettext("Trying to fit nls model...\n",
domain = "R-dplR")))
}
if (mneNotPositive || class(ModNegExp) == "try-error") {
if (verbose) {
cat(indent(gettext("nls failed... fitting linear model...",
domain = "R-dplR")))
}
## Straight line via linear regression
if (mneNotPositive) {
msg <- gettext("Fits from method==\'ModNegExp\' are not all positive. \n See constrain.nls argument in detrend.series. \n ARSTAN would tell you to plot that dirty dog at this point.\n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
x <- seq_len(nY2)
lm1 <- lm(y2 ~ x)
coefs <- coef(lm1)
xIdx <- names(coefs) == "x"
coefs <- c(coefs[!xIdx], coefs[xIdx])
if (verbose) {
cat(indent(c(gettext("Linear model fit", domain = "R-dplR"),
gettextf("Intercept: %s", format(coefs[1]),
domain = "R-dplR"),
gettextf("Slope: %s", format(coefs[2]),
domain = "R-dplR"))),
sep = "\n")
}
if (all(is.finite(coefs)) && (coefs[2] <= 0 || pos.slope)) {
tm <- cbind(1, x)
ModNegExp <- drop(tm %*% coefs)
useMean <- !isTRUE(ModNegExp[1] > 0 &&
ModNegExp[nY2] > 0)
if (useMean) {
msg <- gettext("Linear fit (backup of method==\'ModNegExp\') is not all positive. \n Proceed with caution. \n ARSTAN would tell you to plot that dirty dog at this point.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
} else {
useMean <- TRUE
}
if (useMean) {
theMean <- mean(y2)
if (verbose) {
cat(indent(c(gettext("lm has a positive slope",
"pos.slope = FALSE",
"Detrend by mean.",
domain = "R-dplR"),
gettextf("Mean = %s", format(theMean),
domain = "R-dplR"))),
sep = "\n")
}
ModNegExp <- rep.int(theMean, nY2)
mneStats <- list(method = "Mean", mean = theMean)
} else {
mneStats <- list(method = "Line", coefs = coef(summary(lm1)))
}
} else if (verbose || return.info) {
mneSummary <- attr(ModNegExp, "summary")
mneCoefs <- mneSummary[["coefficients"]]
mneCoefsE <- mneCoefs[, 1]
if (verbose) {
cat(indent(c(gettext("nls coefs", domain = "R-dplR"),
paste0(names(mneCoefsE), ": ",
format(mneCoefsE)))),
sep = "\n")
}
mneStats <- list(method = "NegativeExponential",
is.constrained = attr(ModNegExp, "constrained"),
formula = attr(ModNegExp, "formula"),
coefs = mneCoefs)
} else {
mneStats <- NULL
}
if(difference){ resids$ModNegExp <- y2 - ModNegExp }
else{ resids$ModNegExp <- y2 / ModNegExp }
curves$ModNegExp <- ModNegExp
modelStats$ModNegExp <- mneStats
do.mne <- TRUE
} else {
do.mne <- FALSE
}
################################################################################
if("ModHugershoff" %in% method2){
## hug or lm
hug.func <- function(Y, constrain) {
nY <- length(Y)
a <- mean(Y[floor(nY * 0.9):nY])
b <- 1
g <- 0.1
d <- mean(Y[floor(nY * 0.9):nY])
nlsForm <- Y ~ I(a*seq_along(Y)^b*exp(-g*seq_along(Y))+d)
nlsStart <- list(a=a, b=b, g=g, d=d)
checked <- FALSE
constrained <- FALSE
## Note: nls() may signal an error
if (constrain == "never") {
hug <- nls(formula = nlsForm, start = nlsStart)
} else if (constrain == "always") {
hug <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, g=0, d=0),
upper = c(a=Inf, b=0, g=Inf, d=Inf),
algorithm = "port")
constrained <- TRUE
} else {
hug <- nls(formula = nlsForm, start = nlsStart)
coefs <- coef(hug)
if (coefs[1] <= 0 || coefs[2] <= 0) {
stop()
}
fits <- predict(hug)
if (fits[nY] > 0) {
checked <- TRUE
} else {
hug <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, g=0, d=0),
upper = c(a=Inf, b=0, g=Inf, d=Inf),
algorithm = "port")
constrained <- TRUE
}
}
if (!checked) {
coefs <- coef(hug)
if (coefs[1] <= 0 || coefs[2] <= 0) {
stop()
}
fits <- predict(hug)
if (fits[nY] <= 0) {
## This error is a special case that needs to be
## detected (if only for giving a warning). Any
## smarter way to implement this?
return(NULL)
}
}
tmpFormula <- nlsForm
formEnv <- new.env(parent = environment(detrend.series))
formEnv[["Y"]] <- Y
formEnv[["a"]] <- coefs["a"]
formEnv[["b"]] <- coefs["b"]
formEnv[["g"]] <- coefs["g"]
formEnv[["d"]] <- coefs["d"]
environment(tmpFormula) <- formEnv
structure(fits, constrained = constrained,
formula = tmpFormula, summary = summary(hug))
}
ModHugershoff <- try(hug.func(y2, constrain2), silent=TRUE)
hugNotPositive <- is.null(ModHugershoff)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(gettext("Detrend by ModHugershoff.\n", domain = "R-dplR")))
cat(indent(gettext("Trying to fit nls model...\n",
domain = "R-dplR")))
}
if (hugNotPositive || class(ModHugershoff) == "try-error") {
if (verbose) {
cat(indent(gettext("nls failed... fitting linear model...",
domain = "R-dplR")))
}
## Straight line via linear regression
if (hugNotPositive) {
msg <- gettext("Fits from method==\'ModHugershoff\' are not all positive. \n See constrain.nls argument in detrend.series. \n ARSTAN would tell you to plot that dirty dog at this point.\n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
x <- seq_len(nY2)
lm1 <- lm(y2 ~ x)
coefs <- coef(lm1)
xIdx <- names(coefs) == "x"
coefs <- c(coefs[!xIdx], coefs[xIdx])
if (verbose) {
cat(indent(c(gettext("Linear model fit", domain = "R-dplR"),
gettextf("Intercept: %s", format(coefs[1]),
domain = "R-dplR"),
gettextf("Slope: %s", format(coefs[2]),
domain = "R-dplR"))),
sep = "\n")
}
if (all(is.finite(coefs)) && (coefs[2] <= 0 || pos.slope)) {
tm <- cbind(1, x)
ModHugershoff <- drop(tm %*% coefs)
useMean <- !isTRUE(ModHugershoff[1] > 0 &&
ModHugershoff[nY2] > 0)
if (useMean) {
msg <- gettext("Linear fit (backup of method==\'ModHugershoff\') is not all positive. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
} else {
useMean <- TRUE
}
if (useMean) {
theMean <- mean(y2)
if (verbose) {
cat(indent(c(gettext("lm has a positive slope",
"pos.slope = FALSE",
"Detrend by mean.",
domain = "R-dplR"),
gettextf("Mean = %s", format(theMean),
domain = "R-dplR"))),
sep = "\n")
}
ModHugershoff <- rep.int(theMean, nY2)
hugStats <- list(method = "Mean", mean = theMean)
} else {
hugStats <- list(method = "Line", coefs = coef(summary(lm1)))
}
} else if (verbose || return.info) {
hugSummary <- attr(ModHugershoff, "summary")
hugCoefs <- hugSummary[["coefficients"]]
hugCoefsE <- hugCoefs[, 1]
if (verbose) {
cat(indent(c(gettext("nls coefs", domain = "R-dplR"),
paste0(names(hugCoefsE), ": ",
format(hugCoefsE)))),
sep = "\n")
}
hugStats <- list(method = "Hugershoff",
is.constrained = attr(ModHugershoff, "constrained"),
formula = attr(ModHugershoff, "formula"),
coefs = hugCoefs)
} else {
hugStats <- NULL
}
if(difference){ resids$ModHugershoff <- y2 - ModHugershoff }
else{ resids$ModHugershoff <- y2 / ModHugershoff }
curves$ModHugershoff <- ModHugershoff
modelStats$ModHugershoff <- hugStats
do.hug <- TRUE
} else {
do.hug <- FALSE
}
################################################################################
if("AgeDepSpline" %in% method2){
## Age dep smoothing spline with nyrs (50 default) as the init stiffness
## are NULL
if(is.null(nyrs))
nyrs2 <- 50
else
nyrs2 <- nyrs
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext(c("Detrend by age-dependent spline.",
"Spline parameters"), domain = "R-dplR"),
paste0("nyrs = ", nyrs2, ", pos.slope = ", pos.slope))),
sep = "\n")
}
AgeDepSpline <- ads(y=y2, nyrs0=nyrs2, pos.slope = pos.slope)
if (any(AgeDepSpline <= 0)) {
msg <- "Fits from method==\'AgeDepSpline\' are not all positive. \n This is extremely rare. Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
theMean <- mean(y2)
AgeDepSpline <- rep.int(theMean, nY2)
AgeDepSplineStats <- list(method = "Mean", mean = theMean)
} else {
AgeDepSplineStats <- list(method = "Age-Dep Spline", nyrs = nyrs2, pos.slope=pos.slope)
}
if(difference){ resids$AgeDepSpline <- y2 - AgeDepSpline }
else{ resids$AgeDepSpline <- y2 / AgeDepSpline }
curves$AgeDepSpline <- AgeDepSpline
modelStats$AgeDepSpline <- AgeDepSplineStats
do.ads <- TRUE
} else {
do.ads <- FALSE
}
################################################################################
if("Spline" %in% method2){
## Smoothing spline
## "n-year spline" as the spline whose frequency response is
## 50%, or 0.50, at a wavelength of 67%n years if nyrs and f
## are NULL
if(is.null(nyrs))
nyrs2 <- floor(nY2 * 0.67)
else
nyrs2 <- nyrs
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext(c("Detrend by spline.",
"Spline parameters"), domain = "R-dplR"),
paste0("nyrs = ", nyrs2, ", f = ", f))),
sep = "\n")
}
#Spline <- ffcsaps(y=y2, x=seq_len(nY2), nyrs=nyrs2, f=f)
Spline <- caps(y=y2, nyrs=nyrs2, f=f)
if (any(Spline <= 0)) {
msg <- "Fits from method==\'Spline\' are not all positive. \n Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
theMean <- mean(y2)
Spline <- rep.int(theMean, nY2)
splineStats <- list(method = "Mean", mean = theMean)
} else {
splineStats <- list(method = "Spline", nyrs = nyrs2, f = f)
}
if(difference){ resids$Spline <- y2 - Spline }
else{ resids$Spline <- y2 / Spline }
curves$Spline <- Spline
modelStats$Spline <- splineStats
do.spline <- TRUE
} else {
do.spline <- FALSE
}
################################################################################
if("Mean" %in% method2){
## Fit a horiz line
theMean <- mean(y2)
Mean <- rep.int(theMean, nY2)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext("Detrend by mean.", domain = "R-dplR"),
paste("Mean = ", format(theMean)))),
sep = "\n")
}
meanStats <- list(method = "Mean", mean = theMean)
if(difference){ resids$Mean <- y2 - Mean }
else{ resids$Mean <- y2 / Mean }
curves$Mean <- Mean
modelStats$Mean <- meanStats
do.mean <- TRUE
} else {
do.mean <- FALSE
}
################################################################################
if("Ar" %in% method2){
    ## Fit an ar model - aka prewhiten.  The "detrended" series is the
    ## AR residual series scaled by (or differenced from) its mean.
    Ar <- ar.func(y2, model = TRUE)
    arModel <- attr(Ar, "model")
    if (verbose) {
        cat("", sepLine, sep = "\n")
        cat(indent(gettext("Detrend by prewhitening.", domain = "R-dplR")))
        print(arModel)
    }
    arStats <- list(method = "Ar", order = arModel[["order"]],
                    ar = arModel[["ar"]])
    # This will propagate NA to rwi as a result of detrending.
    # Other methods don't. Problem when interacting with other
    # methods?
    # Also, this can (and does!) produce negative RWI values.
    # See example using CAM011. Thus negative values are clamped to 0:
    if (any(Ar <= 0, na.rm = TRUE)) {
        msg <- "Fits from method==\'Ar\' are not all positive. \n Setting values <0 to 0. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
        if(y.name2==""){
            msg2 <- gettext(msg, domain = "R-dplR")
        }
        else {
            msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
                      gettext(msg, domain = "R-dplR"))
        }
        warning(msg2)
        if (verbose) {
            cat(sepLine, indent(msg), sepLine, sep = "\n")
        }
        Ar[Ar<0] <- 0
    }
    ## Bug fix: the difference branch used to evaluate
    ## `Ar - mean(Ar, na.rm=TRUE)` without assigning it, so resids$Ar was
    ## never created when difference=TRUE and the later selection of the
    ## "Ar" column by method name would fail.
    if(difference){ resids$Ar <- Ar - mean(Ar, na.rm = TRUE) }
    else{ resids$Ar <- Ar / mean(Ar, na.rm = TRUE) }
    ## The "curve" is the horizontal mean line; the scalar is recycled
    ## to full length when curves is turned into a data.frame below.
    curves$Ar <- mean(Ar, na.rm = TRUE)
    modelStats$Ar <- arStats
    do.ar <- TRUE
} else {
    do.ar <- FALSE
}
################################################################################
if ("Friedman" %in% method2) {
    ## Detrend with Friedman's "super smoother" (stats::supsmu).
    if (is.null(wt.description)) {
        wt.description <- if (wt.missing) "default" else deparse(wt)
    }
    if (verbose) {
        cat("", sepLine, sep = "\n")
        cat(indent(c(gettext(c("Detrend by Friedman's super smoother.",
                               "Smoother parameters"), domain = "R-dplR"),
                     paste0("span = ", span, ", bass = ", bass),
                     paste0("wt = ", wt.description))),
            sep = "\n")
    }
    ## Only pass wt to supsmu() when the caller actually supplied one.
    if (wt.missing) {
        Friedman <- supsmu(x = seq_len(nY2), y = y2, span = span,
                           periodic = FALSE, bass = bass)[["y"]]
    } else {
        Friedman <- supsmu(x = seq_len(nY2), y = y2, wt = wt, span = span,
                           periodic = FALSE, bass = bass)[["y"]]
    }
    if (any(Friedman <= 0)) {
        ## Non-positive fits would make the ratio rwi undefined:
        ## fall back to detrending by the series mean.
        msg <- "Fits from method==\'Friedman\' are not all positive. \n Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
        if(y.name2==""){
            msg2 <- gettext(msg, domain = "R-dplR")
        }
        else {
            msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
                      gettext(msg, domain = "R-dplR"))
        }
        warning(msg2)
        if (verbose) {
            cat(sepLine, indent(msg), sepLine, sep = "\n")
        }
        theMean <- mean(y2)
        Friedman <- rep.int(theMean, nY2)
        friedmanStats <- list(method = "Mean", mean = theMean)
    } else {
        friedmanStats <- list(method = "Friedman", wt = wt.description, span = span, bass = bass)
    }
    if(difference){ resids$Friedman <- y2 - Friedman }
    else{ resids$Friedman <- y2 / Friedman }
    curves$Friedman <- Friedman
    ## Bug fix: modelStats$Friedman was previously rebuilt from scratch
    ## here, discarding friedmanStats -- so the Mean fallback (and the
    ## deparsed wt description) never reached the returned model.info.
    ## Assign the stats computed above, as every other method does.
    modelStats$Friedman <- friedmanStats
    do.friedman <- TRUE
} else {
    do.friedman <- FALSE
}
################################################################################
################################################################################
resids <- data.frame(resids)
curves <- data.frame(curves)
if (verbose || return.info) {
zero.years <- lapply(resids, zeroFun)
n.zeros <- lapply(zero.years, nFun)
modelStats <- mapply(c, modelStats, n.zeros, zero.years,
SIMPLIFY = FALSE)
if (verbose) {
n.zeros2 <- unlist(n.zeros, use.names = FALSE)
zeroFlag <- n.zeros2 > 0
methodNames <- names(modelStats)
if (any(zeroFlag)) {
cat("", sepLine, sep = "\n")
for (i in which(zeroFlag)) {
if (is.character(years)) {
cat(indent(gettextf("Zero years in %s series:\n",
methodNames[i], domain="R-dplR")))
} else {
cat(indent(gettextf("Zero indices in %s series:\n",
methodNames[i], domain="R-dplR")))
}
cat(indent(paste(zero.years[[i]][[1]], collapse = " ")),
"\n", sep = "")
}
}
}
}
if(make.plot){
cols <- c("#24492e","#015b58","#2c6184","#59629b","#89689d","#ba7999","#e69b99")
op <- par(no.readonly=TRUE)
on.exit(par(op))
n.methods <- ncol(resids)
par(mar=c(2.1, 2.1, 2.1, 2.1), mgp=c(1.1, 0.1, 0),
tcl=0.5, xaxs="i")
if (n.methods > 4) {
par(cex.main = min(1, par("cex.main")))
}
mat <- switch(n.methods,
matrix(c(1,2), nrow=2, ncol=1, byrow=TRUE),
matrix(c(1,1,2,3), nrow=2, ncol=2, byrow=TRUE),
matrix(c(1,2,3,4), nrow=2, ncol=2, byrow=TRUE),
matrix(c(1,1,2,3,4,5), nrow=3, ncol=2, byrow=TRUE),
matrix(c(1,1,1,2,3,4,5,6,0), nrow=3, ncol=3, byrow=TRUE),
matrix(c(1,1,1,2,3,4,5,6,7), nrow=3, ncol=3, byrow=TRUE),
matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2, byrow=TRUE))
layout(mat,
widths=rep.int(0.5, ncol(mat)),
heights=rep.int(1, nrow(mat)))
# 1
plot(y2, type="l", ylab="mm", col = "grey",
xlab=gettext("Age (Yrs)", domain="R-dplR"),
main=gettextf("Raw Series %s", y.name2, domain="R-dplR"))
if(do.spline) lines(Spline, col=cols[1], lwd=2)
if(do.mne) lines(ModNegExp, col=cols[2], lwd=2)
if(do.mean) lines(Mean, col=cols[3], lwd=2)
if(do.friedman) lines(Friedman, col=cols[5], lwd=2)
if(do.hug) lines(ModHugershoff, col=cols[6], lwd=2)
if(do.ads) lines(AgeDepSpline, col=cols[7], lwd=2)
# 1
if(do.spline){
plot(resids$Spline, type="l", col=cols[1],
main=gettext("Spline", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 2
if(do.mne){
plot(resids$ModNegExp, type="l", col=cols[2],
main=gettext("Neg. Exp. Curve or Straight Line",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 3
if(do.mean){
plot(resids$Mean, type="l", col=cols[3],
main=gettext("Horizontal Line (Mean)", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 4
if(do.ar){
plot(resids$Ar, type="l", col=cols[4],
main=gettextf("Ar", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
mtext(text="(Not plotted with raw series)",side=3,line=-1,cex=0.75)
}
# 5
if (do.friedman) {
plot(resids$Friedman, type="l", col=cols[5],
main=gettext("Friedman's Super Smoother", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 6
if(do.hug){
plot(resids$ModHugershoff, type="l", col=cols[6],
main=gettext("Hugershoff or Straight Line",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 7
if(do.ads){
plot(resids$AgeDepSpline, type="l", col=cols[7],
main=gettext("Age Dep Spline",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
}
# Done
resids2 <- matrix(NA, ncol=ncol(resids), nrow=length(y))
resids2 <- data.frame(resids2)
names(resids2) <- names(resids)
if(!is.null(names(y))) row.names(resids2) <- names(y)
resids2[good.y, ] <- resids
curves2 <- matrix(NA, ncol=ncol(curves), nrow=length(y))
curves2 <- data.frame(curves2)
names(curves2) <- names(curves)
if(!is.null(names(y))) row.names(curves2) <- names(y)
curves2[good.y, ] <- curves
## Reorder columns of output to match the order of the argument
## "method".
resids2 <- resids2[, method2]
curves2 <- curves2[, method2]
## Make sure names (years) are included if there is only one method
if(!is.data.frame(resids2)) names(resids2) <- names(y)
if (return.info) {
list(series = resids2,
curves = curves2,
model.info = modelStats[method2],
data.info = dataStats)
} else {
resids2
}
}
| /R/detrend.series.R | no_license | mvkorpel/dplR | R | false | false | 31,997 | r | `detrend.series` <-
function(y, y.name = "", make.plot = TRUE,
method = c("Spline", "ModNegExp", "Mean", "Ar", "Friedman",
"ModHugershoff", "AgeDepSpline"),
nyrs = NULL, f = 0.5, pos.slope = FALSE,
constrain.nls = c("never", "when.fail", "always"),
verbose = FALSE, return.info = FALSE,
wt, span = "cv", bass = 0, difference = FALSE)
{
check.flags(make.plot, pos.slope, verbose, return.info)
if (length(y.name) == 0) {
y.name2 <- ""
} else {
y.name2 <- as.character(y.name)[1]
stopifnot(Encoding(y.name2) != "bytes")
}
known.methods <- c("Spline", "ModNegExp", "Mean", "Ar", "Friedman",
"ModHugershoff", "AgeDepSpline")
constrain2 <- match.arg(constrain.nls)
method2 <- match.arg(arg = method,
choices = known.methods,
several.ok = TRUE)
wt.missing <- missing(wt)
wt.description <- NULL
if (verbose) {
widthOpt <- getOption("width")
indentSize <- 1
indent <- function(x) {
paste0(paste0(rep.int(" ", indentSize), collapse = ""), x)
}
sepLine <-
indent(paste0(rep.int("~", max(1, widthOpt - 2 * indentSize)),
collapse = ""))
cat(sepLine,
gettext("Verbose output: ", domain="R-dplR"), y.name2,
sep = "\n")
wt.description <- if (wt.missing) "default" else deparse(wt)
opts <- c("make.plot" = make.plot,
"method(s)" = deparse(method2),
"nyrs" = if (is.null(nyrs)) "NULL" else nyrs,
"f" = f,
"pos.slope" = pos.slope,
"constrain.nls" = constrain2,
"verbose" = verbose,
"return.info" = return.info,
"wt" = wt.description,
"span" = span,
"bass" = bass,
"difference" = difference)
optNames <- names(opts)
optChar <- c(gettext("Options", domain="R-dplR"),
paste(str_pad(optNames,
width = max(nchar(optNames)),
side = "right"),
opts, sep = " "))
cat(sepLine, indent(optChar), sep = "\n")
}
## Remove NA from the data (they will be reinserted later)
good.y <- which(!is.na(y))
if(length(good.y) == 0) {
stop("all values are 'NA'")
} else if(any(diff(good.y) != 1)) {
stop("'NA's are not allowed in the middle of the series")
}
y2 <- y[good.y]
nY2 <- length(y2)
## Recode any zero values to 0.001
if (verbose || return.info) {
years <- names(y2)
if (is.null(years)) {
years <- good.y
}
zeroFun <- function(x) list(zero.years = years[is.finite(x) & x == 0])
nFun <- function(x) list(n.zeros = length(x[[1]]))
zero.years.data <- zeroFun(y2)
n.zeros.data <- nFun(zero.years.data)
dataStats <- c(n.zeros.data, zero.years.data)
if (verbose) {
cat("", sepLine, sep = "\n")
if (n.zeros.data[[1]] > 0){
if (is.character(years)) {
cat(indent(gettext("Zero years in input series:\n",
domain="R-dplR")))
} else {
cat(indent(gettext("Zero indices in input series:\n",
domain="R-dplR")))
}
cat(indent(paste(zero.years.data[[1]], collapse = " ")),
"\n", sep = "")
} else {
cat(indent(gettext("No zeros in input series.\n",
domain="R-dplR")))
}
}
}
y2[y2 == 0] <- 0.001
resids <- list()
curves <- list()
modelStats <- list()
################################################################################
################################################################################
# Ok. Let's start the methods
################################################################################
if("ModNegExp" %in% method2){
## Nec or lm
nec.func <- function(Y, constrain) {
nY <- length(Y)
a <- mean(Y[seq_len(max(1, floor(nY * 0.1)))])
b <- -0.01
k <- mean(Y[floor(nY * 0.9):nY])
nlsForm <- Y ~ I(a * exp(b * seq_along(Y)) + k)
nlsStart <- list(a=a, b=b, k=k)
checked <- FALSE
constrained <- FALSE
## Note: nls() may signal an error
if (constrain == "never") {
nec <- nls(formula = nlsForm, start = nlsStart)
} else if (constrain == "always") {
nec <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, k=0),
upper = c(a=Inf, b=0, k=Inf),
algorithm = "port")
constrained <- TRUE
} else {
nec <- nls(formula = nlsForm, start = nlsStart)
coefs <- coef(nec)
if (coefs[1] <= 0 || coefs[2] >= 0) {
stop()
}
fits <- predict(nec)
if (fits[nY] > 0) {
checked <- TRUE
} else {
nec <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, k=0),
upper = c(a=Inf, b=0, k=Inf),
algorithm = "port")
constrained <- TRUE
}
}
if (!checked) {
coefs <- coef(nec)
if (coefs[1] <= 0 || coefs[2] >= 0) {
stop()
}
fits <- predict(nec)
if (fits[nY] <= 0) {
## This error is a special case that needs to be
## detected (if only for giving a warning). Any
## smarter way to implement this?
return(NULL)
}
}
tmpFormula <- nlsForm
formEnv <- new.env(parent = environment(detrend.series))
formEnv[["Y"]] <- Y
formEnv[["a"]] <- coefs["a"]
formEnv[["b"]] <- coefs["b"]
formEnv[["k"]] <- coefs["k"]
environment(tmpFormula) <- formEnv
structure(fits, constrained = constrained,
formula = tmpFormula, summary = summary(nec))
}
ModNegExp <- try(nec.func(y2, constrain2), silent=TRUE)
mneNotPositive <- is.null(ModNegExp)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(gettext("Detrend by ModNegExp.\n", domain = "R-dplR")))
cat(indent(gettext("Trying to fit nls model...\n",
domain = "R-dplR")))
}
if (mneNotPositive || class(ModNegExp) == "try-error") {
if (verbose) {
cat(indent(gettext("nls failed... fitting linear model...",
domain = "R-dplR")))
}
## Straight line via linear regression
if (mneNotPositive) {
msg <- gettext("Fits from method==\'ModNegExp\' are not all positive. \n See constrain.nls argument in detrend.series. \n ARSTAN would tell you to plot that dirty dog at this point.\n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
x <- seq_len(nY2)
lm1 <- lm(y2 ~ x)
coefs <- coef(lm1)
xIdx <- names(coefs) == "x"
coefs <- c(coefs[!xIdx], coefs[xIdx])
if (verbose) {
cat(indent(c(gettext("Linear model fit", domain = "R-dplR"),
gettextf("Intercept: %s", format(coefs[1]),
domain = "R-dplR"),
gettextf("Slope: %s", format(coefs[2]),
domain = "R-dplR"))),
sep = "\n")
}
if (all(is.finite(coefs)) && (coefs[2] <= 0 || pos.slope)) {
tm <- cbind(1, x)
ModNegExp <- drop(tm %*% coefs)
useMean <- !isTRUE(ModNegExp[1] > 0 &&
ModNegExp[nY2] > 0)
if (useMean) {
msg <- gettext("Linear fit (backup of method==\'ModNegExp\') is not all positive. \n Proceed with caution. \n ARSTAN would tell you to plot that dirty dog at this point.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
} else {
useMean <- TRUE
}
if (useMean) {
theMean <- mean(y2)
if (verbose) {
cat(indent(c(gettext("lm has a positive slope",
"pos.slope = FALSE",
"Detrend by mean.",
domain = "R-dplR"),
gettextf("Mean = %s", format(theMean),
domain = "R-dplR"))),
sep = "\n")
}
ModNegExp <- rep.int(theMean, nY2)
mneStats <- list(method = "Mean", mean = theMean)
} else {
mneStats <- list(method = "Line", coefs = coef(summary(lm1)))
}
} else if (verbose || return.info) {
mneSummary <- attr(ModNegExp, "summary")
mneCoefs <- mneSummary[["coefficients"]]
mneCoefsE <- mneCoefs[, 1]
if (verbose) {
cat(indent(c(gettext("nls coefs", domain = "R-dplR"),
paste0(names(mneCoefsE), ": ",
format(mneCoefsE)))),
sep = "\n")
}
mneStats <- list(method = "NegativeExponential",
is.constrained = attr(ModNegExp, "constrained"),
formula = attr(ModNegExp, "formula"),
coefs = mneCoefs)
} else {
mneStats <- NULL
}
if(difference){ resids$ModNegExp <- y2 - ModNegExp }
else{ resids$ModNegExp <- y2 / ModNegExp }
curves$ModNegExp <- ModNegExp
modelStats$ModNegExp <- mneStats
do.mne <- TRUE
} else {
do.mne <- FALSE
}
################################################################################
if("ModHugershoff" %in% method2){
## hug or lm
hug.func <- function(Y, constrain) {
nY <- length(Y)
a <- mean(Y[floor(nY * 0.9):nY])
b <- 1
g <- 0.1
d <- mean(Y[floor(nY * 0.9):nY])
nlsForm <- Y ~ I(a*seq_along(Y)^b*exp(-g*seq_along(Y))+d)
nlsStart <- list(a=a, b=b, g=g, d=d)
checked <- FALSE
constrained <- FALSE
## Note: nls() may signal an error
if (constrain == "never") {
hug <- nls(formula = nlsForm, start = nlsStart)
} else if (constrain == "always") {
hug <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, g=0, d=0),
upper = c(a=Inf, b=0, g=Inf, d=Inf),
algorithm = "port")
constrained <- TRUE
} else {
hug <- nls(formula = nlsForm, start = nlsStart)
coefs <- coef(hug)
if (coefs[1] <= 0 || coefs[2] <= 0) {
stop()
}
fits <- predict(hug)
if (fits[nY] > 0) {
checked <- TRUE
} else {
hug <- nls(formula = nlsForm, start = nlsStart,
lower = c(a=0, b=-Inf, g=0, d=0),
upper = c(a=Inf, b=0, g=Inf, d=Inf),
algorithm = "port")
constrained <- TRUE
}
}
if (!checked) {
coefs <- coef(hug)
if (coefs[1] <= 0 || coefs[2] <= 0) {
stop()
}
fits <- predict(hug)
if (fits[nY] <= 0) {
## This error is a special case that needs to be
## detected (if only for giving a warning). Any
## smarter way to implement this?
return(NULL)
}
}
tmpFormula <- nlsForm
formEnv <- new.env(parent = environment(detrend.series))
formEnv[["Y"]] <- Y
formEnv[["a"]] <- coefs["a"]
formEnv[["b"]] <- coefs["b"]
formEnv[["g"]] <- coefs["g"]
formEnv[["d"]] <- coefs["d"]
environment(tmpFormula) <- formEnv
structure(fits, constrained = constrained,
formula = tmpFormula, summary = summary(hug))
}
ModHugershoff <- try(hug.func(y2, constrain2), silent=TRUE)
hugNotPositive <- is.null(ModHugershoff)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(gettext("Detrend by ModHugershoff.\n", domain = "R-dplR")))
cat(indent(gettext("Trying to fit nls model...\n",
domain = "R-dplR")))
}
if (hugNotPositive || class(ModHugershoff) == "try-error") {
if (verbose) {
cat(indent(gettext("nls failed... fitting linear model...",
domain = "R-dplR")))
}
## Straight line via linear regression
if (hugNotPositive) {
msg <- gettext("Fits from method==\'ModHugershoff\' are not all positive. \n See constrain.nls argument in detrend.series. \n ARSTAN would tell you to plot that dirty dog at this point.\n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
x <- seq_len(nY2)
lm1 <- lm(y2 ~ x)
coefs <- coef(lm1)
xIdx <- names(coefs) == "x"
coefs <- c(coefs[!xIdx], coefs[xIdx])
if (verbose) {
cat(indent(c(gettext("Linear model fit", domain = "R-dplR"),
gettextf("Intercept: %s", format(coefs[1]),
domain = "R-dplR"),
gettextf("Slope: %s", format(coefs[2]),
domain = "R-dplR"))),
sep = "\n")
}
if (all(is.finite(coefs)) && (coefs[2] <= 0 || pos.slope)) {
tm <- cbind(1, x)
ModHugershoff <- drop(tm %*% coefs)
useMean <- !isTRUE(ModHugershoff[1] > 0 &&
ModHugershoff[nY2] > 0)
if (useMean) {
msg <- gettext("Linear fit (backup of method==\'ModHugershoff\') is not all positive. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution.",
domain = "R-dplR")
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
}
} else {
useMean <- TRUE
}
if (useMean) {
theMean <- mean(y2)
if (verbose) {
cat(indent(c(gettext("lm has a positive slope",
"pos.slope = FALSE",
"Detrend by mean.",
domain = "R-dplR"),
gettextf("Mean = %s", format(theMean),
domain = "R-dplR"))),
sep = "\n")
}
ModHugershoff <- rep.int(theMean, nY2)
hugStats <- list(method = "Mean", mean = theMean)
} else {
hugStats <- list(method = "Line", coefs = coef(summary(lm1)))
}
} else if (verbose || return.info) {
hugSummary <- attr(ModHugershoff, "summary")
hugCoefs <- hugSummary[["coefficients"]]
hugCoefsE <- hugCoefs[, 1]
if (verbose) {
cat(indent(c(gettext("nls coefs", domain = "R-dplR"),
paste0(names(hugCoefsE), ": ",
format(hugCoefsE)))),
sep = "\n")
}
hugStats <- list(method = "Hugershoff",
is.constrained = attr(ModHugershoff, "constrained"),
formula = attr(ModHugershoff, "formula"),
coefs = hugCoefs)
} else {
hugStats <- NULL
}
if(difference){ resids$ModHugershoff <- y2 - ModHugershoff }
else{ resids$ModHugershoff <- y2 / ModHugershoff }
curves$ModHugershoff <- ModHugershoff
modelStats$ModHugershoff <- hugStats
do.hug <- TRUE
} else {
do.hug <- FALSE
}
################################################################################
if("AgeDepSpline" %in% method2){
## Age dep smoothing spline with nyrs (50 default) as the init stiffness
## are NULL
if(is.null(nyrs))
nyrs2 <- 50
else
nyrs2 <- nyrs
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext(c("Detrend by age-dependent spline.",
"Spline parameters"), domain = "R-dplR"),
paste0("nyrs = ", nyrs2, ", pos.slope = ", pos.slope))),
sep = "\n")
}
AgeDepSpline <- ads(y=y2, nyrs0=nyrs2, pos.slope = pos.slope)
if (any(AgeDepSpline <= 0)) {
msg <- "Fits from method==\'AgeDepSpline\' are not all positive. \n This is extremely rare. Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
theMean <- mean(y2)
AgeDepSpline <- rep.int(theMean, nY2)
AgeDepSplineStats <- list(method = "Mean", mean = theMean)
} else {
AgeDepSplineStats <- list(method = "Age-Dep Spline", nyrs = nyrs2, pos.slope=pos.slope)
}
if(difference){ resids$AgeDepSpline <- y2 - AgeDepSpline }
else{ resids$AgeDepSpline <- y2 / AgeDepSpline }
curves$AgeDepSpline <- AgeDepSpline
modelStats$AgeDepSpline <- AgeDepSplineStats
do.ads <- TRUE
} else {
do.ads <- FALSE
}
################################################################################
if("Spline" %in% method2){
## Smoothing spline
## "n-year spline" as the spline whose frequency response is
## 50%, or 0.50, at a wavelength of 67%n years if nyrs and f
## are NULL
if(is.null(nyrs))
nyrs2 <- floor(nY2 * 0.67)
else
nyrs2 <- nyrs
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext(c("Detrend by spline.",
"Spline parameters"), domain = "R-dplR"),
paste0("nyrs = ", nyrs2, ", f = ", f))),
sep = "\n")
}
#Spline <- ffcsaps(y=y2, x=seq_len(nY2), nyrs=nyrs2, f=f)
Spline <- caps(y=y2, nyrs=nyrs2, f=f)
if (any(Spline <= 0)) {
msg <- "Fits from method==\'Spline\' are not all positive. \n Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
if(y.name2==""){
msg2 <- gettext(msg, domain = "R-dplR")
}
else {
msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
gettext(msg, domain = "R-dplR"))
}
warning(msg2)
if (verbose) {
cat(sepLine, indent(msg), sepLine, sep = "\n")
}
theMean <- mean(y2)
Spline <- rep.int(theMean, nY2)
splineStats <- list(method = "Mean", mean = theMean)
} else {
splineStats <- list(method = "Spline", nyrs = nyrs2, f = f)
}
if(difference){ resids$Spline <- y2 - Spline }
else{ resids$Spline <- y2 / Spline }
curves$Spline <- Spline
modelStats$Spline <- splineStats
do.spline <- TRUE
} else {
do.spline <- FALSE
}
################################################################################
if("Mean" %in% method2){
## Fit a horiz line: detrend by the series mean (constant curve).
theMean <- mean(y2)
Mean <- rep.int(theMean, nY2)
if (verbose) {
cat("", sepLine, sep = "\n")
cat(indent(c(gettext("Detrend by mean.", domain = "R-dplR"),
paste("Mean = ", format(theMean)))),
sep = "\n")
}
meanStats <- list(method = "Mean", mean = theMean)
## Residuals: difference from (or ratio to) the constant mean curve,
## depending on the `difference` flag.
if(difference){ resids$Mean <- y2 - Mean }
else{ resids$Mean <- y2 / Mean }
curves$Mean <- Mean
modelStats$Mean <- meanStats
do.mean <- TRUE
} else {
do.mean <- FALSE
}
################################################################################
if("Ar" %in% method2){
    ## Fit an ar model - aka prewhiten.  The "detrended" series is the
    ## AR residual series scaled by (or differenced from) its mean.
    Ar <- ar.func(y2, model = TRUE)
    arModel <- attr(Ar, "model")
    if (verbose) {
        cat("", sepLine, sep = "\n")
        cat(indent(gettext("Detrend by prewhitening.", domain = "R-dplR")))
        print(arModel)
    }
    arStats <- list(method = "Ar", order = arModel[["order"]],
                    ar = arModel[["ar"]])
    # This will propagate NA to rwi as a result of detrending.
    # Other methods don't. Problem when interacting with other
    # methods?
    # Also, this can (and does!) produce negative RWI values.
    # See example using CAM011. Thus negative values are clamped to 0:
    if (any(Ar <= 0, na.rm = TRUE)) {
        msg <- "Fits from method==\'Ar\' are not all positive. \n Setting values <0 to 0. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
        if(y.name2==""){
            msg2 <- gettext(msg, domain = "R-dplR")
        }
        else {
            msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
                      gettext(msg, domain = "R-dplR"))
        }
        warning(msg2)
        if (verbose) {
            cat(sepLine, indent(msg), sepLine, sep = "\n")
        }
        Ar[Ar<0] <- 0
    }
    ## Bug fix: the difference branch used to evaluate
    ## `Ar - mean(Ar, na.rm=TRUE)` without assigning it, so resids$Ar was
    ## never created when difference=TRUE and the later selection of the
    ## "Ar" column by method name would fail.
    if(difference){ resids$Ar <- Ar - mean(Ar, na.rm = TRUE) }
    else{ resids$Ar <- Ar / mean(Ar, na.rm = TRUE) }
    ## The "curve" is the horizontal mean line; the scalar is recycled
    ## to full length when curves is turned into a data.frame below.
    curves$Ar <- mean(Ar, na.rm = TRUE)
    modelStats$Ar <- arStats
    do.ar <- TRUE
} else {
    do.ar <- FALSE
}
################################################################################
if ("Friedman" %in% method2) {
    ## Detrend with Friedman's "super smoother" (stats::supsmu).
    if (is.null(wt.description)) {
        wt.description <- if (wt.missing) "default" else deparse(wt)
    }
    if (verbose) {
        cat("", sepLine, sep = "\n")
        cat(indent(c(gettext(c("Detrend by Friedman's super smoother.",
                               "Smoother parameters"), domain = "R-dplR"),
                     paste0("span = ", span, ", bass = ", bass),
                     paste0("wt = ", wt.description))),
            sep = "\n")
    }
    ## Only pass wt to supsmu() when the caller actually supplied one.
    if (wt.missing) {
        Friedman <- supsmu(x = seq_len(nY2), y = y2, span = span,
                           periodic = FALSE, bass = bass)[["y"]]
    } else {
        Friedman <- supsmu(x = seq_len(nY2), y = y2, wt = wt, span = span,
                           periodic = FALSE, bass = bass)[["y"]]
    }
    if (any(Friedman <= 0)) {
        ## Non-positive fits would make the ratio rwi undefined:
        ## fall back to detrending by the series mean.
        msg <- "Fits from method==\'Friedman\' are not all positive. \n Series will be detrended with method==\'Mean\'. \n This might not be what you want. \n ARSTAN would tell you to plot that dirty dog at this point. \n Proceed with caution."
        if(y.name2==""){
            msg2 <- gettext(msg, domain = "R-dplR")
        }
        else {
            msg2 <- c(gettextf("In raw series %s: ", y.name2, domain = "R-dplR"),
                      gettext(msg, domain = "R-dplR"))
        }
        warning(msg2)
        if (verbose) {
            cat(sepLine, indent(msg), sepLine, sep = "\n")
        }
        theMean <- mean(y2)
        Friedman <- rep.int(theMean, nY2)
        friedmanStats <- list(method = "Mean", mean = theMean)
    } else {
        friedmanStats <- list(method = "Friedman", wt = wt.description, span = span, bass = bass)
    }
    if(difference){ resids$Friedman <- y2 - Friedman }
    else{ resids$Friedman <- y2 / Friedman }
    curves$Friedman <- Friedman
    ## Bug fix: modelStats$Friedman was previously rebuilt from scratch
    ## here, discarding friedmanStats -- so the Mean fallback (and the
    ## deparsed wt description) never reached the returned model.info.
    ## Assign the stats computed above, as every other method does.
    modelStats$Friedman <- friedmanStats
    do.friedman <- TRUE
} else {
    do.friedman <- FALSE
}
################################################################################
################################################################################
resids <- data.frame(resids)
curves <- data.frame(curves)
if (verbose || return.info) {
zero.years <- lapply(resids, zeroFun)
n.zeros <- lapply(zero.years, nFun)
modelStats <- mapply(c, modelStats, n.zeros, zero.years,
SIMPLIFY = FALSE)
if (verbose) {
n.zeros2 <- unlist(n.zeros, use.names = FALSE)
zeroFlag <- n.zeros2 > 0
methodNames <- names(modelStats)
if (any(zeroFlag)) {
cat("", sepLine, sep = "\n")
for (i in which(zeroFlag)) {
if (is.character(years)) {
cat(indent(gettextf("Zero years in %s series:\n",
methodNames[i], domain="R-dplR")))
} else {
cat(indent(gettextf("Zero indices in %s series:\n",
methodNames[i], domain="R-dplR")))
}
cat(indent(paste(zero.years[[i]][[1]], collapse = " ")),
"\n", sep = "")
}
}
}
}
if(make.plot){
cols <- c("#24492e","#015b58","#2c6184","#59629b","#89689d","#ba7999","#e69b99")
op <- par(no.readonly=TRUE)
on.exit(par(op))
n.methods <- ncol(resids)
par(mar=c(2.1, 2.1, 2.1, 2.1), mgp=c(1.1, 0.1, 0),
tcl=0.5, xaxs="i")
if (n.methods > 4) {
par(cex.main = min(1, par("cex.main")))
}
mat <- switch(n.methods,
matrix(c(1,2), nrow=2, ncol=1, byrow=TRUE),
matrix(c(1,1,2,3), nrow=2, ncol=2, byrow=TRUE),
matrix(c(1,2,3,4), nrow=2, ncol=2, byrow=TRUE),
matrix(c(1,1,2,3,4,5), nrow=3, ncol=2, byrow=TRUE),
matrix(c(1,1,1,2,3,4,5,6,0), nrow=3, ncol=3, byrow=TRUE),
matrix(c(1,1,1,2,3,4,5,6,7), nrow=3, ncol=3, byrow=TRUE),
matrix(c(1,2,3,4,5,6,7,8), nrow=4, ncol=2, byrow=TRUE))
layout(mat,
widths=rep.int(0.5, ncol(mat)),
heights=rep.int(1, nrow(mat)))
# 1
plot(y2, type="l", ylab="mm", col = "grey",
xlab=gettext("Age (Yrs)", domain="R-dplR"),
main=gettextf("Raw Series %s", y.name2, domain="R-dplR"))
if(do.spline) lines(Spline, col=cols[1], lwd=2)
if(do.mne) lines(ModNegExp, col=cols[2], lwd=2)
if(do.mean) lines(Mean, col=cols[3], lwd=2)
if(do.friedman) lines(Friedman, col=cols[5], lwd=2)
if(do.hug) lines(ModHugershoff, col=cols[6], lwd=2)
if(do.ads) lines(AgeDepSpline, col=cols[7], lwd=2)
# 1
if(do.spline){
plot(resids$Spline, type="l", col=cols[1],
main=gettext("Spline", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 2
if(do.mne){
plot(resids$ModNegExp, type="l", col=cols[2],
main=gettext("Neg. Exp. Curve or Straight Line",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 3
if(do.mean){
plot(resids$Mean, type="l", col=cols[3],
main=gettext("Horizontal Line (Mean)", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 4
if(do.ar){
plot(resids$Ar, type="l", col=cols[4],
main=gettextf("Ar", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
mtext(text="(Not plotted with raw series)",side=3,line=-1,cex=0.75)
}
# 5
if (do.friedman) {
plot(resids$Friedman, type="l", col=cols[5],
main=gettext("Friedman's Super Smoother", domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 6
if(do.hug){
plot(resids$ModHugershoff, type="l", col=cols[6],
main=gettext("Hugershoff or Straight Line",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
# 7
if(do.ads){
plot(resids$AgeDepSpline, type="l", col=cols[7],
main=gettext("Age Dep Spline",
domain="R-dplR"),
xlab=gettext("Age (Yrs)", domain="R-dplR"),
ylab=gettext("RWI", domain="R-dplR"))
if(difference){ abline(h=0) }
else{ abline(h=1) }
}
}
# Done
resids2 <- matrix(NA, ncol=ncol(resids), nrow=length(y))
resids2 <- data.frame(resids2)
names(resids2) <- names(resids)
if(!is.null(names(y))) row.names(resids2) <- names(y)
resids2[good.y, ] <- resids
curves2 <- matrix(NA, ncol=ncol(curves), nrow=length(y))
curves2 <- data.frame(curves2)
names(curves2) <- names(curves)
if(!is.null(names(y))) row.names(curves2) <- names(y)
curves2[good.y, ] <- curves
## Reorder columns of output to match the order of the argument
## "method".
resids2 <- resids2[, method2]
curves2 <- curves2[, method2]
## Make sure names (years) are included if there is only one method
if(!is.data.frame(resids2)) names(resids2) <- names(y)
if (return.info) {
list(series = resids2,
curves = curves2,
model.info = modelStats[method2],
data.info = dataStats)
} else {
resids2
}
}
|
# Visualize long-billed curlew GPS fixes: for each bird, plot 200 km
# circles around its cluster centroids and overlay the fixes coloured
# by cluster.
library(sf)
library(sp)
library(rgeos)
library(rgdal)
library(raster)
library(mapview)
library(spocc)
library(scrubr)
library(dplyr)
library(doParallel)
library(ggplot2)
library(geosphere)
library(dismo)
library(tidyr)

# Read GPS fixes with pre-computed cluster assignments and centroids.
# NOTE(review): the path contains mojibake ("Maรîtrise", presumably
# "Maîtrise") -- kept as-is; confirm it matches the directory on disk.
GPS <- read.csv("~/Documents/Maรîtrise/E2018/Summer_school/Project/GPS_clust_cent.csv")

# Split dataframe so that every bird is separate from the others.
birds <- split(GPS, GPS$bird_id)

# One plot per bird, in reverse order (as in the original 66:1 loop).
# Generalized: iterate over however many birds are present instead of
# the hard-coded 66.  (A dead pre-loop `centroid` assignment was removed;
# it was recomputed at the top of every iteration.)
for (i in rev(seq_along(birds))) {
  # Columns 2:3 of each bird's data hold the fix coordinates.
  xy <- SpatialPointsDataFrame(birds[[i]][2:3],
                               data.frame(ID = seq_len(nrow(birds[[i]]))),
                               proj4string = CRS("+proj=longlat +ellps=WGS84 +datum=WGS84"))
  # Unique cluster centroids for this bird, reordered so the first two
  # columns are (lon, lat) as circles() reads them.
  centroid <- birds[[i]] %>%
    select("clust_val", "cent_long", "cent_lat") %>%
    distinct()
  centroid <- centroid[c("cent_long", "cent_lat", "clust_val")]
  # 200 km radius circles; renamed from `circles` to avoid shadowing
  # the dismo::circles() function being called.
  circ <- circles(centroid, d = 200000, lonlat = TRUE)
  plot(circ@polygons, axes = TRUE, main = i)
  plot(xy, col = rainbow(nrow(centroid))[birds[[i]]$clust_val], add = TRUE, main = i)
}
| /graphs.R | no_license | vincelessard/long_billed_curlew_dispersal_patterns | R | false | false | 1,030 | r | library(sf)
library(sp)
library(rgeos)
library(rgdal)
library(raster)
library(mapview)
library(spocc)
library(scrubr)
library(dplyr)
library(doParallel)
library(ggplot2)
library(geosphere)
library(dismo)
library(tidyr)
GPS = read.csv("~/Documents/Maรฎtrise/E2018/Summer_school/Project/GPS_clust_cent.csv")
#Split dataframe so that every bird is separate from the others
birds = split(GPS,GPS$bird_id)
centroid = birds[[1]] %>% select("clust_val","cent_long", "cent_lat") %>% distinct()
# Plot circles
for (i in 66:1){
xy <- SpatialPointsDataFrame(birds[[i]][2:3], data.frame(ID=seq(1:nrow(birds[[i]]))),
proj4string=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84"))
centroid = birds[[i]] %>% select("clust_val","cent_long", "cent_lat") %>% distinct()
centroid = centroid[c("cent_long", "cent_lat", "clust_val")]
circles <- circles(centroid, d=200000, lonlat=T)
plot(circles@polygons, axes=T, main=i)
plot(xy, col=rainbow(nrow(centroid))[birds[[i]]$clust_val], add=T, main=i)
}
|
## ------------------------------------------------------------------------
# Load the demo package providing square(), cube(), reciprocal() and the
# bctrans()/bctrans_inv() pair exercised below.
library(powers)
## ------------------------------------------------------------------------
# The basic power functions applied element-wise to a vector
square(1:10)
cube(1:10)
reciprocal(1:10)
## ------------------------------------------------------------------------
# The package functions can be used anywhere a plain function is expected,
# e.g. as the FUN argument of lapply()
my_list <- list(1:10, 0.5, -0.7)
## So base-R-boring!
lapply(my_list, function(x) x^2)
## Use powers instead!
lapply(my_list, square)
## ------------------------------------------------------------------------
# plot_it = TRUE presumably also plots the result against the input --
# see the powers package documentation
reciprocal(1:10, plot_it=TRUE)
## ------------------------------------------------------------------------
# bctrans()/bctrans_inv() look like Box-Cox style transforms with the
# second argument as lambda (TODO confirm against the package docs)
bctrans(1:10,2)
## ------------------------------------------------------------------------
bctrans(1:10,1)
## ------------------------------------------------------------------------
bctrans(1:10,0)
## ------------------------------------------------------------------------
# Inverse transforms
bctrans_inv(-10:10,1)
## ------------------------------------------------------------------------
bctrans_inv(-10:10,0)
| /powers.Rcheck/powers/doc/using_powers.R | no_license | STAT545-UBC-hw-2018-19/hw07-garyzhubc | R | false | false | 1,007 | r | ## ------------------------------------------------------------------------
library(powers)
## ------------------------------------------------------------------------
square(1:10)
cube(1:10)
reciprocal(1:10)
## ------------------------------------------------------------------------
my_list <- list(1:10, 0.5, -0.7)
## So base-R-boring!
lapply(my_list, function(x) x^2)
## Use powers instead!
lapply(my_list, square)
## ------------------------------------------------------------------------
reciprocal(1:10, plot_it=TRUE)
## ------------------------------------------------------------------------
bctrans(1:10,2)
## ------------------------------------------------------------------------
bctrans(1:10,1)
## ------------------------------------------------------------------------
bctrans(1:10,0)
## ------------------------------------------------------------------------
bctrans_inv(-10:10,1)
## ------------------------------------------------------------------------
bctrans_inv(-10:10,0)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrich.R
\name{hyper_enrich}
\alias{hyper_enrich}
\title{Perform hypergeometric enrichment test on a set of genes}
\usage{
hyper_enrich(gids, tgrp)
}
\description{
Perform hypergeometric enrichment test on a set of genes
}
| /man/hyper_enrich.Rd | permissive | orionzhou/rmaize | R | false | true | 299 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrich.R
\name{hyper_enrich}
\alias{hyper_enrich}
\title{Perform hypergeometic enrichment test on a set of genes}
\usage{
hyper_enrich(gids, tgrp)
}
\description{
Perform hypergeometic enrichment test on a set of genes
}
|
#' @template dbispec-sub
#' @format NULL
#' @section Driver:
#' \subsection{Construction}{
# Test spec: verifies that a DBI backend exports a driver constructor that
# can be called without (required) arguments.  The interleaved "#'" roxygen
# comments below are assembled into the published spec text, so keep them
# in sync with the expectations they annotate; plain "#" comments like this
# one are ignored by that assembly.
spec_driver_constructor <- list(
  constructor = function(ctx) {
    # Name of the package under test, e.g. "RSQLite"
    pkg_name <- package_name(ctx)
    #' The backend must support creation of an instance of this driver class
    #' with a \dfn{constructor function}.
    #' By default, its name is the package name without the leading \sQuote{R}
    #' (if it exists), e.g., \code{SQLite} for the \pkg{RSQLite} package.
    default_constructor_name <- gsub("^R", "", pkg_name)
    #' For the automated tests, the constructor name can be tweaked using the
    #' \code{constructor_name} tweak.
    constructor_name <- ctx$tweaks$constructor_name %||% default_constructor_name
    #'
    #' The constructor must be exported, and
    pkg_env <- getNamespace(pkg_name)
    # bquote() splices the actual constructor name into the expectation,
    # presumably so a failure message shows the concrete name under test.
    eval(bquote(
      expect_true(.(constructor_name) %in% getNamespaceExports(pkg_env))))
    #' it must be a function
    eval(bquote(
      expect_true(exists(.(constructor_name), mode = "function", pkg_env))))
    constructor <- get(constructor_name, mode = "function", pkg_env)
    #' that is callable without arguments.
    #' For the automated tests, unless the
    #' \code{constructor_relax_args} tweak is set to \code{TRUE},
    if (!isTRUE(ctx$tweaks$constructor_relax_args)) {
      #' an empty argument list is expected.
      expect_that(constructor, arglist_is_empty())
    } else {
      #' Otherwise, an argument list where all arguments have default values
      #' is also accepted.
      expect_that(constructor, all_args_have_default_values())
    }
    #'
  },
  #' }
  NULL
)
| /R/spec-driver-constructor.R | no_license | jimhester/DBItest | R | false | false | 1,613 | r | #' @template dbispec-sub
#' @format NULL
#' @section Driver:
#' \subsection{Construction}{
spec_driver_constructor <- list(
constructor = function(ctx) {
pkg_name <- package_name(ctx)
#' The backend must support creation of an instance of this driver class
#' with a \dfn{constructor function}.
#' By default, its name is the package name without the leading \sQuote{R}
#' (if it exists), e.g., \code{SQLite} for the \pkg{RSQLite} package.
default_constructor_name <- gsub("^R", "", pkg_name)
#' For the automated tests, the constructor name can be tweaked using the
#' \code{constructor_name} tweak.
constructor_name <- ctx$tweaks$constructor_name %||% default_constructor_name
#'
#' The constructor must be exported, and
pkg_env <- getNamespace(pkg_name)
eval(bquote(
expect_true(.(constructor_name) %in% getNamespaceExports(pkg_env))))
#' it must be a function
eval(bquote(
expect_true(exists(.(constructor_name), mode = "function", pkg_env))))
constructor <- get(constructor_name, mode = "function", pkg_env)
#' that is callable without arguments.
#' For the automated tests, unless the
#' \code{constructor_relax_args} tweak is set to \code{TRUE},
if (!isTRUE(ctx$tweaks$constructor_relax_args)) {
#' an empty argument list is expected.
expect_that(constructor, arglist_is_empty())
} else {
#' Otherwise, an argument list where all arguments have default values
#' is also accepted.
expect_that(constructor, all_args_have_default_values())
}
#'
},
#' }
NULL
)
|
#' @name humanSexDEedgeR
#' @title edgeR object for DE genes between males and females
#' @description edgeR object holding differential-expression results for
#'   genes between male and female samples.
#' @docType data
#' @usage humanSexDEedgeR
#' @format edgeR object
#' @source gEUvadis
#' @author Lorena Pantano, 2014-05-31
NULL
#' @title edgeR object for DE genes betwen Male and Females
#' @description edgeR object for DE genes betwen Male and Females
#' @docType data
#' @usage humanSexDEedgeR
#' @format edgeR object
#' @source gEUvadis
#' @author Lorena Pantano, 2014-05-31
NULL |
# Functions used in the Settlement Vegetation analysis:
# The Morisita density estimator.
#
# Estimates settlement-era stem density and basal area from Public Land
# Survey point data with the two-tree, two-quadrant Morisita estimator.
#
# Args:
#   processed.data:    an object with a @data slot (e.g. a
#                      SpatialPointsDataFrame) holding, per survey point,
#                      azimuths (az1..az4), diameters in inches
#                      (diam1..diam4) and distances in links (dist1..dist4)
#                      for up to four bearing trees.
#   correction.factor: list with components kappa, theta, zeta and phi --
#                      the modified Cottam correction factors estimated in
#                      'load.estimate.correction.R'.
#   veil:              if TRUE, diameters below 8 in are treated as
#                      unrecorded (a "veil line").
#
# Returns:
#   list(density, basal.area): stem density in stems/ha and basal area
#   (density times the summed cross-sectional area of the recorded trees),
#   with NA wherever a point does not provide two usable trees in two
#   different quadrants.
morisita <- function(processed.data, correction.factor = NULL, veil = FALSE) {
  azim <- processed.data@data[, c('az1', 'az2', 'az3', 'az4')]
  diam <- processed.data@data[, c('diam1', 'diam2', 'diam3', 'diam4')]
  dist <- processed.data@data[, c('dist1', 'dist2', 'dist3', 'dist4')]
  if (veil) {
    # Veil line: drop trees below 8 in diameter
    diam[diam < 8] <- NA
  }
  # Diameters: inches -> meters
  m.diam <- diam * 2.54 / 100
  # Distances/azimuths may arrive as factors; coerce via character and truncate
  dist <- floor(apply(dist, 2, function(x) as.numeric(as.character(x))))
  azim <- floor(apply(azim, 2, function(x) as.numeric(as.character(x))))
  # Number of distinct quadrants (azimuth / 90 deg) covered by the first two
  # trees.  The estimator requires two trees in two different quadrats;
  # points failing that are dropped (NA) further down.
  two.quads <- apply(azim[, 1:2], 1,
                     function(x) sum(!is.na(unique(floor(x / 90)))))
  # Points with NA azimuths but two recorded distances (pretty much all in
  # Michigan) are assumed to satisfy the two-quadrant requirement.
  two.quads[((two.quads < 2 & (is.na(azim[, 1]) | is.na(azim[, 2]))) &
             !(is.na(dist[, 1]) | is.na(dist[, 2])))] <- 2
  # Exclusions:
  # - plots with a tree as plot center,
  two.quads[dist[, 1] == 0] <- 0
  # - plots where one of the trees has no measured diameter,
  two.quads[is.na(diam[, 1]) | is.na(diam[, 2])] <- 0
  # - plots where a distance to tree is missing.
  two.quads[is.na(dist[, 1]) | is.na(dist[, 2])] <- 0
  # q plays the role of k in the Morisita formula.
  q <- two.quads
  # Tree distance is recorded in links; convert to meters and add one half
  # diameter (so the distance refers to the stem center).
  m.dist <- dist * 0.201168 + 0.5 * m.diam
  # Sum of squared radii over the first two trees.  Sites that cannot
  # support the estimator get NA: two trees in one quadrant, fewer than two
  # trees, zero radius, or an implausibly small total distance.
  rsum <- rowSums((m.dist[, 1:2])^2, na.rm = TRUE)
  rsum[rowSums(is.na(m.dist[, 1:2])) == 2 | q < 2 | rsum == 0 |
       rowSums(m.dist[, 1:2], na.rm = TRUE) < 0.6035] <- NA
  # Morisita: lambda = kappa * theta * (q - 1)/(pi * n) * (q / sum(r^2)),
  # with n = 1.  Units are stems / m^2 at this point.
  morisita.est <- ((q - 1) / (pi * 1)) * (2 / rsum) *
    correction.factor$kappa * correction.factor$theta *
    correction.factor$zeta * correction.factor$phi
  morisita.est[q < 2] <- NA
  # Convert to stems / ha
  morisita.est <- morisita.est * 10000
  # Basal area: stem density (trees/ha) times the summed cross-sectional
  # area of the recorded trees (radii converted to meters).
  met.rad <- (diam / 2) * 2.54 / 100
  basal.area <- morisita.est * rowSums(pi * met.rad^2, na.rm = TRUE)
  basal.area[q < 2] <- NA
  return(list(morisita.est, basal.area))
}
| /R/deprecated/simons_misc_fun.r | no_license | Kah5/bimodality | R | false | false | 4,134 | r | # Functions used in the Settlement Vegetation analysis:
# The Morisita density estimator:
morisita <- function(processed.data, correction.factor = NULL, veil=FALSE) {
# Function to calculate stem density using the morista function. The input
# is 'processed.data', which should be the file 'used.data'. 'correction
# factor' is the modified Cottam Correction factor determined in
# 'load.estimate.correction.R' using a generalized linear model (with a Gamma
# distribution).
azim <- processed.data@data[,c('az1', 'az2', 'az3', 'az4')]
diam <- processed.data@data[,c('diam1', 'diam2', 'diam3', 'diam4')]
dist <- processed.data@data[,c('dist1', 'dist2', 'dist3', 'dist4')]
spec <- processed.data@data[,c('species1', 'species2', 'species3', 'species4')]
if(veil){
diam[diam < 8] <- NA
}
m.diam <- diam * 2.54 / 100
dist <- floor(apply(dist, 2, function(x)as.numeric(as.character(x))))
azim <- floor(apply(azim, 2, function(x)as.numeric(as.character(x))))
# This tells us how many quadrats are used. I'd prefer to use all points
# where samples are drawn from two quadrats, but in some cases it seems that
# there are NAs in the data.
# The current method eliminates 14,372 across the region.
# If a point has recorded azimuths we state that they must be in two different
# quadrats:
two.quads <- apply(azim[,1:2], 1, function(x) sum(!is.na(unique(floor(x/90)))))
# There are 10,155 points for which the first two trees were sampled in the
# same quadrat. In general these are randomly distributed, but interestingly
# there's a big clump of them in Wisconsin. Even so, there are lots of other
# points around. We can accept that these points are categorically wrong.
#
# sum((two.quads == 1 & !(is.na(azim[,1]) | is.na(azim[,2]))))
# There are 16,197 points with an NA for the azimuth, but with two recorded
# distances, these are pretty much all in Michigan. These are the ones
# we need to change:
two.quads[((two.quads < 2 & (is.na(azim[,1]) | is.na(azim[,2]))) &
!(is.na(dist[,1]) | is.na(dist[,2])))] <- 2
# Exclusions include:
# Plots with a tree as plot center:
two.quads[dist[,1] == 0] <- 0
# Plots where one of the trees has no measured diameter:
two.quads[is.na(diam[,1]) | is.na(diam[,2])] <- 0
# Plots where a distance to tree is missing:
two.quads[is.na(dist[,1]) | is.na(dist[,2])] <- 0
# This is the same as k in Charlie's spreadhseet:
q <- two.quads
# Tree dist is measured in links in the dataset, I am converting to
# meters and adding one half a dimater (in cm), on Charlie's advice.
m.dist <- dist * 0.201168 + 0.5 * m.diam
# rsum is the sum of the squared radii, in cases where there are two trees in
# the same quadrant I'm going to drop the site, as I will with any corner
# with only one tree since the morista density estimator can't calculate
# density with less than two trees, and requires two quadrats.
# I'm going to let the NAs stand in this instance.
rsum <- rowSums((m.dist[,1:2])^2, na.rm=T)
# A set of conditions to be met for the rsum to be valid:
rsum[rowSums(is.na(m.dist[,1:2])) == 2 | q < 2 | rsum == 0 | rowSums(m.dist[,1:2], na.rm=T) < 0.6035] <- NA
# From the formula,
# lambda = kappa * theta * (q - 1)/(pi * n) * (q / sum_(1:q)(r^2))
# here, n is equal to 1.
# units are in stems / m^2
morisita.est <- ((q - 1) / (pi * 1)) * (2 / rsum) *
correction.factor$kappa * correction.factor$theta * correction.factor$zeta * correction.factor$phi
morisita.est[q < 2] <- NA
# Now they're in stems / ha
morisita.est <- morisita.est * 10000
# Basal area is the average diameter times the stem density.
# The stem density is measured in trees / ha.
met.rad <- (diam / 2) * 2.54 / 100
basal.area <- morisita.est * rowSums(pi * met.rad^2, na.rm=TRUE)
basal.area[ q < 2 ] <- NA
return(list(morisita.est, basal.area))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commandContexts.R, R/commandContextsDoc.R
\name{getWindowHandles}
\alias{getWindowHandles}
\title{Get all window handles.}
\usage{
getWindowHandles(remDr, ...)
}
\arguments{
\item{remDr}{An object of class "rDriver". A remote driver object see
\code{\link{remoteDr}}.}
\item{...}{Additional function arguments - currently passes the
\code{\link{retry}} argument.}
}
\value{
Returns a list of windows handles. Each element of the list is a
string. The order window handles are returned is arbitrary.
}
\description{
\code{getWindowHandles} Retrieve the list of all window handles available
to the session.
}
\examples{
\dontrun{
remDr <- remoteDr()
remDr \%>\% getWindowHandle() # The current window handle
remDr \%>\% getWindowHandles() # All windows in the session
# Get the window position
remDr \%>\% getWindowPosition
# Some browsers are still using the old JSON wire end points
remDr \%>\% getWindowPositionOld
# Get the size of the window
remDr \%>\% getWindowSize
# Some browsers are still using the old JSON wire end points
# remDr \%>\% getWindowSizeOld
# Set the window size
remDr \%>\% setWindowSize(500, 500)
# Some browsers are still using the old JSON wire end points
remDr \%>\% setWindowSizeOld(500, 500)
# Set the position of the window
remDr \%>\% setWindowPositionOld(400, 100)
# Some browsers are still using the old JSON wire end points
# remDr \%>\% setWindowPositionOld(400, 100)
# Maximise the window
remDr \%>\% maximizeWindow
# Some browsers are still using the old JSON wire end points
# remDr \%>\% maximizeWindowold()
remDr \%>\% go("http://www.google.com/ncr")
# search for the "R project"
remDr \%>\% findElement("name", "q") \%>\%
elementSendKeys("R project", key = "enter")
webElem <- remDr \%>\% findElement("css", "h3.r a")
remDr \%>\% deleteSession
}
}
\seealso{
Other commandContexts functions: \code{\link{closeWindow}},
\code{\link{fullscreenWindow}},
\code{\link{getWindowHandle}},
\code{\link{getWindowPosition}},
\code{\link{getWindowSize}},
\code{\link{maximizeWindow}},
\code{\link{setWindowPosition}},
\code{\link{setWindowSize}}, \code{\link{switchToFrame}},
\code{\link{switchToParentFrame}},
\code{\link{switchToWindow}}
}
| /man/getWindowHandles.Rd | no_license | johndharrison/seleniumPipes | R | false | true | 2,347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commandContexts.R, R/commandContextsDoc.R
\name{getWindowHandles}
\alias{getWindowHandles}
\title{Get all window handles.}
\usage{
getWindowHandles(remDr, ...)
}
\arguments{
\item{remDr}{An object of class "rDriver". A remote driver object see
\code{\link{remoteDr}}.}
\item{...}{Additonal function arguments - Currently passes the
\code{\link{retry}} argument.}
}
\value{
Returns a list of windows handles. Each element of the list is a
string. The order window handles are returned is arbitrary.
}
\description{
\code{getWindowHandles} Retrieve the list of all window handles available
to the session.
}
\examples{
\dontrun{
remDr <- remoteDr()
remDr \%>\% getWindowHandle() # The current window handle
remDr \%>\% getWindowHandles() # All windows in the session
# Get the window position
remDr \%>\% getWindowPosition
# Some browsers are still using the old JSON wire end points
remDr \%>\% getWindowPositionOld
# Get the size of the window
remDr \%>\% getWindowSize
# Some browsers are still using the old JSON wire end points
# remDr \%>\% getWindowSizeOld
# Set the window size
remDr \%>\% setWindowSize(500, 500)
# Some browsers are still using the old JSON wire end points
remDr \%>\% setWindowSizeOld(500, 500)
# Set the position of the window
remDr \%>\% setWindowPositionOld(400, 100)
# Some browsers are still using the old JSON wire end points
# remDr \%>\% setWindowPositionOld(400, 100)
# Maximise the window
remDr \%>\% maximizeWindow
# Some browsers are still using the old JSON wire end points
# remDr \%>\% maximizeWindowold()
remDr \%>\% go("http://www.google.com/ncr")
# search for the "R project"
remDr \%>\% findElement("name", "q") \%>\%
elementSendKeys("R project", key = "enter")
webElem <- remDr \%>\% findElement("css", "h3.r a")
remDr \%>\% deleteSession
}
}
\seealso{
Other commandContexts functions: \code{\link{closeWindow}},
\code{\link{fullscreenWindow}},
\code{\link{getWindowHandle}},
\code{\link{getWindowPosition}},
\code{\link{getWindowSize}},
\code{\link{maximizeWindow}},
\code{\link{setWindowPosition}},
\code{\link{setWindowSize}}, \code{\link{switchToFrame}},
\code{\link{switchToParentFrame}},
\code{\link{switchToWindow}}
}
|
library(stylo)
library(stringi)
library(sqldf)
library(reshape)
library(fastmatch)
## Load the four N-gram frequency tables (sizes 1-4) of one source that
## were previously written by SaveSource().
##
## Args:
##   directory:  folder containing the '<sourcename><i>gram' files.
##   sourcename: file-name prefix, e.g. "blog".
##   MinCount:   minimum occurrence count; rarer frequencies are dropped
##               through TopNGramFreq() (1 keeps everything).
##
## Returns a list of four named frequency vectors (1- to 4-grams).
## NOTE: changes the working directory as a side effect (kept for
## compatibility with existing call sites).
LoadSource <- function(directory, sourcename, MinCount = 2) {
  setwd(directory)
  freq <- list()
  for (i in 1:4) {
    handle <- file(paste0(sourcename, i, 'gram'), "rb")
    load(handle)
    # Close every connection; the original leaked all four of them.
    close(handle)
    freq[[i]] <- unserialize(get(paste0(sourcename, i, 'gram')))
  }
  if (MinCount > 1) {
    for (i in 1:4) {
      freq[[i]] <- TopNGramFreq(freq[[i]], MinCount)
    }
  }
  freq
}
## Write the four N-gram frequency tables of one source to disk, one file
## per N-gram size, named '<sourcename><i>gram'.  Counterpart of
## LoadSource(); the vector is serialized and stored inside an RData file
## written through a binary connection.
## NOTE: changes the working directory as a side effect.
SaveSource <- function(directory, sourcename, Freqs) {
  setwd(directory)
  for (i in 1:4) {
    handle <- file(paste0(sourcename, i, 'gram'), "wb")
    assign(paste0(sourcename, i, 'gram'), serialize(Freqs[[i]], NULL))
    save(list = paste0(sourcename, i, 'gram'), file = handle)
    # Close inside the loop; the original closed only the last of the
    # four connections and leaked the other three.
    close(handle)
  }
}
## Function to calculate and save N-Gram of a specific size
## averaged over the three sources.
# Loads the blog/news/twitter frequency tables for one N-gram size via
# LoadNGrams(), outer-merges them on the n-gram string, treats an n-gram
# missing from a source as frequency 0, averages the three frequencies and
# writes the resulting named vector to '<directory>/mean<NGramSize>gram'
# in the same serialized-inside-RData layout used by SaveSource().
SaveMeanNGram <- function(directory,NGramSize,MinCount) {
  freq = LoadNGrams(directory,NGramSize,MinCount)
  # One data frame per source: n-gram string plus its frequency
  blogf = data.frame(ngram=names(freq[[1]]),freq1=freq[[1]])
  newsf = data.frame(ngram=names(freq[[2]]),freq2=freq[[2]])
  twitf = data.frame(ngram=names(freq[[3]]),freq3=freq[[3]])
  # Full outer join keeps n-grams present in any of the three sources
  meanf = merge(merge(blogf,newsf,by='ngram',all=TRUE),twitf,by='ngram',all=TRUE)
  # Absent from a source => frequency 0 there
  meanf$freq1[which(is.na(meanf$freq1))]=0
  meanf$freq2[which(is.na(meanf$freq2))]=0
  meanf$freq3[which(is.na(meanf$freq3))]=0
  meanf = transform(meanf, freq=(freq1+freq2+freq3)/3)
  # Collapse to a named numeric vector: names = n-grams, values = mean freq
  ngrams = meanf[,'ngram']
  meanf = meanf[,'freq']
  names(meanf) = ngrams
  # NOTE(review): setwd() changes the caller's working directory as a side
  # effect -- confirm callers rely on this before refactoring.
  setwd(directory)
  handle = file(paste0('mean',NGramSize,'gram'),"wb")
  assign(paste0('mean',NGramSize,'gram'),serialize(meanf, NULL))
  save(list=paste0('mean',NGramSize,'gram'),file=handle)
  close(handle)
  rm(handle)
}
## Load the N-gram frequency tables of one given size from all three
## sources (blog, news, twitter), in that order.
##
## Args:
##   directory: folder containing the '<source><NGramSize>gram' files.
##   NGramSize: N-gram size (1-4) to load.
##   MinCount:  minimum occurrence count; rarer frequencies are dropped
##              through TopNGramFreq() (1 keeps everything).
##
## Returns a list of three named frequency vectors (blog, news, twitter).
## NOTE: changes the working directory as a side effect.
LoadNGrams <- function(directory, NGramSize, MinCount = 2) {
  setwd(directory)
  freq <- list()
  sources <- c('blog', 'news', 'twit')
  for (i in 1:3) {
    handle <- file(paste0(sources[i], NGramSize, 'gram'), "rb")
    load(handle)
    # Close every connection; the original leaked all three of them.
    close(handle)
    freq[[i]] <- unserialize(get(paste0(sources[i], NGramSize, 'gram')))
  }
  if (MinCount > 1) {
    for (i in 1:3) {
      freq[[i]] <- TopNGramFreq(freq[[i]], MinCount)
    }
  }
  freq
}
## Read a text file as a character vector of lines.
## The file is opened as a binary connection, read as UTF-8 (embedded NULs
## skipped) and re-encoded to latin1, replacing unconvertible characters
## with a single space.
LoadTextFile <- function(directory, filename) {
  path <- paste0(directory, "/", filename)
  con <- file(path, open = "rb")
  on.exit(close(con), add = TRUE)
  lines <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
  iconv(lines, from = "UTF-8", to = "latin1", sub = " ")
}
## Function to tokenize a text file.
# Turns raw text lines into lowercase word tokens with explicit sentence
# boundary markers (<s>, </s>) so later N-gram construction can avoid
# crossing sentences.  Uses stri_replace_all() (stringi) and
# txt.to.words() (stylo).
Tokenizer <- function(textfile) {
  my.text = textfile
  # To lower case
  my.text = tolower(my.text)
  # Removes numbers and special characters (except for the ' as we intend to keep contractions)
  my.text.eos = stri_replace_all(my.text, '', regex='[0-9]+|[+"()@#$%^&*_=|/<>]+|-')
  # Substitutes punctuation (.!?) for end of sentence (</s>) followed by begin of sentence <s>
  # and pastes a begin of sentence at the very beginning of the text.
  # This way we avoid cross-sentence Ngrams.
  # Ngrams containing any of (</s>, <s>) will be removed later.
  my.text.eos = paste('<s>', stri_replace_all(my.text.eos, ' </s> <s>', regex='[.?!]'))
  # Tokenizes the text splitting by spaces, commas, colons, semicolons, tabs and newlines.
  my.text.tokenized = txt.to.words(my.text.eos, splitting.rule = "([a-z]+_)|[ ,;:\n\t]")
  # Removes temporary objects from memory
  rm(my.text)
  rm(my.text.eos)
  # Returns tokenized text
  my.text.tokenized
}
## Function to generate a table of frequencies given a tokenized file and
## an N-gram size.
# Builds N-grams from the tokenized text with stylo's txt.to.features(),
# filters out N-grams that contain sentence markers or malformed words,
# computes relative frequencies with make.table.of.frequencies() and
# optionally drops rare N-grams through TopNGramFreq().
NGramFreq <- function(tokenized,NGramSize=1,MinCount=2) {
  # Generates Ngrams from the tokenized text
  if (NGramSize > 1) {
    my.NGrams = txt.to.features(tokenized, ngram.size=NGramSize)
  } else {
    my.NGrams = tokenized
  }
  # According to the size, make sure the sentences do not begin nor end with '.
  # The regex keeps only N-grams made of exactly NGramSize lowercase words
  # (optionally with an internal apostrophe), which also discards any
  # N-gram containing the <s> / </s> sentence markers.
  expr = paste0("^([a-z]+([a-z]+|('+[a-z]+)){0,1}[ ]{0,1}){1,",NGramSize,"}$")
  my.NGrams = my.NGrams[grep(expr,my.NGrams)]
  # Generates the complete list of unique Ngrams
  complete.Ngrams.list = names(sort(table(unlist(my.NGrams)),
                                    decreasing = TRUE))
  # Calculates Ngrams frequencies
  Ngram.freq = make.table.of.frequencies(my.NGrams, complete.Ngrams.list)
  # Assumes minimum frequency = 1 occurrence
  # Keeps only frequency corresponding to MinCount
  top.Ngram.freq = Ngram.freq
  if (MinCount > 1) {
    top.Ngram.freq = TopNGramFreq(Ngram.freq,MinCount)
  }
  # Removes temporary objects from memory
  rm(my.NGrams)
  rm(complete.Ngrams.list)
  # Returns frequencies
  top.Ngram.freq
}
## Drop rare entries from a named frequency vector, assuming the smallest
## observed frequency corresponds to a count of one occurrence: only
## entries whose frequency exceeds (MinCount - 1) times that minimum are
## kept.  The survivors are returned sorted in decreasing order.
TopNGramFreq <- function(NGramFreq, MinCount = 2) {
  # Frequency equivalent of (MinCount - 1) occurrences
  cutoff <- (MinCount - 1) * min(NGramFreq)
  # How many entries clear the cutoff
  n.keep <- sum(NGramFreq > cutoff)
  head(sort(NGramFreq, decreasing = TRUE), n.keep)
}
## Function to generate a prediction table from a table of frequencies and
## a given number of options to keep.
# Maps each (N-1)-word predictor to its top next-word candidates.  Pruning
# keeps the table small: predictors outside the 99.5% probability mass are
# dropped, and within a predictor only words inside the 95% mass (or with
# > 5% conditional probability) survive, capped at QtOptions candidates.
# Output rows are keyed as "<predictor>.<rank>".  Requires stringi and
# sqldf; with HashTable=TRUE a hash (presumably from the 'hash' package)
# is returned instead of a data frame.
PredictionTable <- function(NGramFreq,QtOptions=5,HashTable=FALSE) {
  # Calculates Ngram size
  NGramSize = length(unlist((stri_split_fixed(names(NGramFreq)[1]," "))))
  # Splits NGrams into a continuous list
  pairs = unlist(stri_split_fixed(names(NGramFreq)," "))
  # According to size N, generates a DF with N columns and order by descendent frequency
  # For N > 2, concatenates (N-1) first words into "first" column to be used as predictor
  if (NGramSize == 2) {
    pfreq = data.frame(first=pairs[seq(1,length(pairs)-1,2)],second=pairs[seq(2,length(pairs),2)],freq=as.vector(NGramFreq))
    pairs.ord = sqldf("select * from pfreq order by first asc, freq desc")
    tot.pairs = pairs.ord
  } else if (NGramSize == 3) {
    pfreq = data.frame(first=pairs[seq(1,length(pairs)-2,3)],second=pairs[seq(2,length(pairs)-1,3)],third=pairs[seq(3,length(pairs),3)],freq=as.vector(NGramFreq))
    pairs.ord = sqldf("select * from pfreq order by first asc, second asc, freq desc")
    tot.pairs = data.frame(first=paste(pairs.ord$first,pairs.ord$second),second=pairs.ord$third,freq=pairs.ord$freq)
  } else if (NGramSize == 4) {
    pfreq = data.frame(first=pairs[seq(1,length(pairs)-3,4)],second=pairs[seq(2,length(pairs)-2,4)],third=pairs[seq(3,length(pairs)-1,4)],fourth=pairs[seq(4,length(pairs),4)],freq=as.vector(NGramFreq))
    pairs.ord = sqldf("select * from pfreq order by first asc, second asc, third asc, freq desc")
    tot.pairs = data.frame(first=paste(pairs.ord$first,pairs.ord$second,pairs.ord$third),second=pairs.ord$fourth,freq=pairs.ord$freq)
  } else {
    # Only 2- to 4-grams are supported
    return(NULL)
  }
  # Calculates total frequencies for each unique occurrences of (N-1) words (column "first")
  tot.pairs = transform(tot.pairs, tot.freq = ave(freq, first, FUN=sum))
  # Calculates cumulative frequency of the unique predictors and sort in descending order
  tot.pairs2 = transform(tot.pairs, first = as.character(first))
  tot.pairs2 = tot.pairs2[,c('first','tot.freq')]
  tot.pairs2 = unique(tot.pairs2)
  sum.freq = sum(tot.pairs2$tot.freq)
  tot.pairs2 = transform(tot.pairs2, tot.freq = tot.freq/sum.freq)
  tot.pairs2 = tot.pairs2[order(-tot.pairs2$tot.freq),]
  # Obtains predictor's probability at the level of 99.5% probability mass
  min.prob = tot.pairs2$tot.freq[min(which(cumsum(tot.pairs2$tot.freq)>.995))]
  # Considers only predictors with probabilities above this threshold
  tot.pairs2 = tot.pairs2[tot.pairs2$tot.freq>=min.prob,]
  # Subsets the predictors
  tot.pairs = tot.pairs[tot.pairs$first%in%tot.pairs2$first,]
  # Recalculates cumulative frequency of predicted words within a same predictor
  tot.pairs = transform(tot.pairs, cumsum = ave(freq/tot.freq, first, FUN = cumsum))
  # For predictors with a large number of predicted words,
  # keeps only those within the 95% probability mass OR
  # those with probability greater than 5%.
  tot.pairs = tot.pairs[(tot.pairs$cumsum<.95)|(tot.pairs$cumsum==1.0&(tot.pairs$freq/tot.pairs$tot.freq)>.05),]
  # Creates a DF with predictors and predicted words
  Ngram.pred = data.frame(first=tot.pairs$first,pred=tot.pairs$second,freq=tot.pairs$freq)
  # Adds a column full of 1's in order to compute a ranking of the predicted words
  # within a given predictor
  Ngram.pred = transform(cbind(Ngram.pred,ones=1), count = ave(ones, first, FUN=cumsum))
  # Filters out options above the specified threshold
  Ngram.pred = Ngram.pred[Ngram.pred$count<=QtOptions,]
  # Concatenates the ranking after the predictor in order to disambiguate
  Ngram.pred = data.frame(first=paste0(as.character(Ngram.pred$first),'.',Ngram.pred$count),pred=as.character(Ngram.pred$pred),freq=Ngram.pred$freq)
  # Removes temporary objects from memory
  rm(pairs)
  rm(pairs.ord)
  rm(pfreq)
  rm(tot.pairs)
  # Generates a hash table of the pairs (predictors, predicted)
  if (HashTable) {
    Ngram.hash = hash(Ngram.pred[,1],Ngram.pred[,2])
    Ngram.hash
  } else {
    Ngram.pred
  }
}
## Function to actually make a prediction.
# Given the user's partial input string, returns up to five suggestions.
# If the user is mid-word (no trailing space) it completes the word from
# the global 'unigram' table; otherwise it predicts the next word from the
# last (up to) three typed words using a simple back-off over the global
# 'pred' tables (4-gram down to unigram), filtering out 'profanityWords'.
# Depends on globals: unigram, pred, profanityWords.
Prediction <- function(predictor) {
  require(stringi)
  require(fastmatch)
  # defines a subfunction to trim leading and trailing spaces
  trim <- function( x ) {
    gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
  }
  # convert predictor to lower case
  predictor = tolower(predictor)
  # if the user types punctuation, separate sentences in order to avoid cross-sentence N-grams
  last.end = stri_locate_last(predictor,regex='[.?!]')[1]
  if (is.na(last.end)) last.end = 0
  if (last.end > 0) predictor = trim(substr(predictor,start=last.end+1,stop=nchar(predictor)))
  # removes special characters and numbers
  predictor = stri_replace_all(predictor, '', regex='[0-9]+|[+"()@#$%^&*_=|/<>]+|-')
  # removes punctuation
  predictor = stri_replace_all(predictor, ' ', regex='[,;:\n\t]')
  # check if the user has typed a space...
  if (stri_sub(predictor,nchar(predictor)) == ' ' | predictor == '') {
    predictor = trim(predictor)
  } else {
    # in case it is still typing a word, checks for the most likely word being typed
    wordpart = unlist(stri_split_fixed(predictor," "))
    wordpart = wordpart[length(wordpart)]
    wordset = unigram[grep(paste0("^",wordpart,".*"),unigram$word),]
    word = as.character(wordset[which.max(wordset$freq),'word'])
    if (!length(word)) word = ""
    # return only the remaining characters needed to complete the word
    return(stri_sub(word,nchar(wordpart)+1))
  }
  # split the input into words
  words = unlist(stri_split_fixed(predictor," "))
  NGram = length(words)
  # if there are more than three words...
  if (NGram > 3) {
    # keeps only the last typed 3-gram
    predictor = paste(words[(NGram-2):NGram],collapse=" ")
    words = words[(NGram-2):NGram]
    NGram = 3
  }
  # since we are interested in the most likely word to follow, we have to check for a N-gram one size bigger
  NGram = NGram + 1
  next.word = NULL
  # simple back-off strategy: if there is no N-gram of size 4, steps down one size at a time
  # it steps down also if there are not enough suggestions
  for (i in seq(NGram,1,by=-1)) {
    if (i > 1) {
      # keys in pred[[i]] are "<predictor>.<rank>"; look up ranks 1..5
      next.word = unique(c(next.word,as.character(pred[[i]][fmatch(paste0(predictor,".",seq(1:5)),pred[[i]]$first),'pred'])))
      next.word = next.word[!is.na(next.word)]
    } else {
      # if gets to unigrams, just add the top 5 words
      next.word = unique(c(next.word, pred[[1]][1:5]))
    }
    # filters out profanity as suggestion
    next.word = setdiff(next.word,profanityWords)
    # if there are more than 5 words suggested, keeps only the top 5
    if (length(next.word) >= 5) {
      return(next.word[1:5])
    }
    if (i > 2) {
      # shorten the predictor for the next back-off step
      predictor = paste(words[(NGram-i+2):(NGram-1)],collapse=" ")
    }
  }
}
###########################################################################################
## Function calls in order to build the unigram and pred[[]] variables which are used by ##
## the Prediction function in the .RData stored in ShinyApps                             ##
###########################################################################################
source.dir = "./Data/en_US"
savedir = "./"
# Blog corpus: read, tokenize and save the raw (MinCount = 1) 1- to 4-gram tables
blog = LoadTextFile(source.dir,'en_US.blogs.txt')
tokenized = Tokenizer(blog)
freq = list()
for (i in 1:4) {
  freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'blog',freq)
# News corpus: same pipeline
news = LoadTextFile(source.dir,'en_US.news.txt')
tokenized = Tokenizer(news)
freq = list()
for (i in 1:4) {
  freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'news',freq)
# Twitter corpus: same pipeline
twit = LoadTextFile(source.dir,'en_US.twitter.txt')
tokenized = Tokenizer(twit)
freq = list()
for (i in 1:4) {
  freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'twit',freq)
# Average each N-gram size over the three sources (MinCount = 2)
for (i in 1:4) {
  SaveMeanNGram(savedir,i,2)
}
# Word list used to filter suggestions in Prediction()
profanityWords = LoadTextFile(savedir,"ProfanityWords.txt")
# Reload the averaged tables, keeping counts >= 3
meanGrams = LoadSource(savedir,'mean',3)
# Unigram table used for in-progress word completion
unigram = data.frame(word=names(meanGrams[[1]]),freq=meanGrams[[1]])
pred = list()
# pred[[1]]: top five unigrams; pred[[2..4]]: predictor -> next-word tables
pred[[1]] = as.character(unigram$word[order(-unigram$freq)])[1:5]
for (i in 2:4) {
  pred[[i]] = PredictionTable(meanGrams[[i]],5,FALSE)
  pred[[i]]=data.frame(first=as.character(pred[[i]]$first),pred=as.character(pred[[i]]$pred))
}
library(stringi)
library(sqldf)
library(reshape)
library(fastmatch)
## Function to load N-grams from a specific source that were saved with SaveSource
LoadSource <- function(directory,sourcename,MinCount=2) {
setwd(directory)
freq = list()
for (i in 1:4) {
handle = file(paste0(sourcename,i,'gram'),"rb")
load(handle)
freq[[i]] = unserialize(get(paste0(sourcename,i,'gram')))
}
rm(handle)
if (MinCount > 1) {
for (i in 1:4) {
freq[[i]] = TopNGramFreq(freq[[i]],MinCount)
}
}
freq
}
## Function to save N-grams of a specific source passed as argument (Freqs)
##
## directory: destination folder; NOTE it becomes the working directory as a
##   side effect (kept for compatibility with the rest of this script).
## sourcename: file-name prefix ('blog', 'news', 'twit', ...)
## Freqs: list of 4 named frequency vectors (1- to 4-grams), written to the
##   files "<sourcename>1gram" ... "<sourcename>4gram" as serialized blobs
##   that LoadSource knows how to read back.
SaveSource <- function(directory, sourcename, Freqs) {
  setwd(directory)
  for (i in 1:4) {
    handle <- file(paste0(sourcename, i, 'gram'), "wb")
    # Store the serialized vector under the variable name
    # "<sourcename><i>gram" so LoadSource can load() + get() it back.
    assign(paste0(sourcename, i, 'gram'), serialize(Freqs[[i]], NULL))
    save(list = paste0(sourcename, i, 'gram'), file = handle)
    close(handle)  # BUG FIX: the original only closed the last of the 4 handles
  }
}
## Function to calculate and save N-Gram of a specific size
## averaged over the three sources
##
## Loads the NGramSize-grams of all three sources (blog/news/twit), takes a
## full outer join on the n-gram string, averages the three per-source
## frequencies (missing source counts as 0), and serializes the result to
## the file "mean<NGramSize>gram" in `directory`.
## Side effect: changes the working directory to `directory`.
SaveMeanNGram <- function(directory,NGramSize,MinCount) {
freq = LoadNGrams(directory,NGramSize,MinCount)
# One data frame per source: n-gram string + its frequency in that source.
blogf = data.frame(ngram=names(freq[[1]]),freq1=freq[[1]])
newsf = data.frame(ngram=names(freq[[2]]),freq2=freq[[2]])
twitf = data.frame(ngram=names(freq[[3]]),freq3=freq[[3]])
# Full outer join so n-grams present in only some sources are kept.
meanf = merge(merge(blogf,newsf,by='ngram',all=TRUE),twitf,by='ngram',all=TRUE)
# N-grams absent from a source contribute 0 to the average.
meanf$freq1[which(is.na(meanf$freq1))]=0
meanf$freq2[which(is.na(meanf$freq2))]=0
meanf$freq3[which(is.na(meanf$freq3))]=0
meanf = transform(meanf, freq=(freq1+freq2+freq3)/3)
# Collapse back to a named numeric vector: names = n-grams, values = mean freq.
ngrams = meanf[,'ngram']
meanf = meanf[,'freq']
names(meanf) = ngrams
setwd(directory)
# Persist as "mean<NGramSize>gram" in the same save()/serialize() format
# used by SaveSource, so LoadSource(directory, 'mean', ...) can read it.
handle = file(paste0('mean',NGramSize,'gram'),"wb")
assign(paste0('mean',NGramSize,'gram'),serialize(meanf, NULL))
save(list=paste0('mean',NGramSize,'gram'),file=handle)
close(handle)
rm(handle)
}
## Function to load N-grams of a specific size from all sources
##
## directory: folder containing "<source><NGramSize>gram" files for the three
##   sources blog/news/twit; NOTE it becomes the working directory as a side
##   effect (kept for compatibility).
## NGramSize: which n-gram size to load (1-4).
## MinCount: when > 1, rare entries are dropped via TopNGramFreq.
## Returns a list of 3 named frequency vectors, ordered blog, news, twit.
LoadNGrams <- function(directory, NGramSize, MinCount = 2) {
  setwd(directory)
  freq <- list()
  sources <- c('blog', 'news', 'twit')
  for (i in 1:3) {
    handle <- file(paste0(sources[i], NGramSize, 'gram'), "rb")
    # Files store a variable named "<source><NGramSize>gram" holding a
    # serialize()d frequency vector (written by SaveSource).
    load(handle)
    close(handle)  # BUG FIX: the original never closed any of the connections
    freq[[i]] <- unserialize(get(paste0(sources[i], NGramSize, 'gram')))
  }
  if (MinCount > 1) {
    for (i in 1:3) {
      freq[[i]] <- TopNGramFreq(freq[[i]], MinCount)
    }
  }
  freq
}
## Reads the text file `directory`/`filename` and returns its lines after
## re-encoding from UTF-8 to latin1; characters with no latin1 mapping are
## replaced by a space. NUL bytes inside lines are skipped.
LoadTextFile <- function(directory, filename) {
  con <- file(paste0(directory, '/', filename), open = "rb")
  raw_lines <- readLines(con, encoding = "UTF-8", skipNul = TRUE)
  close(con)
  # Convert to latin1, substituting a blank for unmappable characters.
  iconv(raw_lines, from = "UTF-8", to = "latin1", sub = " ")
}
## Turns raw text lines into a vector of word tokens, with explicit sentence
## markers (<s>, </s>) inserted so that cross-sentence N-grams can be
## filtered out later (NGramFreq drops any N-gram containing a marker).
Tokenizer <- function(textfile) {
  # Normalize case first so all downstream matching is lower-case only.
  lowered <- tolower(textfile)
  # Strip digits and special characters; apostrophes are kept on purpose so
  # contractions (don't, it's, ...) survive tokenization.
  cleaned <- stri_replace_all(lowered, '', regex='[0-9]+|[+"()@#$%^&*_=|/<>]+|-')
  # Replace sentence-ending punctuation (.!?) with " </s> <s>" and prepend an
  # opening <s>, so every sentence is bracketed by markers.
  marked <- paste('<s>', stri_replace_all(cleaned, ' </s> <s>', regex='[.?!]'))
  # Split into tokens on spaces, commas, colons, semicolons, tabs and newlines.
  txt.to.words(marked, splitting.rule = "([a-z]+_)|[ ,;:\n\t]")
}
## Function to generate a table of frequencies given a tokenized file and a N-gram size
##
## tokenized: token vector produced by Tokenizer (includes <s>/</s> markers).
## NGramSize: size of the n-grams to count (1-4).
## MinCount: when > 1, rare n-grams are dropped via TopNGramFreq.
## Returns a named numeric vector: names = n-gram strings, values = relative
## frequencies as computed by stylo's make.table.of.frequencies.
NGramFreq <- function(tokenized,NGramSize=1,MinCount=2) {
# Generates Ngrams from the tokenized text
if (NGramSize > 1) {
my.NGrams = txt.to.features(tokenized, ngram.size=NGramSize)
} else {
my.NGrams = tokenized
}
# Keep only n-grams made of plain words (optionally with an internal
# apostrophe); this also discards anything containing the <s>/</s>
# sentence markers, so no n-gram spans a sentence boundary.
# According to the size, make sure the sentences do not begin nor end with '.
expr = paste0("^([a-z]+([a-z]+|('+[a-z]+)){0,1}[ ]{0,1}){1,",NGramSize,"}$")
my.NGrams = my.NGrams[grep(expr,my.NGrams)]
# Generates the complete list of unique Ngrams
complete.Ngrams.list = names(sort(table(unlist(my.NGrams)),
decreasing = TRUE))
# Calculates Ngrams frequencies
Ngram.freq = make.table.of.frequencies(my.NGrams, complete.Ngrams.list)
# Assumes minimum frequency = 1 occurrence
# Keeps only frequency corresponding to MinCount
top.Ngram.freq = Ngram.freq
if (MinCount > 1) {
top.Ngram.freq = TopNGramFreq(Ngram.freq,MinCount)
}
# Removes temporary objects from memory
rm(my.NGrams)
rm(complete.Ngrams.list)
# Returns frequencies
top.Ngram.freq
}
## Filters out rare n-grams: keeps only entries occurring at least MinCount
## times, under the assumption that the smallest frequency in the table
## corresponds to a single occurrence. Returns the survivors sorted by
## decreasing frequency.
TopNGramFreq <- function(NGramFreq, MinCount = 2) {
  # Frequency threshold implied by MinCount occurrences.
  cutoff <- (MinCount - 1) * min(NGramFreq)
  n_keep <- sum(NGramFreq > cutoff)
  head(sort(NGramFreq, decreasing = TRUE), n_keep)
}
## Function to generate a prediction table from a table of frequencies and a given number of options to keep
##
## NGramFreq: named frequency vector of n-grams (n = 2..4); the n-gram size
##   is inferred from the first name.
## QtOptions: maximum number of predicted words kept per predictor.
## HashTable: if TRUE, returns a hash() of (predictor, prediction) pairs;
##   otherwise a data frame with columns first/pred/freq, where `first` is
##   the (n-1)-word predictor with ".<rank>" appended to disambiguate.
PredictionTable <- function(NGramFreq,QtOptions=5,HashTable=FALSE) {
# Calculates Ngram size
NGramSize = length(unlist((stri_split_fixed(names(NGramFreq)[1]," "))))
# Splits NGrams into a continuous list
pairs = unlist(stri_split_fixed(names(NGramFreq)," "))
# According to size N, generates a DF with N columns and order by descendent frequency
# For N > 2, concatenates (N-1) first words into "first" column to be used as predictor
if (NGramSize == 2) {
pfreq = data.frame(first=pairs[seq(1,length(pairs)-1,2)],second=pairs[seq(2,length(pairs),2)],freq=as.vector(NGramFreq))
pairs.ord = sqldf("select * from pfreq order by first asc, freq desc")
tot.pairs = pairs.ord
} else if (NGramSize == 3) {
pfreq = data.frame(first=pairs[seq(1,length(pairs)-2,3)],second=pairs[seq(2,length(pairs)-1,3)],third=pairs[seq(3,length(pairs),3)],freq=as.vector(NGramFreq))
pairs.ord = sqldf("select * from pfreq order by first asc, second asc, freq desc")
tot.pairs = data.frame(first=paste(pairs.ord$first,pairs.ord$second),second=pairs.ord$third,freq=pairs.ord$freq)
} else if (NGramSize == 4) {
pfreq = data.frame(first=pairs[seq(1,length(pairs)-3,4)],second=pairs[seq(2,length(pairs)-2,4)],third=pairs[seq(3,length(pairs)-1,4)],fourth=pairs[seq(4,length(pairs),4)],freq=as.vector(NGramFreq))
pairs.ord = sqldf("select * from pfreq order by first asc, second asc, third asc, freq desc")
tot.pairs = data.frame(first=paste(pairs.ord$first,pairs.ord$second,pairs.ord$third),second=pairs.ord$fourth,freq=pairs.ord$freq)
} else {
# Unigrams (or anything unexpected) have no predictor part: nothing to build.
return(NULL)
}
# Calculates total frequencies for each unique occurrences of (N-1) words (column "first")
tot.pairs = transform(tot.pairs, tot.freq = ave(freq, first, FUN=sum))
# Calculates cumulative frequency of the unique predictors and sort in descending order
tot.pairs2 = transform(tot.pairs, first = as.character(first))
tot.pairs2 = tot.pairs2[,c('first','tot.freq')]
tot.pairs2 = unique(tot.pairs2)
sum.freq = sum(tot.pairs2$tot.freq)
tot.pairs2 = transform(tot.pairs2, tot.freq = tot.freq/sum.freq)
tot.pairs2 = tot.pairs2[order(-tot.pairs2$tot.freq),]
# Obtains predictor's probability at the level of 99.5% probability mass
min.prob = tot.pairs2$tot.freq[min(which(cumsum(tot.pairs2$tot.freq)>.995))]
# Considers only predictors with probabilities above this threshold
tot.pairs2 = tot.pairs2[tot.pairs2$tot.freq>=min.prob,]
# Subsets the predictors
tot.pairs = tot.pairs[tot.pairs$first%in%tot.pairs2$first,]
# Recalculates cumulative frequency of predicted words within a same predictor
tot.pairs = transform(tot.pairs, cumsum = ave(freq/tot.freq, first, FUN = cumsum))
# For predictors with a large number of predicted words,
# keeps only those within the 95% probability mass OR
# those with probability greater than 5%.
tot.pairs = tot.pairs[(tot.pairs$cumsum<.95)|(tot.pairs$cumsum==1.0&(tot.pairs$freq/tot.pairs$tot.freq)>.05),]
# Creates a DF with predictors and predicted words
Ngram.pred = data.frame(first=tot.pairs$first,pred=tot.pairs$second,freq=tot.pairs$freq)
# Adds a column full of 1's in order to compute a ranking of the predicted words
# within a given predictor
Ngram.pred = transform(cbind(Ngram.pred,ones=1), count = ave(ones, first, FUN=cumsum))
# Filters out options above the specified threshold
Ngram.pred = Ngram.pred[Ngram.pred$count<=QtOptions,]
# Concatenates the ranking after the predictor in order to disambiguate
# (Prediction() later looks up keys of the form "<predictor>.<rank>").
Ngram.pred = data.frame(first=paste0(as.character(Ngram.pred$first),'.',Ngram.pred$count),pred=as.character(Ngram.pred$pred),freq=Ngram.pred$freq)
# Removes temporary objects from memory
rm(pairs)
rm(pairs.ord)
rm(pfreq)
rm(tot.pairs)
# Generates a hash table of the pairs (predictors, predicted)
if (HashTable) {
Ngram.hash = hash(Ngram.pred[,1],Ngram.pred[,2])
Ngram.hash
} else {
Ngram.pred
}
}
## Function to actually make a prediction
##
## predictor: the text typed so far by the user.
## Returns either (a) the completion suffix of the word currently being
## typed (when the input does not end in a space), or (b) up to 5 suggested
## next words (when the input ends in a space or is empty).
## Relies on the globals `unigram`, `pred` (built at the bottom of this
## file) and `profanityWords`.
Prediction <- function(predictor) {
require(stringi)
require(fastmatch)
# defines a subfunction to trim leading and trailing spaces
trim <- function( x ) {
gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
}
# convert predictor to lower case
predictor = tolower(predictor)
# if the user types punctuation, separate sentences in order to avoid cross-sentence N-grams
# (only the text after the last .?! is kept)
last.end = stri_locate_last(predictor,regex='[.?!]')[1]
if (is.na(last.end)) last.end = 0
if (last.end > 0) predictor = trim(substr(predictor,start=last.end+1,stop=nchar(predictor)))
# removes special characters and numbers
predictor = stri_replace_all(predictor, '', regex='[0-9]+|[+"()@#$%^&*_=|/<>]+|-')
# removes punctuation
predictor = stri_replace_all(predictor, ' ', regex='[,;:\n\t]')
# check if the user has typed a space...
if (stri_sub(predictor,nchar(predictor)) == ' ' | predictor == '') {
predictor = trim(predictor)
} else {
# in case it is still typing a word, checks for the most likely word being typed:
# take the last partial word and return the suffix of the most frequent
# unigram starting with it (empty string if no unigram matches).
wordpart = unlist(stri_split_fixed(predictor," "))
wordpart = wordpart[length(wordpart)]
wordset = unigram[grep(paste0("^",wordpart,".*"),unigram$word),]
word = as.character(wordset[which.max(wordset$freq),'word'])
if (!length(word)) word = ""
return(stri_sub(word,nchar(wordpart)+1))
}
# split the input into words
words = unlist(stri_split_fixed(predictor," "))
NGram = length(words)
# if there are more than three words...
if (NGram > 3) {
# keeps only the last typed 3-gram
predictor = paste(words[(NGram-2):NGram],collapse=" ")
words = words[(NGram-2):NGram]
NGram = 3
}
# since we are interested in the most likely word to follow, we have to check for a N-gram one size bigger
NGram = NGram + 1
next.word = NULL
# simple back-off strategy: if there is no N-gram of size 4, steps down one size at a time
# it steps down also if there are not enough suggestions
for (i in seq(NGram,1,by=-1)) {
if (i > 1) {
# Look up the 5 ranked entries "<predictor>.1" ... "<predictor>.5" in the
# prediction table for this n-gram size (fmatch = fast hashed match).
# NOTE: seq(1:5) is equivalent to 1:5 here, though seq_len(5) would be clearer.
next.word = unique(c(next.word,as.character(pred[[i]][fmatch(paste0(predictor,".",seq(1:5)),pred[[i]]$first),'pred'])))
next.word = next.word[!is.na(next.word)]
} else {
# if gets to unigrams, just add the top 5 words
# (pred[[1]] is a plain character vector of the 5 most frequent words)
next.word = unique(c(next.word, pred[[1]][1:5]))
}
# filters out profanity as suggestion
next.word = setdiff(next.word,profanityWords)
# if there are more than 5 words suggested, keeps only the top 5
if (length(next.word) >= 5) {
return(next.word[1:5])
}
# Back off: drop the first word of the predictor before trying the next
# smaller n-gram table.
if (i > 2) {
predictor = paste(words[(NGram-i+2):(NGram-1)],collapse=" ")
}
}
}
###########################################################################################
## Function calls in order to build the unigram and pred[[]] variables which are used by ##
## the Prediction function in the .RData stored in ShinyApps ##
###########################################################################################
# Input corpus location and output directory for the serialized n-gram files.
source.dir = "./Data/en_US"
savedir = "./"
# For each of the three corpora: load, tokenize, count 1- to 4-grams
# (MinCount = 1, i.e. keep everything at this stage), and persist.
blog = LoadTextFile(source.dir,'en_US.blogs.txt')
tokenized = Tokenizer(blog)
freq = list()
for (i in 1:4) {
freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'blog',freq)
news = LoadTextFile(source.dir,'en_US.news.txt')
tokenized = Tokenizer(news)
freq = list()
for (i in 1:4) {
freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'news',freq)
twit = LoadTextFile(source.dir,'en_US.twitter.txt')
tokenized = Tokenizer(twit)
freq = list()
for (i in 1:4) {
freq[[i]] = NGramFreq(tokenized,i,1)
}
SaveSource(savedir,'twit',freq)
# Average the three sources into "mean<i>gram" files, dropping n-grams
# seen fewer than 2 times.
for (i in 1:4) {
SaveMeanNGram(savedir,i,2)
}
# Word list used by Prediction() to filter suggestions.
profanityWords = LoadTextFile(savedir,"ProfanityWords.txt")
# Reload the averaged n-grams (MinCount = 3 prunes them further).
meanGrams = LoadSource(savedir,'mean',3)
# unigram: lookup table used for word-completion while the user is typing.
unigram = data.frame(word=names(meanGrams[[1]]),freq=meanGrams[[1]])
# pred[[1]] = top-5 words overall; pred[[2..4]] = per-size prediction tables
# keyed by "<predictor>.<rank>" strings (see PredictionTable).
pred = list()
pred[[1]] = as.character(unigram$word[order(-unigram$freq)])[1:5]
for (i in 2:4) {
pred[[i]] = PredictionTable(meanGrams[[i]],5,FALSE)
pred[[i]]=data.frame(first=as.character(pred[[i]]$first),pred=as.character(pred[[i]]$pred))
}
library(reshape2)
library(ggplot2)
library(xts)
library(zoo)
library(TTR)
library(quantmod)
library(fArma)
# Download gold ETF prices (GLD) and three TWD exchange rates for the
# same date range (quantmod).
getSymbols("GLD", from='2016-01-04', to='2016-08-10')
getFX("EUR/TWD", from='2016-01-04', to='2016-08-10')
getFX("GBP/TWD", from='2016-01-04', to='2016-08-10')
getFX("USD/TWD", from='2016-01-04', to='2016-08-10')
# Align the two calendars: FX quotes exist on days the stock market is
# closed, so keep only FX dates that also appear in the GLD index.
# NOTE(review): this nested scan is O(n*m); match(fxid, goldid) or
# fxid %in% goldid would do the same without the loop (behavior kept as-is).
fxid = index(EURTWD)
goldid = index(GLD)
undelid = c()
for(i in 1:length(fxid))
{
for(j in 1:length(goldid))
{
if( fxid[i] == goldid[j] )
{
print(i)
print(fxid[i])
print(goldid[j])
undelid = rbind(undelid, i)
break
}
}
}
# Combine aligned series into one frame and work in log prices.
# Column 5 of GLD is hard-coded here as the price column used.
price=data.frame(goldid[undelid], EURTWD[undelid,], GBPTWD[undelid,], USDTWD[undelid,], GLD[,4])
price[,2:5] = log(price[,2:5])
names(price) = c("date", "EUR", "GBP", "USD", "GOLD")
# Long format for ggplot: one row per (date, series) pair.
mdf <- melt(price, id.vars="date", value.name="Price", variable.name="FX")
at = seq(1, length(price[,1]), by=10)
ggplot(data=mdf, aes(x=date, y=Price, group=FX, colour=FX)) +
geom_line() +
geom_point( size=1, shape=1, fill="white" ) +
scale_x_discrete(at, mdf$date[at]) +
xlab("Date")
# Fit an ARMA(2,2) to EUR log-price differences and forecast one step ahead.
# NOTE(review): tail(EURDiff) keeps only the LAST 6 observations (tail's
# default n = 6), so the ARMA is fit on 6 points -- confirm this is intended
# and not a missing n= argument.
EURDiff = diff( price$EUR )
EURDiff = as.ts( tail( EURDiff ) )
fit = armaFit( formula=~arma(2,2), data=EURDiff)
fit@fit$aic
as.numeric( predict( fit, n.ahead=1, doplot=F )$pred )
# regression y = b1 x1 + b2 x2 + b3 x3
# OLS via the normal equations: Beta = (X'X)^-1 X'y, with an intercept column.
# NOTE(review): `predict` here shadows stats::predict as a data vector;
# it still works (R resolves functions separately) but renaming would be safer.
train = 1:100
predict = 101:153
oneV = rep(1, length(train))
X = as.matrix( cbind(oneV, price[train,2:4]) )
Y = as.matrix( price[train, 5] )
Beta = solve(t(X) %*% X) %*% t(X) %*% Y
oneV = rep(1, length(predict))
Xpred = as.matrix( cbind(oneV, price[predict, 2:4]) )
# Predicted vs actual log gold price over the holdout range.
plot(predict, Xpred%*%Beta)
lines(predict, price[predict,5])
library(ggplot2)
library(xts)
library(zoo)
library(TTR)
library(quantmod)
library(fArma)
getSymbols("GLD", from='2016-01-04', to='2016-08-10')
getFX("EUR/TWD", from='2016-01-04', to='2016-08-10')
getFX("GBP/TWD", from='2016-01-04', to='2016-08-10')
getFX("USD/TWD", from='2016-01-04', to='2016-08-10')
fxid = index(EURTWD)
goldid = index(GLD)
undelid = c()
for(i in 1:length(fxid))
{
for(j in 1:length(goldid))
{
if( fxid[i] == goldid[j] )
{
print(i)
print(fxid[i])
print(goldid[j])
undelid = rbind(undelid, i)
break
}
}
}
price=data.frame(goldid[undelid], EURTWD[undelid,], GBPTWD[undelid,], USDTWD[undelid,], GLD[,4])
price[,2:5] = log(price[,2:5])
names(price) = c("date", "EUR", "GBP", "USD", "GOLD")
mdf <- melt(price, id.vars="date", value.name="Price", variable.name="FX")
at = seq(1, length(price[,1]), by=10)
ggplot(data=mdf, aes(x=date, y=Price, group=FX, colour=FX)) +
geom_line() +
geom_point( size=1, shape=1, fill="white" ) +
scale_x_discrete(at, mdf$date[at]) +
xlab("Date")
EURDiff = diff( price$EUR )
EURDiff = as.ts( tail( EURDiff ) )
fit = armaFit( formula=~arma(2,2), data=EURDiff)
fit@fit$aic
as.numeric( predict( fit, n.ahead=1, doplot=F )$pred )
# regression y = b1 x1 + b2 x2 + b3 x3
train = 1:100
predict = 101:153
oneV = rep(1, length(train))
X = as.matrix( cbind(oneV, price[train,2:4]) )
Y = as.matrix( price[train, 5] )
Beta = solve(t(X) %*% X) %*% t(X) %*% Y
oneV = rep(1, length(predict))
Xpred = as.matrix( cbind(oneV, price[predict, 2:4]) )
plot(predict, Xpred%*%Beta)
lines(predict, price[predict,5]) |
# Auto-generated fuzz-test case: calls the internal breakfast:::setBitNumber
# with one fixed integer input and prints the structure of the result.
testlist <- list(n = 705822720L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
## makeCacheMatrix: wraps a matrix together with a cache slot for its inverse.
## Returns a list of four closures sharing one enclosing environment:
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # old inverse no longer applies to the new matrix
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## cacheSolve: returns the inverse of the special "matrix" built by
## makeCacheMatrix, computing it at most once. On a cache hit it emits the
## message "getting cached data" and returns the stored inverse; otherwise
## it solves the matrix, stores the result via setinverse(), and returns it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | TingtingZha/ProgrammingAssignment2 | R | false | false | 1,986 | r | ## Put comments here that give an overall description of what your functions do
# This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL # Initialize the inverse property.
set <- function(y) { # Set the value of vector
x <<- y # update the old matrix to the new one
m <<- NULL # reset the inverse of the matrix
}
get <- function() x # Method to get the actual matrix and return the matrix
setinverse <- function(inverse) m <<- inverse # Set the inverse of the matrix
getinverse <- function() m # Get the inverse of the matrix
list(set = set, get = get, # Return a list of the available functions
setinverse = setinverse,
getinverse = getinverse)
}
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
# If the inverse has already been calculated (and the matrix has not changed),
# then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
m <- x$getinverse() # Return a matrix that is the inverse of 'x'
if(!is.null(m)) { # check if this inverse of the matrix has been calculated
message("getting cached data") # if so, print "getting cached data" and
return(m) # returns the inverse of the matrix.
}
#If the inverse has not been calculated:
data <- x$get() # Get the matrix
m <- solve(data, ...) # Calculate the inverse of the matrix using solve
x$setinverse(m) # Updating the variable, set the inverse of the matrix
m # Return the matrix
}
|
#faststructure parallelization script
#setwd("/Users/cj/Dropbox/structure_simulations/satrapa/")
setwd("/media/burke/bigMac/Dropbox/structure_simulations/satrapa")
library(foreach);library(doMC);library(data.table);library(tidyr)
registerDoMC(cores=8)
#strip taxa names and population column, add six empty columns to structure input to match faststructure input reqs (srsly...)
files <- list.files("str_in",full.names = T)
# Converts a STRUCTURE-format file into fastStructure's expected layout:
# drops the taxa-name column and prepends six dummy metadata columns
# (fastStructure requires them but ignores their content). Writes the
# result to ./fstr_in/<basename>.str.
# NOTE(review): `%>%` is not attached explicitly here; it resolves via the
# tidyr attach above -- confirm, or load magrittr explicitly.
str2faststr <- function(file){
str <- read.table(file)
#str <- str[,-c(1:2)] #use this row if there's a population column
str <- str[,-1] #use this if no population column
# Six placeholder columns; fastStructure only checks the column count.
blank <- data.frame(matrix(nrow=nrow(str),ncol=6,data="faststructuremademeputthishere"))
str <- cbind(blank,str)
outname <- basename(file) %>% tools::file_path_sans_ext()
write.table(str,paste0("./fstr_in/",outname,".str"),row.names = F,col.names = F)
}
# Convert every input file in parallel (doMC backend registered above).
foreach(i=files) %dopar% str2faststr(i)
#build list of commands to run faststructure in parallel
# One shell command per (input file, replicate) pair; each replicate gets
# its own random seed. NOTE(review): K=3 and the absolute Dropbox paths are
# hard-coded -- adjust before reuse on another machine/dataset.
files <- list.files("fstr_in",full.names = T)
commands <- c()
nreps <- 10
for(i in files){
for(j in 1:nreps){
outname <- basename(i) %>% tools::file_path_sans_ext()
outname <- paste0(outname,"_",j)
command <- paste0("python ~/fastStructure/structure.py -K 3 --format=str --input=/media/burke/bigMac/Dropbox/structure_simulations/satrapa/",
tools::file_path_sans_ext(i),
" --output=/media/burke/bigMac/Dropbox/structure_simulations/satrapa/fstr_out/",
outname,
" --seed=",sample(1:1e6,1))
commands <- append(commands,command)
}
}
#run in parallel
foreach(i=commands) %dopar% system(i)
| /run_faststructure.R | no_license | cjbattey/LinckBattey2017_MAF_clustering | R | false | false | 1,642 | r | #faststructure parallelization script
#setwd("/Users/cj/Dropbox/structure_simulations/satrapa/")
setwd("/media/burke/bigMac/Dropbox/structure_simulations/satrapa")
library(foreach);library(doMC);library(data.table);library(tidyr)
registerDoMC(cores=8)
#strip taxa names and population column, add six empty columns to structure input to match faststructure input reqs (srsly...)
files <- list.files("str_in",full.names = T)
str2faststr <- function(file){
str <- read.table(file)
#str <- str[,-c(1:2)] #use this row if there's a population column
str <- str[,-1] #use this if no population column
blank <- data.frame(matrix(nrow=nrow(str),ncol=6,data="faststructuremademeputthishere"))
str <- cbind(blank,str)
outname <- basename(file) %>% tools::file_path_sans_ext()
write.table(str,paste0("./fstr_in/",outname,".str"),row.names = F,col.names = F)
}
foreach(i=files) %dopar% str2faststr(i)
#build list of commands to run faststructure in parallel
files <- list.files("fstr_in",full.names = T)
commands <- c()
nreps <- 10
for(i in files){
for(j in 1:nreps){
outname <- basename(i) %>% tools::file_path_sans_ext()
outname <- paste0(outname,"_",j)
command <- paste0("python ~/fastStructure/structure.py -K 3 --format=str --input=/media/burke/bigMac/Dropbox/structure_simulations/satrapa/",
tools::file_path_sans_ext(i),
" --output=/media/burke/bigMac/Dropbox/structure_simulations/satrapa/fstr_out/",
outname,
" --seed=",sample(1:1e6,1))
commands <- append(commands,command)
}
}
#run in parallel
foreach(i=commands) %dopar% system(i)
|
# Build the Kaggle Titanic submission frame from the globals `test` and
# `survived` (defined elsewhere) and write it without quoting or row names.
answer <- data.frame(PassengerId = test$passengerid, Survived = survived)
write.csv(answer, 'submit-003.csv', quote = FALSE, row.names = FALSE)
# Count cell-level differences against the two previous submissions
# (quick sanity check on how much the predictions changed).
sum(read.csv('submit-002.csv') != answer)
sum(read.csv('submit-001.csv') != answer)
write.csv(answer, 'submit-003.csv', quote = FALSE, row.names = FALSE)
sum(read.csv('submit-002.csv') != answer)
sum(read.csv('submit-001.csv') != answer) |
# Internal constructor for "rcmdcheck" result objects.
#
# stdout/stderr: captured output of `R CMD check`; \r is normalized away.
# description: a desc::description object for the checked package.
# status/duration/timeout: process exit metadata; a timeout appends a
#   synthetic error entry.
# test_fail/session_info: optional precomputed values; when NULL they are
#   derived from the check directory / package name via internal helpers.
# Returns a list of class "rcmdcheck" with parsed errors/warnings/notes.
new_rcmdcheck <- function(stdout,
stderr,
description,
status = 0L,
duration = 0L,
timeout = FALSE,
test_fail = NULL,
session_info = NULL) {
stopifnot(inherits(description, "description"))
# Make sure we don't have \r on windows
stdout <- win2unix(stdout)
stderr <- win2unix(stderr)
# R CMD check prefixes every top-level entry with "\n* "; split on that to
# get one string per check step.
entries <- strsplit(paste0("\n", stdout), "\n* ", fixed = TRUE)[[1]][-1]
checkdir <- parse_checkdir(entries)
# Drops the final "DONE" entry from any severity bucket.
notdone <- function(x) grep("DONE", x, invert = TRUE, value = TRUE)
res <- structure(
list(
stdout = stdout,
stderr = stderr,
status = status,
duration = duration,
timeout = timeout,
rversion = parse_rversion(entries),
platform = parse_platform(entries),
errors = notdone(grep("ERROR\n", entries, value = TRUE)),
warnings = notdone(grep("WARNING\n", entries, value = TRUE)),
notes = notdone(grep("NOTE\n", entries, value = TRUE)),
description = description$str(normalize = FALSE),
package = description$get("Package")[[1]],
version = description$get("Version")[[1]],
cran = description$get_field("Repository", "") == "CRAN",
bioc = description$has_fields("biocViews"),
checkdir = checkdir,
# %||% is the null-default operator defined elsewhere in this package.
test_fail = test_fail %||% get_test_fail(checkdir),
install_out = get_install_out(checkdir)
),
class = "rcmdcheck"
)
res$session_info <- get_session_info(res$package, session_info)
if (isTRUE(timeout)) {
res$errors <- c(res$errors, "R CMD check timed out")
}
res
}
# Extract the R version string (e.g. "3.5.0") from the check log entries:
# find the "using R version ..." line and keep the first whitespace-delimited
# token after the prefix.
parse_rversion <- function(entries) {
  version_line <- grep("^using R version", entries, value = TRUE)
  sub("^using R version ([^\\s]+)\\s.*$", "\\1", version_line, perl = TRUE)
}
# Extract the platform triple (e.g. "x86_64-pc-linux-gnu") from the
# "using platform: ..." line of the check log entries.
parse_platform <- function(entries) {
  platform_line <- grep("^using platform:", entries, value = TRUE)
  sub("^using platform: ([^\\s]+)\\s.*$", "\\1", platform_line, perl = TRUE)
}
# Extract the check directory from the "using log directory '<path>'" entry.
# The quote class tolerates ASCII, backtick, Windows-1252 (\x91/\x92) and
# Unicode curly quotes, since R CMD check output varies by locale.
parse_checkdir <- function(entries) {
  quotes <- "\\x91\\x92\u2018\u2019`'"
  dir_line <- grep("^using log directory", entries, value = TRUE)
  pattern <- paste0(
    "^using log directory [", quotes, "]([^", quotes, "]+)[", quotes, "]$"
  )
  sub(pattern, "\\1", dir_line, perl = TRUE)
}
#' @export
# S3 method: flattens an rcmdcheck object into a long data frame with one
# row per error/warning/note.
#
# which: caller-supplied label column (e.g. which flavor/platform the check
#   came from); recorded verbatim in every row.
# row.names/optional: accepted for S3 signature compatibility; unused.
# Columns: which, platform, rversion, package, version, type
# ("error"/"warning"/"note"), output (full entry text), hash (stable digest
# of the entry's first line, for deduplication across runs).
as.data.frame.rcmdcheck <- function(x,
row.names = NULL,
optional = FALSE,
...,
which) {
# Pair each recorded entry with its severity label, preserving order.
entries <- list(
type = c(
rep("error", length(x$errors)),
rep("warning", length(x$warnings)),
rep("note", length(x$notes))
),
output = c(x$errors, x$warnings, x$notes)
)
# data_frame and %||% are internal helpers defined elsewhere in this package;
# missing metadata fields become NA rather than failing.
data_frame(
which = which,
platform = x$platform %||% NA_character_,
rversion = x$rversion %||% NA_character_,
package = x$package %||% NA_character_,
version = x$version %||% NA_character_,
type = entries$type,
output = entries$output,
hash = hash_check(entries$output)
)
}
#' @importFrom digest digest
# Produce a stable hash per check entry: keep only alphanumerics from the
# entry's first line (so formatting/whitespace differences don't change the
# hash) and digest that. first_line() is an internal helper defined elsewhere.
hash_check <- function(check) {
cleancheck <- gsub("[^a-zA-Z0-9]", "", first_line(check))
vapply(cleancheck, digest, "")
}
#' Parse \code{R CMD check} results from a file or string
#'
#' At most one of \code{file} or \code{text} can be given.
#' If both are \code{NULL}, then the current working directory
#' is checked for a \code{00check.log} file.
#'
#' @param file The \code{00check.log} file, or a directory that
#' contains that file. It can also be a connection object.
#' @param text The contentst of a \code{00check.log} file.
#' @param ... Other arguments passed onto the constructor.
#' Used for testing.
#' @return An \code{rcmdcheck} object, the check results.
#'
#' @seealso \code{\link{parse_check_url}}
#' @export
#' @importFrom desc description
parse_check <- function(file = NULL, text = NULL, ...) {
## If no text, then find the file, and read it in
if (is.null(text)) {
file <- find_check_file(file)
text <- readLines(file)
}
stdout <- paste(text, collapse = "\n")
# Simulate minimal description from info in log
# (a real DESCRIPTION is unavailable when parsing a bare log, so build one
# with just Package and Version scraped from the "this is package ..." line)
entries <- strsplit(paste0("\n", stdout), "\n* ", fixed = TRUE)[[1]][-1]
desc <- desc::description$new("!new")
desc$set(
Package = parse_package(entries),
Version = parse_version(entries)
)
# Delegate to the common constructor; stderr is unknown for a parsed log.
new_rcmdcheck(
stdout = stdout,
stderr = "",
description = desc,
...
)
}
# Pull the package name out of the "this is package '<name>' version ..."
# entry; the quote characters are matched loosely (any single character
# before/after the name) since they vary by locale.
parse_package <- function(entries) {
  pkg_line <- grep("^this is package .* version", entries, value = TRUE)
  sub(
    "^this is package .([a-zA-Z0-9\\.]+)[^a-zA-Z0-9\\.].*$",
    "\\1",
    pkg_line,
    perl = TRUE
  )
}
# Pull the package version (digits, dots, dashes) out of the
# "this is package '<name>' version '<version>'" entry.
parse_version <- function(entries) {
  version_line <- grep("^this is package .* version", entries, value = TRUE)
  sub(
    "^this is package .[a-zA-Z0-9\\.]+. version .([-0-9\\.]+)[^-0-9\\.].*$",
    "\\1",
    version_line,
    perl = TRUE
  )
}
#' Shorthand to parse R CMD check results from a URL
#'
#' @param url URL to parse the results from. Note that it should
#' not contain HTML markup, just the text output.
#' @param quiet Passed to \code{download.file}.
#' @return An \code{rcmdcheck} object, the check results.
#'
#' @seealso \code{\link{parse_check}}
#' @export
parse_check_url <- function(url, quiet = TRUE) {
# download_file is an internal helper defined elsewhere in this package;
# presumably it fetches the URL body as text lines -- verify at its definition.
parse_check(text = download_file(url, quiet = quiet))
}
# Resolve the user-supplied location to a concrete check-log path.
# NULL means "current directory"; a directory is searched for 00check.log;
# an existing file path is returned as-is; anything else is an error.
find_check_file <- function(file) {
  if (is.null(file)) file <- "."
  is_dir <- file.exists(file) && file.info(file)$isdir
  if (is_dir) {
    return(find_check_file_indir(file))
  }
  if (file.exists(file)) {
    return(file)
  }
  stop("Cannot find R CMD check output file")
}
# Look for 00check.log directly inside `dir`; error if it is not there.
find_check_file_indir <- function(dir) {
  logfile <- file.path(dir, "00check.log")
  if (!file.exists(logfile)) {
    stop("Cannot find R CMD check output file")
  }
  logfile
}
| /R/parse.R | no_license | makarevichy/rcmdcheck | R | false | false | 5,870 | r |
new_rcmdcheck <- function(stdout,
stderr,
description,
status = 0L,
duration = 0L,
timeout = FALSE,
test_fail = NULL,
session_info = NULL) {
stopifnot(inherits(description, "description"))
# Make sure we don't have \r on windows
stdout <- win2unix(stdout)
stderr <- win2unix(stderr)
entries <- strsplit(paste0("\n", stdout), "\n* ", fixed = TRUE)[[1]][-1]
checkdir <- parse_checkdir(entries)
notdone <- function(x) grep("DONE", x, invert = TRUE, value = TRUE)
res <- structure(
list(
stdout = stdout,
stderr = stderr,
status = status,
duration = duration,
timeout = timeout,
rversion = parse_rversion(entries),
platform = parse_platform(entries),
errors = notdone(grep("ERROR\n", entries, value = TRUE)),
warnings = notdone(grep("WARNING\n", entries, value = TRUE)),
notes = notdone(grep("NOTE\n", entries, value = TRUE)),
description = description$str(normalize = FALSE),
package = description$get("Package")[[1]],
version = description$get("Version")[[1]],
cran = description$get_field("Repository", "") == "CRAN",
bioc = description$has_fields("biocViews"),
checkdir = checkdir,
test_fail = test_fail %||% get_test_fail(checkdir),
install_out = get_install_out(checkdir)
),
class = "rcmdcheck"
)
res$session_info <- get_session_info(res$package, session_info)
if (isTRUE(timeout)) {
res$errors <- c(res$errors, "R CMD check timed out")
}
res
}
parse_rversion <- function(entries) {
line <- grep("^using R version", entries, value = TRUE)
sub("^using R version ([^\\s]+)\\s.*$", "\\1", line, perl = TRUE)
}
parse_platform <- function(entries) {
line <- grep("^using platform:", entries, value = TRUE)
sub("^using platform: ([^\\s]+)\\s.*$", "\\1", line, perl = TRUE)
}
parse_checkdir <- function(entries) {
quotes <- "\\x91\\x92\u2018\u2019`'"
line <- grep("^using log directory", entries, value = TRUE)
sub(
paste0("^using log directory [", quotes, "]([^", quotes, "]+)[", quotes, "]$"),
"\\1",
line,
perl = TRUE
)
}
# Flatten an rcmdcheck object into a data frame with one row per failure:
# every error, warning, and note becomes a row carrying the package
# metadata, the check flavor label (`which`), and a content hash of the
# message. `row.names`/`optional`/`...` exist only for S3 compatibility.
#' @export
as.data.frame.rcmdcheck <- function(x,
                                    row.names = NULL,
                                    optional = FALSE,
                                    ...,
                                    which) {
  messages <- c(x$errors, x$warnings, x$notes)
  # One type label per message, in the same order as `messages`.
  types <- rep(
    c("error", "warning", "note"),
    c(length(x$errors), length(x$warnings), length(x$notes))
  )
  data_frame(
    which = which,
    platform = x$platform %||% NA_character_,
    rversion = x$rversion %||% NA_character_,
    package = x$package %||% NA_character_,
    version = x$version %||% NA_character_,
    type = types,
    output = messages,
    hash = hash_check(messages)
  )
}
# Hash each check message so identical failures can be matched across runs.
# Only the first line is hashed, stripped of non-alphanumerics, so cosmetic
# formatting differences do not change the hash.
#' @importFrom digest digest
hash_check <- function(check) {
  normalized <- gsub("[^a-zA-Z0-9]", "", first_line(check))
  vapply(normalized, digest, FUN.VALUE = character(1))
}
#' Parse \code{R CMD check} results from a file or string
#'
#' At most one of \code{file} or \code{text} can be given.
#' If both are \code{NULL}, then the current working directory
#' is checked for a \code{00check.log} file.
#'
#' @param file The \code{00check.log} file, or a directory that
#' contains that file. It can also be a connection object.
#' @param text The contents of a \code{00check.log} file.
#' @param ... Other arguments passed onto the constructor.
#' Used for testing.
#' @return An \code{rcmdcheck} object, the check results.
#'
#' @seealso \code{\link{parse_check_url}}
#' @export
#' @importFrom desc description
parse_check <- function(file = NULL, text = NULL, ...) {
  ## If no text, then find the file, and read it in
  if (is.null(text)) {
    file <- find_check_file(file)
    text <- readLines(file)
  }
  stdout <- paste(text, collapse = "\n")
  # Simulate minimal description from info in log
  # Each check section starts with "* " at the beginning of a line; a "\n"
  # is prepended so the very first section splits like the rest, and the
  # empty leading element produced by the split is dropped with [-1].
  entries <- strsplit(paste0("\n", stdout), "\n* ", fixed = TRUE)[[1]][-1]
  desc <- desc::description$new("!new")
  desc$set(
    Package = parse_package(entries),
    Version = parse_version(entries)
  )
  # Delegate to the internal constructor; stderr is unknown when parsing
  # a saved log, so it is recorded as empty.
  new_rcmdcheck(
    stdout = stdout,
    stderr = "",
    description = desc,
    ...
  )
}
# Extract the package name from the check-log entry of the form
# "this is package 'pkgname' version '1.2-3'". The quote characters vary
# by locale, so any single character is accepted around the name.
parse_package <- function(entries) {
  pkg_line <- grep("^this is package .* version", entries, value = TRUE)
  pattern <- "^this is package .([a-zA-Z0-9\\.]+)[^a-zA-Z0-9\\.].*$"
  sub(pattern, "\\1", pkg_line, perl = TRUE)
}
# Extract the package version from the same "this is package ... version ..."
# entry used by parse_package(). Versions consist of digits, dots and
# dashes; the single characters around name and version are the (locale-
# dependent) quotes.
parse_version <- function(entries) {
  ver_line <- grep("^this is package .* version", entries, value = TRUE)
  pattern <- "^this is package .[a-zA-Z0-9\\.]+. version .([-0-9\\.]+)[^-0-9\\.].*$"
  sub(pattern, "\\1", ver_line, perl = TRUE)
}
#' Shorthand to parse R CMD check results from a URL
#'
#' @param url URL to parse the results from. Note that it should
#' not contain HTML markup, just the text output.
#' @param quiet Passed to \code{download.file}.
#' @return An \code{rcmdcheck} object, the check results.
#'
#' @seealso \code{\link{parse_check}}
#' @export
parse_check_url <- function(url, quiet = TRUE) {
  # download_file() is presumably an internal wrapper (not base
  # download.file) -- see its definition for exact semantics.
  parse_check(text = download_file(url, quiet = quiet))
}
# Resolve `file` to the path of an R CMD check log.
# NULL means "look in the current directory"; a directory is searched for
# a 00check.log inside it; an existing regular file is returned as-is.
# Errors when the path does not exist.
find_check_file <- function(file) {
  path <- if (is.null(file)) "." else file
  if (!file.exists(path)) {
    stop("Cannot find R CMD check output file")
  }
  if (file.info(path)$isdir) {
    find_check_file_indir(path)
  } else {
    path
  }
}
# Return the path of 00check.log inside `dir`, failing if it is absent.
find_check_file_indir <- function(dir) {
  candidate <- file.path(dir, "00check.log")
  if (!file.exists(candidate)) {
    stop("Cannot find R CMD check output file")
  }
  candidate
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53840861298846e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615833715-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 270 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53840861298846e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch7-fn.R
\name{cont.mpdf}
\alias{cont.mpdf}
\title{PDF and CDF for Continuous Random Variables}
\usage{
cont.mpdf(dist, lo, up, para, para2, ymax, mt, dcol, np = 100,
pos1 = "topright", pos2 = "bottomright", xp1, xp2)
}
\arguments{
\item{dist}{Name of continuous probability distribution (one of the following)
("exp", "gamma", "weibull", "beta", "norm", "t", "chisq", "f")}
\item{lo}{Lower limit of x-axis}
\item{up}{Upper limit of x-axis}
\item{para}{First parameter vector of PDF}
\item{para2}{Second parameter vector of PDF (if necessary)}
\item{ymax}{Upper limit of y-axis}
\item{mt}{Graph title}
\item{dcol}{Graph color vector (default as follows)
c("red", "blue", "orange2", "green4", "purple", "cyan2")}
\item{np}{Number of plot points, Default: 100}
\item{pos1}{Legend location of PDF, Default: 'topright'}
\item{pos2}{Legend location of CDF, Default: 'bottomright'}
\item{xp1}{Vector of specific x values for PDF (ignore legend)}
\item{xp2}{Vector of specific x values for CDF (ignore legend)}
}
\value{
None.
}
\description{
PDF and CDF for Continuous Random Variables
}
\examples{
lamb = 1:5
cont.mpdf("exp", 0, 3, para=lamb, ymax=5)
alp = c(0.5, 1, 2, 3); rate = 1
cont.mpdf("gamma", 0, 8, para=alp, para2=rate, ymax=1.2)
th = 1; alp = c(0.5, 1, 2, 3)
cont.mpdf("weibull", 0, 5, para=alp, para2=th, ymax=1.2)
}
| /man/cont.mpdf.Rd | no_license | zlfn/Rstat-1 | R | false | true | 1,416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ch7-fn.R
\name{cont.mpdf}
\alias{cont.mpdf}
\title{PDF and CDF for Continuous Random Variables}
\usage{
cont.mpdf(dist, lo, up, para, para2, ymax, mt, dcol, np = 100,
pos1 = "topright", pos2 = "bottomright", xp1, xp2)
}
\arguments{
\item{dist}{Name of continuous probability distribution (one of the following)
("exp", "gamma", "weibull", "beta", "norm", "t", "chisq", "f")}
\item{lo}{Lower limit of x-axis}
\item{up}{Upper limit of x-axis}
\item{para}{First parameter vector of PDF}
\item{para2}{Second parameter vector of PDF (if necessary)}
\item{ymax}{Upper limit of y-axis}
\item{mt}{Graph title}
\item{dcol}{Graph color vector (default as follows)
c("red", "blue", "orange2", "green4", "purple", "cyan2")}
\item{np}{Number of plot points, Default: 100}
\item{pos1}{Legend location of PDF, Default: 'topright'}
\item{pos2}{Legend location of CDF, Default: 'bottomright'}
\item{xp1}{Vector of specific x values for PDF (ignore legend)}
\item{xp2}{Vector of specific x values for CDF (ignore legend)}
}
\value{
None.
}
\description{
PDF and CDF for Continuous Random Variables
}
\examples{
lamb = 1:5
cont.mpdf("exp", 0, 3, para=lamb, ymax=5)
alp = c(0.5, 1, 2, 3); rate = 1
cont.mpdf("gamma", 0, 8, para=alp, para2=rate, ymax=1.2)
th = 1; alp = c(0.5, 1, 2, 3)
cont.mpdf("weibull", 0, 5, para=alp, para2=th, ymax=1.2)
}
|
## Implementing functions which will help us to cache the inverse of a matrix.
## makeCacheMatrix creates a special "matrix" object that can cache its own
## inverse. It returns a list of accessor functions:
##   set(matrix)     -- replace the stored matrix, invalidating the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  prop_inverse <- NULL
  set <- function(matrix) {
    # Bug fix: the original assigned to an undefined `m` (leaking a global)
    # while get() also read `m`, so get() failed and the constructor's `x`
    # was never updated. Both must operate on `x` in the enclosing env.
    x <<- matrix
    # The cached inverse no longer matches the new matrix, so drop it.
    prop_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    prop_inverse <<- inverse
  }
  getInverse <- function() {
    prop_inverse
  }
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse is already cached it is returned directly; otherwise it is
## computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) {
    # Cache hit: reuse the previously computed inverse.
    return(inv)
  }
  data <- x$get()
  # Bug fix: the inverse is solve(data); the original multiplied the
  # inverse by the matrix again (solve(data) %*% data), which always
  # yields the identity matrix, not the inverse. `...` is now forwarded
  # to solve() (e.g. for a tolerance), which is backward compatible.
  inv <- solve(data, ...)
  x$setInverse(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
| /cachematrix.R | no_license | kaustubhshete/ProgrammingAssignment2 | R | false | false | 864 | r | ## Implementing functions which will help us to cache inverse of matrix
## Creates a special "matrix" object that can cache its own inverse.
## Returns a list of accessor functions:
##   set(matrix)     -- replace the stored matrix, invalidating the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  prop_inverse <- NULL
  set <- function(matrix) {
    # Bug fix: the original assigned to an undefined `m` (leaking a global)
    # while get() also read `m`, so get() failed and the constructor's `x`
    # was never updated. Both must operate on `x` in the enclosing env.
    x <<- matrix
    # The cached inverse no longer matches the new matrix, so drop it.
    prop_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    prop_inverse <<- inverse
  }
  getInverse <- function() {
    prop_inverse
  }
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse is already cached it is returned directly; otherwise it is
## computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) {
    # Cache hit: reuse the previously computed inverse.
    return(inv)
  }
  data <- x$get()
  # Bug fix: the inverse is solve(data); the original multiplied the
  # inverse by the matrix again (solve(data) %*% data), which always
  # yields the identity matrix, not the inverse. `...` is now forwarded
  # to solve() (e.g. for a tolerance), which is backward compatible.
  inv <- solve(data, ...)
  x$setInverse(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
|
# Practice 2: cellular automaton
# Assignment 2
library(parallel)
library(Rlab)
# Grid is dim x dim cells; `num` is the total number of cells.
# NOTE(review): `dim` shadows base::dim() in this script; rename if
# base::dim() is ever needed here.
dim <- 10
num <- dim^2
# Accumulates the survival time (iterations) of each repetition.
datos <- data.frame()
# One cell update of the birth rule: the cell at linear index `pos`
# (1-based, row-major over the dim x dim grid) is alive next generation
# only when exactly 3 of its (up to 8) neighbours are alive now.
# Reads the current grid from the global matrix `actual`, which is
# exported to the cluster workers before each parSapply call.
paso <- function(pos) {
  fila <- floor((pos - 1) / dim) + 1
  columna <- ((pos - 1) %% dim) + 1
  # Clamp the 3x3 neighbourhood at the grid borders.
  vecindad <- actual[max(fila - 1, 1) : min(fila + 1, dim),
                     max(columna - 1, 1): min(columna + 1, dim)]
  # Subtract the cell itself so only true neighbours are counted.
  return(1 * ((sum(vecindad) - actual[fila, columna]) == 3))
}
# Worker pool: leave one core free for the OS/session.
cluster <- makeCluster(detectCores() - 1)
clusterExport(cluster, "dim")
clusterExport(cluster, "paso")
# Initial-density values to sweep: P(cell alive) in {0.1, ..., 0.9}.
prob = seq(0.1,0.9,0.1)
for(p in prob) { # here we vary the probability
  for (repetir in 1:50) {
    i <- 0
    # NOTE(review): 100 is hard-coded but should equal `num` (dim^2);
    # use rbern(num, p) if `dim` ever changes.
    actual <- matrix(rbern(100,p), nrow=dim, ncol=dim) # matrix of live cells with probability p
    for (iteracion in 1:15) {
      i <- i + 1 # generation counter
      clusterExport(cluster, "actual")
      siguiente <- parSapply(cluster, 1:num, paso)
      if (sum(siguiente) == 0) { # everything died
        i <- iteracion
        break;
      }
      actual <- matrix(siguiente, nrow=dim, ncol=dim, byrow=TRUE)
    }
    # Record how many generations this repetition survived.
    datos <- rbind(datos, i)
  }
}
# Reshape to 50 repetitions (rows) x 9 probabilities (columns).
data <- matrix(t(datos), nrow=50, ncol=9)
stopCluster(cluster)
png("prueba.png")
colnames(data) = prob
boxplot(data, xlab="Probabilidad", ylab="Iteraciones",)
graphics.off()
| /p2_automata_celular/tarea2.R | no_license | Saphira3000/Simulacion-de-sistemas | R | false | false | 1,285 | r | #practica 2: Automata celular
#tarea 2
library(parallel)
library(Rlab)
dim <- 10
num <- dim^2
datos <- data.frame()
paso <- function(pos) {
fila <- floor((pos - 1) / dim) + 1
columna <- ((pos - 1) %% dim) + 1
vecindad <- actual[max(fila - 1, 1) : min(fila + 1, dim),
max(columna - 1, 1): min(columna + 1, dim)]
return(1 * ((sum(vecindad) - actual[fila, columna]) == 3))
}
cluster <- makeCluster(detectCores() - 1)
clusterExport(cluster, "dim")
clusterExport(cluster, "paso")
prob = seq(0.1,0.9,0.1)
for(p in prob) { # aqui variamos la probabilidad
for (repetir in 1:50) {
i <- 0
actual <- matrix(rbern(100,p), nrow=dim, ncol=dim) #matriz celulas vivas con cierta probabilidad
for (iteracion in 1:15) {
i <- i + 1 # contador para las generaciones
clusterExport(cluster, "actual")
siguiente <- parSapply(cluster, 1:num, paso)
if (sum(siguiente) == 0) { # todos murieron
i <- iteracion
break;
}
actual <- matrix(siguiente, nrow=dim, ncol=dim, byrow=TRUE)
}
datos <- rbind(datos, i)
}
}
data <- matrix(t(datos), nrow=50, ncol=9)
stopCluster(cluster)
png("prueba.png")
colnames(data) = prob
boxplot(data, xlab="Probabilidad", ylab="Iteraciones",)
graphics.off()
|
# Regression test for caret's "RSimca" model: fits the model under several
# resampling schemes on simulated two-class data and saves all test_*
# objects for later comparison across caret versions.
library(caret)
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
model <- "RSimca"
#########################################################################
# Simulated two-class data: small training set, larger test set.
set.seed(1)
training <- twoClassSim(50, linearVars = 2)
testing <- twoClassSim(500, linearVars = 2)
trainX <- training[, -ncol(training)]
trainY <- training$Class
# Resampling schemes: 3-fold CV, leave-one-out, and no resampling.
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all")
cctrl2 <- trainControl(method = "LOOCV")
cctrl3 <- trainControl(method = "none")
# x/y interface with 3-fold CV.
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
                             method = "RSimca",
                             trControl = cctrl1,
                             preProc = c("center", "scale"))
# Formula interface with the same seed, so results should match.
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
                            method = "RSimca",
                            trControl = cctrl1,
                            preProc = c("center", "scale"))
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
# Leave-one-out cross-validation.
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
                              method = "RSimca",
                              trControl = cctrl2,
                              preProc = c("center", "scale"))
# No resampling: refit with the CV-selected tuning parameters only.
set.seed(849)
test_class_none_model <- train(trainX, trainY,
                               method = "RSimca",
                               trControl = cctrl3,
                               tuneGrid = test_class_cv_model$bestTune,
                               preProc = c("center", "scale"))
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
# Sanity check: the fitted model must know about every outcome level.
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
  cat("wrong levels")
#########################################################################
# Collect every test_* object plus session info and persist to disk.
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
     file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
| /RegressionTests/Code/RSimca.R | no_license | Ragyi/caret | R | false | false | 2,068 | r | library(caret)
timestamp <- format(Sys.time(), "%Y_%m_%d_%H_%M")
model <- "RSimca"
#########################################################################
set.seed(1)
training <- twoClassSim(50, linearVars = 2)
testing <- twoClassSim(500, linearVars = 2)
trainX <- training[, -ncol(training)]
trainY <- training$Class
cctrl1 <- trainControl(method = "cv", number = 3, returnResamp = "all")
cctrl2 <- trainControl(method = "LOOCV")
cctrl3 <- trainControl(method = "none")
set.seed(849)
test_class_cv_model <- train(trainX, trainY,
method = "RSimca",
trControl = cctrl1,
preProc = c("center", "scale"))
set.seed(849)
test_class_cv_form <- train(Class ~ ., data = training,
method = "RSimca",
trControl = cctrl1,
preProc = c("center", "scale"))
test_class_pred <- predict(test_class_cv_model, testing[, -ncol(testing)])
test_class_pred_form <- predict(test_class_cv_form, testing[, -ncol(testing)])
set.seed(849)
test_class_loo_model <- train(trainX, trainY,
method = "RSimca",
trControl = cctrl2,
preProc = c("center", "scale"))
set.seed(849)
test_class_none_model <- train(trainX, trainY,
method = "RSimca",
trControl = cctrl3,
tuneGrid = test_class_cv_model$bestTune,
preProc = c("center", "scale"))
test_class_none_pred <- predict(test_class_none_model, testing[, -ncol(testing)])
test_levels <- levels(test_class_cv_model)
if(!all(levels(trainY) %in% test_levels))
cat("wrong levels")
#########################################################################
tests <- grep("test_", ls(), fixed = TRUE, value = TRUE)
sInfo <- sessionInfo()
save(list = c(tests, "sInfo", "timestamp"),
file = file.path(getwd(), paste(model, ".RData", sep = "")))
q("no")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/default.output.function.r
\name{default.output.function}
\alias{default.output.function}
\title{Default Method for Outputting ADF Calculations to Disk}
\usage{
default.output.function(x, FUN2, outDir, params)
}
\arguments{
\item{x}{an abstract data frame}
\item{FUN2}{function to apply over each chunk}
\item{outDir}{an empty directory for storing output}
\item{params}{a list of additional parameters}
}
\description{
Default method for writing ADF calculation output files to the file system.
}
| /man/default.output.function.Rd | no_license | kaneplusplus/adf | R | false | true | 562 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/default.output.function.r
\name{default.output.function}
\alias{default.output.function}
\title{Default Method for Outputting ADF Calculations to Disk}
\usage{
default.output.function(x, FUN2, outDir, params)
}
\arguments{
\item{x}{an abstract data frame}
\item{FUN2}{function to apply over each chunk}
\item{outDir}{an empty directory for storing output}
\item{params}{a list of additional parameters}
}
\description{
Default method for writing ADF calculation output files to the file system.
}
|
library(magrittr) # pipe package | /Rdatascience/18 - pipes.R | no_license | daifengqi/TidyverseStyle | R | false | false | 32 | r | library(magrittr) # pipe package |
#########################################################################################################
# Initial Setup of working directory and functions used to calculate scores
#########################################################################################################
# NOTE(review): hard-coded absolute path; anyone else running this script
# must edit it (or switch to a project-relative path).
setwd("C:/Users/sjsty/Desktop/Masters/Algorithms/Simplified Problem")
# Leaky ReLU activation for a single value: identity for x >= 0,
# slope `eps` for x < 0. Scalar only -- the condition uses a scalar `if`.
leakyrelu <- function(eps, x) {
  if (x < 0) eps * x else x
}
# Score observations by their error: smaller error -> larger score, via
# exp(-x). Bug fix: the original loop assigned the whole vector exp(-x)
# into rank[i], which errors for inputs of length > 1; exp() is already
# vectorized, and for the scalar inputs used via sapply() the result is
# identical.
score <- function(x) {
  exp(-x)
}
# One stochastic-approximation step toward solving A w = b, using the
# decaying gain 1 / (n + i)^chi.
# NOTE: this masks stats::update within the script.
update <- function(A, b, w, i, n, chi) {
  gain <- 1 / ((n + i)^chi)
  w + gain * (b - A %*% w)
}
# Mean squared error between two equal-length numeric vectors.
# Rewritten vectorized: the original grew `temp` element-by-element in a
# 1:length(x) loop; mean((x - y)^2) is equivalent, avoids reallocation,
# and does not misbehave via 1:0 when x is empty.
mse <- function(x, y) {
  mean((x - y)^2)
}
#########################################################################################################
# Initial Data to estimate and validation set
#########################################################################################################
# Training data: 4 input columns followed by 12 one-hot output columns.
SAdata = read.table("12outputtraindata.txt", sep = "\t", header= TRUE)
row.names(SAdata) = NULL
#Setting the amount of data that will be in the sampling set and the amount that will be in the updating set
smp_size = floor(0.75 * nrow(SAdata))
#This section just creates datasets for the X's and y's
# NOTE(review): sample() without set.seed(), so the split is not
# reproducible across runs.
train_ind = sample(seq_len(nrow(SAdata)), size = smp_size)
traindata = SAdata[train_ind,1:4]
trainlabels = SAdata[train_ind,5:16]
row.names(traindata) = NULL
row.names(trainlabels) = NULL
testdata = SAdata[-train_ind,1:4]
testlabels = SAdata[-train_ind,5:16]
row.names(testdata) = NULL
row.names(testlabels) = NULL
# Held-out set used to track accuracy during the run.
validationdata = read.table("12outputNNdata.txt", sep = "\t", header= TRUE)
#########################################################################################################
# Initial Weights and Biases
#########################################################################################################
#These weights are generated from the sci-kit learn package in python
bias1 = t(read.table("final12firstlayerint.csv",sep = ","))
weights1 = read.table("final12firstlayercoef.csv",sep = ",")
w1df = rbind(bias1, weights1)
w1 = as.numeric(c(w1df[1,],w1df[2,],w1df[3,],w1df[4,],w1df[5,]))
bias2 = t(read.table("final12secondlayerint.csv",sep = ","))
weights2 = read.table("final12secondlayercoef.csv",sep = ",")
w2df = rbind(bias2, weights2)
w2 = as.numeric(c(w2df[1,],w2df[2,],w2df[3,],w2df[4,],w2df[5,],
w2df[6,],w2df[7,],w2df[8,],w2df[9,],w2df[10,],
w2df[11,],w2df[12,],w2df[13,],w2df[14,],w2df[15,],
w2df[16,],w2df[17,],w2df[18,],w2df[19,],w2df[20,],
w2df[21,]))
#Reseting the accuracy so that we can track it throughout the algorithm
acc = NULL
#########################################################################################################
# Bootstrapped Algorithm
#########################################################################################################
#The slope of the x<0 section of the leaky RELU
eps=0.0001
#These are the sizes of the sampling set and the updating set
#Size of sampling set:
m=30000
#Size of updating set:
n=10000
#Setting the iteration number
it=1
for(k in 1:20){
#These values move along the data sets so that we see new observations throughout the process
a = 1 + m*(k-1)
b = m*k
c = 1 + n*(k-1)
d = n*k
data1 = traindata[a:b,]
data2 = testdata[c:d,]
data1labels = trainlabels[a:b,]
data2labels = testlabels[c:d,]
#Tracking the observations and all their hidden layer values during the feed forward process.
#This just saves us from having to calculate any inverses
inputs1 = data.frame(x1_1=numeric(),x1_2=numeric(),x1_3=numeric(),x1_4=numeric())
outputs1 = data.frame(h1=numeric(),h2=numeric(),h3=numeric(),h4=numeric(),h5=numeric(),
h6=numeric(),h7=numeric(),h8=numeric(),h9=numeric(),h10=numeric(),
h11=numeric(),h12=numeric(),h13=numeric(),h14=numeric(),h15=numeric(),
h16=numeric(),h17=numeric(),h18=numeric(),h19=numeric(),h20=numeric())
inputs2 = data.frame(x2_1=numeric(),x2_2=numeric(),x2_3=numeric(),x2_4=numeric(),x2_5=numeric(),
x2_6=numeric(),x2_7=numeric(),x2_8=numeric(),x2_9=numeric(),x2_10=numeric(),
x2_11=numeric(),x2_12=numeric(),x2_13=numeric(),x2_14=numeric(),x2_15=numeric(),
x2_16=numeric(),x2_17=numeric(),x2_18=numeric(),x2_19=numeric(),x2_20=numeric())
outputs2 = data.frame(o1=numeric(),o2=numeric(),o3=numeric(),o4=numeric(),o5=numeric(),o6=numeric(),
o7=numeric(),o8=numeric(),o9=numeric(),o10=numeric(),o11=numeric(),o12=numeric())
finalest = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
trueval = data.frame(y1=numeric(),y2=numeric(),y3=numeric(),y4=numeric(),y5=numeric(),y6=numeric(),
y7=numeric(),y8=numeric(),y9=numeric(),y10=numeric(),y11=numeric(),y12=numeric())
for(i in 1:m){
inputs1[i,] = data1[i,1:4]
x1 = as.numeric(inputs1[i,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
outputs1[i,] = Xm %*% w1
for(j in 1:20){
inputs2[i,j] = leakyrelu(eps,outputs1[i,j])
}
x2 = as.numeric(inputs2[i,1:20])
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
outputs2[i,] = Xm %*% w2
for(j in 1:12){
finalest[i,j] = leakyrelu(eps,outputs2[i,j])
}
trueval[i,] = data1labels[i,1:12]
}
approx = cbind(inputs1,outputs1,inputs2,outputs2,finalest,trueval)
#Now that we have all of our estimates, we can score them using our score function. This will allow us to
#create a probability distribution over the observations
error = data.frame(err = numeric())
for(i in 1:m){
x = as.numeric(approx[i,57:68])
x = x/sum(x)
y = as.numeric(approx[i,69:80])
error[i,1] = mse(x,y)
}
approx = cbind(approx,error)
#Now we throw away all the incorrect observations
correctclass = data.frame(correct = numeric())
for(i in 1:m){
y = as.numeric(approx[i,57:68])
x = as.numeric(approx[i,69:80])
if(max(y)!=0){
for( j in 1:12){
if(y[j] == max(y)){
y[j]=1
} else{
y[j]=0
}
}
}
correctclass[i,1] = t(x) %*% y
}
approx = cbind(approx,correctclass)
truevalues = approx[which(approx$correct == 1),]
truey1 = truevalues[which(truevalues$y1 == 1),];truey2 = truevalues[which(truevalues$y2 == 1),]
truey3 = truevalues[which(truevalues$y3 == 1),];truey4 = truevalues[which(truevalues$y4 == 1),]
truey5 = truevalues[which(truevalues$y5 == 1),];truey6 = truevalues[which(truevalues$y6 == 1),]
truey7 = truevalues[which(truevalues$y7 == 1),];truey8 = truevalues[which(truevalues$y8 == 1),]
truey9 = truevalues[which(truevalues$y9 == 1),];truey10 = truevalues[which(truevalues$y10 == 1),]
truey11 = truevalues[which(truevalues$y11 == 1),];truey12 = truevalues[which(truevalues$y12 == 1),]
#We create probability distributions over all the 12 output values
sy1 = sapply(truey1[,81],score);sy1 = sy1/sum(sy1)
sy2 = sapply(truey2[,81],score);sy2 = sy2/sum(sy2)
sy3 = sapply(truey3[,81],score);sy3 = sy3/sum(sy3)
sy4 = sapply(truey4[,81],score);sy4 = sy4/sum(sy4)
sy5 = sapply(truey5[,81],score);sy5 = sy5/sum(sy5)
sy6 = sapply(truey6[,81],score);sy6 = sy6/sum(sy6)
sy7 = sapply(truey7[,81],score);sy7 = sy7/sum(sy7)
sy8 = sapply(truey8[,81],score);sy8 = sy8/sum(sy8)
sy9 = sapply(truey9[,81],score);sy9 = sy9/sum(sy9)
sy10 = sapply(truey10[,81],score);sy10 = sy10/sum(sy10)
sy11 = sapply(truey11[,81],score);sy11 = sy11/sum(sy11)
sy12 = sapply(truey12[,81],score);sy12 = sy12/sum(sy12)
for(i in 1:n){
x = as.numeric(data2[i,1:4])
data = NULL
y2 = 10*as.numeric(data2labels[i,1:12])
if(y2[1] == 10){
data = truey1;sc = sy1
} else if(y2[2] == 10){
data = truey2;sc = sy2
} else if(y2[3] == 10){
data = truey3;sc = sy3
} else if(y2[4] == 10){
data = truey4;sc = sy4
} else if(y2[5] == 10){
data = truey5;sc = sy5
} else if(y2[6] == 10){
data = truey6;sc = sy6
} else if(y2[7] == 10){
data = truey7;sc = sy7
} else if(y2[8] == 10){
data = truey8;sc = sy8
} else if(y2[9] == 10){
data = truey9;sc = sy9
} else if(y2[10] == 10){
data = truey10;sc = sy10
} else if(y2[11] == 10){
data = truey11;sc = sy11
} else if(y2[12] == 10){
data = truey12;sc = sy12
}
q = sample(1:dim(data)[1],size=1,prob=sc)
X1 = cbind(1*diag(20),x[1]*diag(20),x[2]*diag(20),x[3]*diag(20),x[4]*diag(20))
y1 = as.numeric(data[q,5:24])
x2 = as.numeric(data[q,25:44])
X2 = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
if(it==1){
A1check = t(X1)%*%X1
A2check = t(X2)%*%X2
B1check = t(X1)%*%y1
B2check = t(X2)%*%y2
}
A1 = t(X1)%*%X1
A2 = t(X2)%*%X2
B1 = t(X1)%*%y1
B2 = t(X2)%*%y2
A1check = A1check +(A1 -A1check)/(it+1)
B1check = B1check +(B1 -B1check)/(it+1)
A2check = A2check +(A2 -A2check)/(it+1)
B2check = B2check +(B2 -B2check)/(it+1)
w1 = w1 + (1/4)*(B1check-A1check%*%w1)
w2 = w2 + (1/400)*(B2check-A2check%*%w2)
it=it+1
if(i%%1000==0){
tempvalidationdata = validationdata
estimates = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
for (s in 1:6000){
x1 = as.numeric(tempvalidationdata[s,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
output1 = Xm %*% w1
x2 = c(rep(0,20))
for(j in 1:20){
x2[j] = leakyrelu(eps,output1[j])
}
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
output2 = Xm %*% w2
for(j in 1:12){
estimates[s,j] = leakyrelu(eps,output2[j])
}
}
tempvalidationdata=cbind(tempvalidationdata,estimates)
correctclass = data.frame(correct = numeric(), error = numeric())
for(s in 1:6000){
y = as.numeric(tempvalidationdata[s,17:28])
x = as.numeric(tempvalidationdata[s,5:16])
temp = rep(0,12)
temp[which.max(y)] = 1
correctclass[s,1] = t(x) %*% temp
}
tempvalidationdata=cbind(tempvalidationdata,estimates,correctclass)
acc = c(acc,sum(correctclass$correct))
plot(acc/6000,type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
}
}
}
| /Code/BootStrap3StepConstantGain.R | no_license | StephenStyles/StochasticApproximation | R | false | false | 12,043 | r | #########################################################################################################
# Initial Setup of working directory and functions used to calculate scores
#########################################################################################################
setwd("C:/Users/sjsty/Desktop/Masters/Algorithms/Simplified Problem")
# Leaky ReLU activation for a single value: identity for x >= 0,
# slope `eps` for x < 0. Scalar only -- the condition uses a scalar `if`.
leakyrelu <- function(eps, x) {
  if (x < 0) eps * x else x
}
# Score observations by their error: smaller error -> larger score, via
# exp(-x). Bug fix: the original loop assigned the whole vector exp(-x)
# into rank[i], which errors for inputs of length > 1; exp() is already
# vectorized, and for the scalar inputs used via sapply() the result is
# identical.
score <- function(x) {
  exp(-x)
}
# One stochastic-approximation step toward solving A w = b, using the
# decaying gain 1 / (n + i)^chi.
# NOTE: this masks stats::update within the script.
update <- function(A, b, w, i, n, chi) {
  gain <- 1 / ((n + i)^chi)
  w + gain * (b - A %*% w)
}
# Mean squared error between two equal-length numeric vectors.
# Rewritten vectorized: the original grew `temp` element-by-element in a
# 1:length(x) loop; mean((x - y)^2) is equivalent, avoids reallocation,
# and does not misbehave via 1:0 when x is empty.
mse <- function(x, y) {
  mean((x - y)^2)
}
#########################################################################################################
# Initial Data to estimate and validation set
#########################################################################################################
SAdata = read.table("12outputtraindata.txt", sep = "\t", header= TRUE)
row.names(SAdata) = NULL
#Setting the amount of data that will be in the sampling set and the amount that will be in the updating set
smp_size = floor(0.75 * nrow(SAdata))
#This section just creates datasets for the X's and y's
train_ind = sample(seq_len(nrow(SAdata)), size = smp_size)
traindata = SAdata[train_ind,1:4]
trainlabels = SAdata[train_ind,5:16]
row.names(traindata) = NULL
row.names(trainlabels) = NULL
testdata = SAdata[-train_ind,1:4]
testlabels = SAdata[-train_ind,5:16]
row.names(testdata) = NULL
row.names(testlabels) = NULL
validationdata = read.table("12outputNNdata.txt", sep = "\t", header= TRUE)
#########################################################################################################
# Initial Weights and Biases
#########################################################################################################
#These weights are generated from the sci-kit learn package in python
bias1 = t(read.table("final12firstlayerint.csv",sep = ","))
weights1 = read.table("final12firstlayercoef.csv",sep = ",")
w1df = rbind(bias1, weights1)
w1 = as.numeric(c(w1df[1,],w1df[2,],w1df[3,],w1df[4,],w1df[5,]))
bias2 = t(read.table("final12secondlayerint.csv",sep = ","))
weights2 = read.table("final12secondlayercoef.csv",sep = ",")
w2df = rbind(bias2, weights2)
w2 = as.numeric(c(w2df[1,],w2df[2,],w2df[3,],w2df[4,],w2df[5,],
w2df[6,],w2df[7,],w2df[8,],w2df[9,],w2df[10,],
w2df[11,],w2df[12,],w2df[13,],w2df[14,],w2df[15,],
w2df[16,],w2df[17,],w2df[18,],w2df[19,],w2df[20,],
w2df[21,]))
#Reseting the accuracy so that we can track it throughout the algorithm
acc = NULL
#########################################################################################################
# Bootstrapped Algorithm
#########################################################################################################
#The slope of the x<0 section of the leaky RELU
eps=0.0001
#These are the sizes of the sampling set and the updating set
#Size of sampling set:
m=30000
#Size of updating set:
n=10000
#Setting the iteration number
it=1
for(k in 1:20){
#These values move along the data sets so that we see new observations throughout the process
a = 1 + m*(k-1)
b = m*k
c = 1 + n*(k-1)
d = n*k
data1 = traindata[a:b,]
data2 = testdata[c:d,]
data1labels = trainlabels[a:b,]
data2labels = testlabels[c:d,]
#Tracking the observations and all their hidden layer values during the feed forward process.
#This just saves us from having to calculate any inverses
inputs1 = data.frame(x1_1=numeric(),x1_2=numeric(),x1_3=numeric(),x1_4=numeric())
outputs1 = data.frame(h1=numeric(),h2=numeric(),h3=numeric(),h4=numeric(),h5=numeric(),
h6=numeric(),h7=numeric(),h8=numeric(),h9=numeric(),h10=numeric(),
h11=numeric(),h12=numeric(),h13=numeric(),h14=numeric(),h15=numeric(),
h16=numeric(),h17=numeric(),h18=numeric(),h19=numeric(),h20=numeric())
inputs2 = data.frame(x2_1=numeric(),x2_2=numeric(),x2_3=numeric(),x2_4=numeric(),x2_5=numeric(),
x2_6=numeric(),x2_7=numeric(),x2_8=numeric(),x2_9=numeric(),x2_10=numeric(),
x2_11=numeric(),x2_12=numeric(),x2_13=numeric(),x2_14=numeric(),x2_15=numeric(),
x2_16=numeric(),x2_17=numeric(),x2_18=numeric(),x2_19=numeric(),x2_20=numeric())
outputs2 = data.frame(o1=numeric(),o2=numeric(),o3=numeric(),o4=numeric(),o5=numeric(),o6=numeric(),
o7=numeric(),o8=numeric(),o9=numeric(),o10=numeric(),o11=numeric(),o12=numeric())
finalest = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
trueval = data.frame(y1=numeric(),y2=numeric(),y3=numeric(),y4=numeric(),y5=numeric(),y6=numeric(),
y7=numeric(),y8=numeric(),y9=numeric(),y10=numeric(),y11=numeric(),y12=numeric())
for(i in 1:m){
inputs1[i,] = data1[i,1:4]
x1 = as.numeric(inputs1[i,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
outputs1[i,] = Xm %*% w1
for(j in 1:20){
inputs2[i,j] = leakyrelu(eps,outputs1[i,j])
}
x2 = as.numeric(inputs2[i,1:20])
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
outputs2[i,] = Xm %*% w2
for(j in 1:12){
finalest[i,j] = leakyrelu(eps,outputs2[i,j])
}
trueval[i,] = data1labels[i,1:12]
}
# Stack all layer values side by side. Resulting column layout:
# 1:4 = inputs1, 5:24 = outputs1, 25:44 = inputs2, 45:56 = outputs2,
# 57:68 = finalest (the 12 network estimates), 69:80 = trueval (true labels).
approx = cbind(inputs1,outputs1,inputs2,outputs2,finalest,trueval)
#Now that we have all of our estimates, we can score them using our score function. This will allow us to
#create a probability distribution over the observations
error = data.frame(err = numeric())
for(i in 1:m){
# Normalize the 12 estimates to sum to one, then score them against the
# true labels. NOTE(review): mse() is defined earlier in the file.
x = as.numeric(approx[i,57:68])
x = x/sum(x)
y = as.numeric(approx[i,69:80])
error[i,1] = mse(x,y)
}
approx = cbind(approx,error)  # column 81 = err
#Now we throw away all the incorrect observations
correctclass = data.frame(correct = numeric())
for(i in 1:m){
# One-hot the argmax of the estimates (y) and dot it with the true label
# vector (x): the product is 1 exactly when the predicted class is correct.
y = as.numeric(approx[i,57:68])
x = as.numeric(approx[i,69:80])
if(max(y)!=0){
for( j in 1:12){
if(y[j] == max(y)){
y[j]=1
} else{
y[j]=0
}
}
}
correctclass[i,1] = t(x) %*% y
}
approx = cbind(approx,correctclass)  # column 82 = correct
# Keep only the correctly classified rows, then split them by true class.
truevalues = approx[which(approx$correct == 1),]
truey1 = truevalues[which(truevalues$y1 == 1),];truey2 = truevalues[which(truevalues$y2 == 1),]
truey3 = truevalues[which(truevalues$y3 == 1),];truey4 = truevalues[which(truevalues$y4 == 1),]
truey5 = truevalues[which(truevalues$y5 == 1),];truey6 = truevalues[which(truevalues$y6 == 1),]
truey7 = truevalues[which(truevalues$y7 == 1),];truey8 = truevalues[which(truevalues$y8 == 1),]
truey9 = truevalues[which(truevalues$y9 == 1),];truey10 = truevalues[which(truevalues$y10 == 1),]
truey11 = truevalues[which(truevalues$y11 == 1),];truey12 = truevalues[which(truevalues$y12 == 1),]
#We create probability distributions over all the 12 output values
# Column 81 holds each row's error; score() (defined earlier in the file)
# maps it to a sampling weight, normalized to a probability distribution.
sy1 = sapply(truey1[,81],score);sy1 = sy1/sum(sy1)
sy2 = sapply(truey2[,81],score);sy2 = sy2/sum(sy2)
sy3 = sapply(truey3[,81],score);sy3 = sy3/sum(sy3)
sy4 = sapply(truey4[,81],score);sy4 = sy4/sum(sy4)
sy5 = sapply(truey5[,81],score);sy5 = sy5/sum(sy5)
sy6 = sapply(truey6[,81],score);sy6 = sy6/sum(sy6)
sy7 = sapply(truey7[,81],score);sy7 = sy7/sum(sy7)
sy8 = sapply(truey8[,81],score);sy8 = sy8/sum(sy8)
sy9 = sapply(truey9[,81],score);sy9 = sy9/sum(sy9)
sy10 = sapply(truey10[,81],score);sy10 = sy10/sum(sy10)
sy11 = sapply(truey11[,81],score);sy11 = sy11/sum(sy11)
sy12 = sapply(truey12[,81],score);sy12 = sy12/sum(sy12)
# Training pass over the n rows of data2 (labels in data2labels).
# NOTE(review): depends on objects created before this excerpt: w1, w2, it,
# acc, eps, validationdata, leakyrelu(), plus the truey*/sy* pools built
# above; this loop also sits inside an enclosing iteration whose opening
# (and closing) braces lie outside this excerpt.
for(i in 1:n){
x = as.numeric(data2[i,1:4])
data = NULL
# Scale the one-hot label by 10 so each class test below reads "== 10".
y2 = 10*as.numeric(data2labels[i,1:12])
# Select the pool of correctly classified training rows with the same true
# class, along with its score-based sampling distribution sc.
if(y2[1] == 10){
data = truey1;sc = sy1
} else if(y2[2] == 10){
data = truey2;sc = sy2
} else if(y2[3] == 10){
data = truey3;sc = sy3
} else if(y2[4] == 10){
data = truey4;sc = sy4
} else if(y2[5] == 10){
data = truey5;sc = sy5
} else if(y2[6] == 10){
data = truey6;sc = sy6
} else if(y2[7] == 10){
data = truey7;sc = sy7
} else if(y2[8] == 10){
data = truey8;sc = sy8
} else if(y2[9] == 10){
data = truey9;sc = sy9
} else if(y2[10] == 10){
data = truey10;sc = sy10
} else if(y2[11] == 10){
data = truey11;sc = sy11
} else if(y2[12] == 10){
data = truey12;sc = sy12
}
# Draw one "teacher" row from the pool, weighted by its score.
q = sample(1:dim(data)[1],size=1,prob=sc)
# Layer-1 design matrix for the current input; the teacher row supplies the
# layer-1 targets (cols 5:24) and the layer-2 inputs (cols 25:44).
X1 = cbind(1*diag(20),x[1]*diag(20),x[2]*diag(20),x[3]*diag(20),x[4]*diag(20))
y1 = as.numeric(data[q,5:24])
x2 = as.numeric(data[q,25:44])
X2 = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
# Initialize the running normal-equation statistics on the first global step.
if(it==1){
A1check = t(X1)%*%X1
A2check = t(X2)%*%X2
B1check = t(X1)%*%y1
B2check = t(X2)%*%y2
}
# Current-step statistics A = X'X and B = X'y for each layer.
A1 = t(X1)%*%X1
A2 = t(X2)%*%X2
B1 = t(X1)%*%y1
B2 = t(X2)%*%y2
# Incremental running means: check <- check + (new - check)/(it+1).
A1check = A1check +(A1 -A1check)/(it+1)
B1check = B1check +(B1 -B1check)/(it+1)
A2check = A2check +(A2 -A2check)/(it+1)
B2check = B2check +(B2 -B2check)/(it+1)
# Damped fixed-point steps toward solving Acheck %*% w = Bcheck
# (layer-specific step sizes 1/4 and 1/400).
w1 = w1 + (1/4)*(B1check-A1check%*%w1)
w2 = w2 + (1/400)*(B2check-A2check%*%w2)
it=it+1
# Every 1000 rows: run the current weights over the 6000-row validation set
# and record classification accuracy.
if(i%%1000==0){
tempvalidationdata = validationdata
estimates = data.frame(yhat1=numeric(),yhat2=numeric(),yhat3=numeric(),yhat4=numeric(),yhat5=numeric(),yhat6=numeric(),
yhat7=numeric(),yhat8=numeric(),yhat9=numeric(),yhat10=numeric(),yhat11=numeric(),yhat12=numeric())
for (s in 1:6000){
# Forward pass (same 4-20-12 architecture as the training pass above).
x1 = as.numeric(tempvalidationdata[s,1:4])
Xm = cbind(1*diag(20),x1[1]*diag(20),x1[2]*diag(20),x1[3]*diag(20),x1[4]*diag(20))
output1 = Xm %*% w1
x2 = c(rep(0,20))
for(j in 1:20){
x2[j] = leakyrelu(eps,output1[j])
}
Xm = cbind(1*diag(12),x2[1]*diag(12),x2[2]*diag(12),x2[3]*diag(12),x2[4]*diag(12),
x2[5]*diag(12),x2[6]*diag(12),x2[7]*diag(12),x2[8]*diag(12),
x2[9]*diag(12),x2[10]*diag(12),x2[11]*diag(12),x2[12]*diag(12),
x2[13]*diag(12),x2[14]*diag(12),x2[15]*diag(12),x2[16]*diag(12),
x2[17]*diag(12),x2[18]*diag(12),x2[19]*diag(12),x2[20]*diag(12))
output2 = Xm %*% w2
for(j in 1:12){
estimates[s,j] = leakyrelu(eps,output2[j])
}
}
tempvalidationdata=cbind(tempvalidationdata,estimates)
correctclass = data.frame(correct = numeric(), error = numeric())
for(s in 1:6000){
# Cols 17:28 are the just-appended estimates; cols 5:16 are presumably the
# one-hot validation labels (validationdata is defined outside this
# excerpt -- TODO confirm its 4-input + 12-label column layout).
y = as.numeric(tempvalidationdata[s,17:28])
x = as.numeric(tempvalidationdata[s,5:16])
temp = rep(0,12)
temp[which.max(y)] = 1
correctclass[s,1] = t(x) %*% temp
}
# NOTE(review): `estimates` was already cbind-ed above, so this appends a
# duplicate copy of those columns; the `error` column of `correctclass`
# is declared but never filled. Harmless for the accuracy plot, but worth
# cleaning up.
tempvalidationdata=cbind(tempvalidationdata,estimates,correctclass)
acc = c(acc,sum(correctclass$correct))
plot(acc/6000,type = "l", col = "blue", xlab = "1000 Steps", ylab = "Accuracy")
}
}
}
|
#' Imbens-Kalyanaraman 2012 Optimal Bandwidth Calculation
#'
#' \code{bw_ik12} calculates the Imbens-Kalyanaraman (2012) optimal bandwidth
#' for local linear regression in regression discontinuity designs.
#' It is based on a function in the now archived rddtools package.
#' This is an internal function and is typically not directly invoked by the user.
#' It can be accessed using the triple colon, as in rddapp:::bw_ik12().
#'
#' @param X A numerical vector which is the running variable.
#' @param Y A numerical vector which is the outcome variable.
#' @param cutpoint The cutpoint. Defaults to zero when \code{NULL}.
#' @param verbose Logical flag indicating whether to print more information to the terminal.
#' Default is \code{FALSE}.
#' @param kernel String indicating which kernel to use. Options are \code{"triangular"}
#' (default and recommended), \code{"rectangular"}, \code{"epanechnikov"}, \code{"quartic"},
#' \code{"triweight"}, \code{"tricube"}, and \code{"cosine"}.
#'
#' @return The optimal bandwidth (numeric scalar).
#'
#' @references Imbens, G., Kalyanaraman, K. (2012).
#' Optimal bandwidth choice for the regression discontinuity estimator.
#' The Review of Economic Studies, 79(3), 933-959.
#' \url{https://academic.oup.com/restud/article/79/3/933/1533189}.
#'
#' @importFrom stats var
bw_ik12 <- function(X, Y, cutpoint = NULL, verbose = FALSE, kernel = "triangular") {
  ## Validate lengths BEFORE any subsetting: otherwise the logical index built
  ## from complete.cases(X) & complete.cases(Y) is silently recycled and a
  ## caller error can go undetected.
  if (length(X) != length(Y))
    stop("Running and outcome variable must be of equal length.")
  sub <- complete.cases(X) & complete.cases(Y)
  X <- X[sub]
  Y <- Y[sub]
  N <- length(X)
  ## Resolve the default cutpoint BEFORE it is used. (Bug fix: previously
  ## N_left/N_right were computed while cutpoint could still be NULL, in which
  ## case sum(X < NULL) == 0 silently corrupted the step-2 bandwidths.)
  if (is.null(cutpoint)) {
    cutpoint <- 0
    if (verbose)
      cat("Using default cutpoint of zero.\n")
  } else {
    if (!(typeof(cutpoint) %in% c("integer", "double")))
      stop("Cutpoint must be of a numeric type.")
  }
  N_left <- sum(X < cutpoint, na.rm = TRUE)
  N_right <- sum(X >= cutpoint, na.rm = TRUE)
  ########## STEP 1
  ## Pilot (Silverman) bandwidth.
  h1 <- 1.84 * sd(X) * N^(-1/5)
  if (verbose)
    cat("\n-h1:", h1)
  ## Density estimate f(cutpoint) from the share of observations within h1.
  isIn_h1_left <- X >= (cutpoint - h1) & X < cutpoint
  isIn_h1_right <- X >= cutpoint & X <= (cutpoint + h1)
  NisIn_h1_left <- sum(isIn_h1_left, na.rm = TRUE)
  NisIn_h1_right <- sum(isIn_h1_right, na.rm = TRUE)
  if (verbose)
    cat("\n-N left/right:", NisIn_h1_left, NisIn_h1_right)
  f_cut <- (NisIn_h1_left + NisIn_h1_right) / (2 * N * h1)
  if (verbose)
    cat("\n-f(cutpoint):", f_cut)
  ## Conditional variances at the cutpoint: Equ (13).
  var_inh_left <- var(Y[isIn_h1_left], na.rm = TRUE)
  var_inh_right <- var(Y[isIn_h1_right], na.rm = TRUE)
  if (verbose) {
    cat("\n-Sigma^2 left:", var_inh_left, "\n-Sigma^2 right:", var_inh_right)
  }
  ########## STEP 2
  ## Global cubic fit; estimate of the third derivative m3: Equ (14).
  reg <- lm(Y ~ I(X >= cutpoint) + I(X - cutpoint) + I((X - cutpoint)^2) + I((X - cutpoint)^3))
  m3 <- 6 * coef(reg)[5]
  if (verbose)
    cat("\n-m3:", m3)
  ## Left and right second-stage bandwidths: Equ (15).
  Ck_h2 <- 3.5567 # 7200^(1/7)
  h2_left <- Ck_h2 * (var_inh_left / (f_cut * m3^2))^(1/7) * N_left^(-1/7)
  h2_right <- Ck_h2 * (var_inh_right / (f_cut * m3^2))^(1/7) * N_right^(-1/7)
  if (verbose)
    cat("\n-h2 left:", h2_left, "\n-h2 right:", h2_right)
  ## Second derivatives on each side from local quadratic fits.
  isIn_h2_left <- X >= (cutpoint - h2_left) & X < cutpoint
  isIn_h2_right <- X >= cutpoint & X <= (cutpoint + h2_right)
  N_h2_left <- sum(isIn_h2_left, na.rm = TRUE)
  N_h2_right <- sum(isIn_h2_right, na.rm = TRUE)
  if (N_h2_left == 0 | N_h2_right == 0)
    stop("Insufficient data in vicinity of the cutpoint to calculate bandwidth.")
  reg2_left <- lm(Y ~ I(X - cutpoint) + I((X - cutpoint)^2), subset = isIn_h2_left)
  reg2_right <- lm(Y ~ I(X - cutpoint) + I((X - cutpoint)^2), subset = isIn_h2_right)
  m2_left <- as.numeric(2 * coef(reg2_left)[3])
  m2_right <- as.numeric(2 * coef(reg2_right)[3])
  if (verbose)
    cat("\n-m2 left:", m2_left, "\n-m2 right:", m2_right)
  ########## STEP 3
  ## Regularization terms: Equ (16).
  r_left <- (2160 * var_inh_left) / (N_h2_left * h2_left^4)
  r_right <- (2160 * var_inh_right) / (N_h2_right * h2_right^4)
  if (verbose)
    cat("\n-Reg left:", r_left, "\n-Reg right:", r_right)
  ## Kernel-dependent constant C_k (Imbens & Kalyanaraman 2012, p. 6).
  if (kernel == "triangular") {
    ck <- 3.43754
  } else if (kernel == "rectangular") {
    ck <- 2.70192
  } else if (kernel == "epanechnikov") {
    ck <- 3.1999
  } else if (kernel == "quartic" | kernel == "biweight") {
    ck <- 3.65362
  } else if (kernel == "triweight") {
    ck <- 4.06065
  } else if (kernel == "tricube") {
    ck <- 3.68765
  # } else if (kernel == "gaussian") {
  #   ck <- 1.25864
  } else if (kernel == "cosine") {
    ck <- 3.25869
  } else {
    stop("Unrecognized kernel.")
  }
  ## Final bandwidth: Equ (17).
  optbw <- ck * ((var_inh_left + var_inh_right) /
    (f_cut * ((m2_right - m2_left)^2 + r_left + r_right)))^(1/5) * N^(-1/5)
  left <- (X >= (cutpoint - optbw)) & (X < cutpoint)
  right <- (X >= cutpoint) & (X <= (cutpoint + optbw))
  if (sum(left) == 0 | sum(right) == 0)
    stop("Insufficient data in the calculated bandwidth.")
  names(optbw) <- NULL
  if (verbose)
    cat("Imbens-Kalyanaraman Optimal Bandwidth: ", sprintf("%.3f", optbw), "\n")
  return(optbw)
}
| /R/bw_ik12.R | no_license | kimberlywebb/rddapp | R | false | false | 5,366 | r | #' Imbens-Kalyanaraman 2012 Optimal Bandwidth Calculation
#'
#' \code{bw_ik12} calculates the Imbens-Kalyanaraman (2012) optimal bandwidth
#' for local linear regression in regression discontinuity designs.
#' It is based on a function in the now archived rddtools package.
#' This is an internal function and is typically not directly invoked by the user.
#' It can be accessed using the triple colon, as in rddapp:::bw_ik12().
#'
#' @param X A numerical vector which is the running variable.
#' @param Y A numerical vector which is the outcome variable.
#' @param cutpoint The cutpoint.
#' @param verbose Logical flag indicating whether to print more information to the terminal.
#' Default is \code{FALSE}.
#' @param kernel String indicating which kernel to use. Options are \code{"triangular"}
#' (default and recommended), \code{"rectangular"}, \code{"epanechnikov"}, \code{"quartic"},
#' \code{"triweight"}, \code{"tricube"}, and \code{"cosine"}.
#'
#' @return The optimal bandwidth.
#'
#' @references Imbens, G., Kalyanaraman, K. (2012).
#' Optimal bandwidth choice for the regression discontinuity estimator.
#' The Review of Economic Studies, 79(3), 933-959.
#' \url{https://academic.oup.com/restud/article/79/3/933/1533189}.
#'
#' @importFrom stats var
bw_ik12 <- function(X, Y, cutpoint = NULL, verbose = FALSE, kernel = "triangular") {
  ## Imbens-Kalyanaraman (2012) optimal RDD bandwidth (see roxygen above).
  ## Validate lengths BEFORE any subsetting: otherwise the logical index built
  ## from complete.cases(X) & complete.cases(Y) is silently recycled and a
  ## caller error can go undetected.
  if (length(X) != length(Y))
    stop("Running and outcome variable must be of equal length.")
  sub <- complete.cases(X) & complete.cases(Y)
  X <- X[sub]
  Y <- Y[sub]
  N <- length(X)
  ## Resolve the default cutpoint BEFORE it is used. (Bug fix: previously
  ## N_left/N_right were computed while cutpoint could still be NULL, in which
  ## case sum(X < NULL) == 0 silently corrupted the step-2 bandwidths.)
  if (is.null(cutpoint)) {
    cutpoint <- 0
    if (verbose)
      cat("Using default cutpoint of zero.\n")
  } else {
    if (!(typeof(cutpoint) %in% c("integer", "double")))
      stop("Cutpoint must be of a numeric type.")
  }
  N_left <- sum(X < cutpoint, na.rm = TRUE)
  N_right <- sum(X >= cutpoint, na.rm = TRUE)
  ########## STEP 1
  ## Pilot (Silverman) bandwidth.
  h1 <- 1.84 * sd(X) * N^(-1/5)
  if (verbose)
    cat("\n-h1:", h1)
  ## Density estimate f(cutpoint) from the share of observations within h1.
  isIn_h1_left <- X >= (cutpoint - h1) & X < cutpoint
  isIn_h1_right <- X >= cutpoint & X <= (cutpoint + h1)
  NisIn_h1_left <- sum(isIn_h1_left, na.rm = TRUE)
  NisIn_h1_right <- sum(isIn_h1_right, na.rm = TRUE)
  if (verbose)
    cat("\n-N left/right:", NisIn_h1_left, NisIn_h1_right)
  f_cut <- (NisIn_h1_left + NisIn_h1_right) / (2 * N * h1)
  if (verbose)
    cat("\n-f(cutpoint):", f_cut)
  ## Conditional variances at the cutpoint: Equ (13).
  var_inh_left <- var(Y[isIn_h1_left], na.rm = TRUE)
  var_inh_right <- var(Y[isIn_h1_right], na.rm = TRUE)
  if (verbose) {
    cat("\n-Sigma^2 left:", var_inh_left, "\n-Sigma^2 right:", var_inh_right)
  }
  ########## STEP 2
  ## Global cubic fit; estimate of the third derivative m3: Equ (14).
  reg <- lm(Y ~ I(X >= cutpoint) + I(X - cutpoint) + I((X - cutpoint)^2) + I((X - cutpoint)^3))
  m3 <- 6 * coef(reg)[5]
  if (verbose)
    cat("\n-m3:", m3)
  ## Left and right second-stage bandwidths: Equ (15).
  Ck_h2 <- 3.5567 # 7200^(1/7)
  h2_left <- Ck_h2 * (var_inh_left / (f_cut * m3^2))^(1/7) * N_left^(-1/7)
  h2_right <- Ck_h2 * (var_inh_right / (f_cut * m3^2))^(1/7) * N_right^(-1/7)
  if (verbose)
    cat("\n-h2 left:", h2_left, "\n-h2 right:", h2_right)
  ## Second derivatives on each side from local quadratic fits.
  isIn_h2_left <- X >= (cutpoint - h2_left) & X < cutpoint
  isIn_h2_right <- X >= cutpoint & X <= (cutpoint + h2_right)
  N_h2_left <- sum(isIn_h2_left, na.rm = TRUE)
  N_h2_right <- sum(isIn_h2_right, na.rm = TRUE)
  if (N_h2_left == 0 | N_h2_right == 0)
    stop("Insufficient data in vicinity of the cutpoint to calculate bandwidth.")
  reg2_left <- lm(Y ~ I(X - cutpoint) + I((X - cutpoint)^2), subset = isIn_h2_left)
  reg2_right <- lm(Y ~ I(X - cutpoint) + I((X - cutpoint)^2), subset = isIn_h2_right)
  m2_left <- as.numeric(2 * coef(reg2_left)[3])
  m2_right <- as.numeric(2 * coef(reg2_right)[3])
  if (verbose)
    cat("\n-m2 left:", m2_left, "\n-m2 right:", m2_right)
  ########## STEP 3
  ## Regularization terms: Equ (16).
  r_left <- (2160 * var_inh_left) / (N_h2_left * h2_left^4)
  r_right <- (2160 * var_inh_right) / (N_h2_right * h2_right^4)
  if (verbose)
    cat("\n-Reg left:", r_left, "\n-Reg right:", r_right)
  ## Kernel-dependent constant C_k (Imbens & Kalyanaraman 2012, p. 6).
  if (kernel == "triangular") {
    ck <- 3.43754
  } else if (kernel == "rectangular") {
    ck <- 2.70192
  } else if (kernel == "epanechnikov") {
    ck <- 3.1999
  } else if (kernel == "quartic" | kernel == "biweight") {
    ck <- 3.65362
  } else if (kernel == "triweight") {
    ck <- 4.06065
  } else if (kernel == "tricube") {
    ck <- 3.68765
  # } else if (kernel == "gaussian") {
  #   ck <- 1.25864
  } else if (kernel == "cosine") {
    ck <- 3.25869
  } else {
    stop("Unrecognized kernel.")
  }
  ## Final bandwidth: Equ (17).
  optbw <- ck * ((var_inh_left + var_inh_right) /
    (f_cut * ((m2_right - m2_left)^2 + r_left + r_right)))^(1/5) * N^(-1/5)
  left <- (X >= (cutpoint - optbw)) & (X < cutpoint)
  right <- (X >= cutpoint) & (X <= (cutpoint + optbw))
  if (sum(left) == 0 | sum(right) == 0)
    stop("Insufficient data in the calculated bandwidth.")
  names(optbw) <- NULL
  if (verbose)
    cat("Imbens-Kalyanaraman Optimal Bandwidth: ", sprintf("%.3f", optbw), "\n")
  return(optbw)
}
|
# Extracted example for ggthemes::ptol_pal (Paul Tol colour schemes).
library(ggthemes)
### Name: ptol_pal
### Title: Color Palettes from Paul Tol's "Colour Schemes"
### Aliases: ptol_pal
### ** Examples
# scales::show_col() draws a swatch grid; ptol_pal() returns a palette
# function that is then called with the desired number of colours.
library("scales")
show_col(ptol_pal()(6))
show_col(ptol_pal()(4))
show_col(ptol_pal()(12))
| /data/genthat_extracted_code/ggthemes/examples/ptol_pal.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 233 | r | library(ggthemes)
### Name: ptol_pal
### Title: Color Palettes from Paul Tol's "Colour Schemes"
### Aliases: ptol_pal
### ** Examples
library("scales")
show_col(ptol_pal()(6))
show_col(ptol_pal()(4))
show_col(ptol_pal()(12))
|
#' S3 plotting method for diffnet objects.
#'
#' @param x An object of class \code{\link[=diffnet-class]{diffnet}}
#' @param t Integer scalar indicating the time slice to plot.
#' @param vertex.color Character scalar/vector. Color of the vertices.
#' @template plotting_template
#' @param main Character. A title template to be passed to sprintf.
#' @param ... Further arguments passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param y Ignored.
#' @export
#'
#' @family diffnet methods
#'
#' @return A matrix with the coordinates of the vertices.
#' @author George G. Vega Yon
#' @examples
#'
#' data(medInnovationsDiffNet)
#' plot(medInnovationsDiffNet)
#'
#'
plot.diffnet <- function(
x,y=NULL, t=1,
vertex.color = c(adopt="steelblue", noadopt="white"),
vertex.size = "degree",
main = "Diffusion network in time %d",
minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)),
...) {
# Collect the extra arguments destined for igraph::plot.igraph.
igraph.args <- list(...)
# Checking that the time period is actually within the diffnet's range.
if (!(t %in% 1:x$meta$nper))
stop("-t- must be an integer within 1 and ",x$meta$nper,".")
# Extracting the (igraph) slice to be plotted
graph <- diffnet_to_igraph(x)[[t]]
# Setting the colors: cumulative adopters at time t get vertex.color[1]
# ("adopt"), everyone else vertex.color[2] ("noadopt").
cols <- with(x, ifelse(cumadopt[,t], vertex.color[1], vertex.color[2]))
# NOTE(review): this helper mutates the list named "igraph.args" in the
# calling frame by name (non-standard evaluation), so the variable name
# above must not be changed.
set_igraph_plotting_defaults("igraph.args")
# Compute a layout only when the caller did not supply one.
if (!length(igraph.args$layout))
igraph.args$layout <- igraph::layout_nicely(graph)
igraph.args$vertex.color <- cols
# Open a fresh plotting region with a fixed [-1, 1] coordinate system.
graphics::plot.new()
graphics::plot.window(
xlim = c(-1,1),
ylim = c(-1,1)
)
# Vertex sizes from the requested metric (e.g. "degree"), rescaled to the
# configured relative min/max.
igraph.args$vertex.size <-
rescale_vertex_igraph(
compute_vertex_size(x$graph[[t]], vertex.size),
minmax.relative.size = minmax.relative.size
)
do.call(igraph::plot.igraph, c(
list(
x = graph
), igraph.args))
# Title template: sprintf() fills in the time period label.
if (length(main))
graphics::title(main = sprintf(main, x$meta$pers[t]))
# Return the layout invisibly so callers can reuse the coordinates.
invisible(igraph.args$layout)
}
#' @export
#' @rdname diffnet-class
print.diffnet <- function(x, ...) {
# Console summary of a diffnet object: name, behavior, size, time periods,
# directedness, final prevalence, and the vertex attribute sets.
with(x, {
# Static vertex attributes: comma-separated names truncated at 50 chars
# ("-" when empty), with the attribute count appended in parentheses.
vsa <- paste0(colnames(vertex.static.attrs), collapse=", ")
if (nchar(vsa) > 50) vsa <- paste0(strtrim(vsa, 50),"...")
else if (!nchar(vsa)) vsa <- '-'
nsa <-ncol(vertex.static.attrs)
if (nsa) vsa <- paste0(vsa," (",nsa, ")")
# Dynamic vertex attributes (taken from the first period), same format.
vda <- paste0(colnames(vertex.dyn.attrs[[1]]), collapse=", ")
if (nchar(vda) > 50) vda <- paste0(strtrim(vda, 50),"...")
else if (!nchar(vda)) vda <- '-'
nda <- ncol(vertex.dyn.attrs[[1]])
if (nda) vda <- paste0(vda," (",nda, ")")
# Node count plus the first (up to) 8 vertex ids.
nodesl <- paste0(meta$n," (",
paste(head(meta$ids, 8), collapse=", "),
ifelse(meta$n>8, ", ...", "") ,")")
cat(
"Dynamic network of class -diffnet-",
paste(" Name :", meta$name),
paste(" Behavior :", meta$behavior),
paste(" # of nodes :", nodesl ),
paste(" # of time periods :", meta$nper, sprintf("(%d - %d)", meta$pers[1], meta$pers[meta$nper])),
paste(" Type :", ifelse(meta$undirected, "undirected", "directed")),
paste(" Final prevalence :",
formatC(sum(cumadopt[,meta$nper])/meta$n, digits = 2, format="f")
),
paste(" Static attributes :", vsa),
paste(" Dynamic attributes :", vda),
sep="\n"
)
})
# Print methods return their argument invisibly by convention.
invisible(x)
}
#' Summary of diffnet objects
#'
#' @export
#' @param object An object of class \code{\link[=as_diffnet]{diffnet}}.
#' @param slices Either an integer or character vector. While integer vectors are used as
#' indexes, character vectors are used jointly with the time period labels.
#' @param valued Logical scalar. When \code{TRUE} weights will be considered.
#' Otherwise non-zero values will be replaced by ones.
#' @param no.print Logical scalar. When TRUE suppress screen messages.
#' @param skip.moran Logical scalar. When TRUE Moran's I is not reported (see details).
#' @param ... Further arguments to be passed to \code{\link{approx_geodesic}}.
#' @details
#' Moran's I is calculated over the
#' cumulative adoption matrix using as weighting matrix the inverse of the geodesic
#' distance matrix. All this via \code{\link{moran}}. For each time period \code{t},
#' this is calculated as:
#'
#' \preformatted{
#' m = moran(C[,t], G^(-1))
#' }
#'
#' Where \code{C[,t]} is the t-th column of the cumulative adoption matrix,
#' \code{G^(-1)} is the element-wise inverse of the geodesic matrix at time \code{t},
#' and \code{moran} is \pkg{netdiffuseR}'s moran's I routine. When \code{skip.moran=TRUE}
#' Moran's I is not reported. This can be useful for both: reducing computing
#' time and saving memory as geodesic distance matrix can become large. Since
#' version \code{1.18.0}, geodesic matrices are approximated using \code{approx_geodesic}
#' which, as a difference from \code{\link[sna:geodist]{geodist}} from the
#' \pkg{sna} package, and \code{\link[igraph:distances]{distances}} from the
#' \pkg{igraph} package returns a matrix of class \code{dgCMatrix} (more
#' details in \code{\link{approx_geodesic}}).
#'
#' @return A data frame with the following columns:
#' \item{adopt}{Integer. Number of adopters at each time point.}
#' \item{cum_adopt}{Integer. Number of cumulative adopters at each time point.}
#' \item{cum_adopt_pcent}{Numeric. Proportion of comulative adopters at each time point.}
#' \item{hazard}{Numeric. Hazard rate at each time point.}
#' \item{density}{Numeric. Density of the network at each time point.}
#' \item{moran_obs}{Numeric. Observed Moran's I.}
#' \item{moran_exp}{Numeric. Expected Moran's I.}
#' \item{moran_sd}{Numeric. Standard error of Moran's I under the null.}
#' \item{moran_pval}{Numeric. P-value for the observed Moran's I.}
#' @author George G. Vega Yon
#'
#' @examples
#' data(medInnovationsDiffNet)
#' summary(medInnovationsDiffNet)
#'
#' @family diffnet methods
#'
summary.diffnet <- function(
    object,
    slices = NULL,
    no.print = FALSE,
    skip.moran = FALSE,
    valued = getOption("diffnet.valued",FALSE),
    ...) {
  # Default: summarize every time period.
  if (!length(slices)) slices <- 1:object$meta$nper
  # When the network is not treated as valued, replace every stored edge
  # weight with 1 so that counts (not weights) drive the statistics.
  if (!valued)
    for (i in 1:object$meta$nper)
      object$graph[[i]]@x <- rep(1, length(object$graph[[i]]@x))
  # Validate the requested slices against the available time periods.
  test <- !(slices %in% 1:object$meta$nper)
  if (any(test))
    stop("-slices- must be an integer range within 1 and ",object$meta$nper,".")
  slices <- sort(slices)
  meta <- object$meta
  # Density of each requested slice: links / (n * (n - 1)).
  d <- unlist(lapply(object$graph[slices], function(x) {
    nlinks(x)/nnodes(x)/(nnodes(x)-1)
  }))
  # Moran's I on the cumulative adoption vector, weighted by the element-wise
  # inverse of the approximate geodesic matrix. Skipping saves time and the
  # memory held by the geodesic matrices (see roxygen details above).
  if (!skip.moran) {
    m <- matrix(NA, nrow=length(slices), ncol=4,
                dimnames = list(NULL, c("moran_obs", "moran_exp", "moran_sd", "moran_pval")))
    for (i in 1:length(slices)) {
      g <- approx_geodesic(object$graph[[slices[i]]], ...)
      # Element-wise inverse; only the diagonal may hold zeros.
      g@x <- 1/g@x
      m[i,] <- unlist(moran(object$cumadopt[,slices[i]], g))
    }
  }
  # Adopters per period, cumulative adopters (count and %), and hazard rate.
  ad <- colSums(object$adopt[,slices,drop=FALSE])
  ca <- t(cumulative_adopt_count(object$cumadopt))[slices,-3, drop=FALSE]
  hr <- t(hazard_rate(object$cumadopt, no.plot = TRUE))[slices,,drop=FALSE]
  # Censoring: adopters already present in the first period (left) and
  # never-adopters (right).
  lc <- sum(object$toa == meta$pers[1], na.rm = TRUE)
  rc <- sum(is.na(object$toa), na.rm=TRUE)
  out <- data.frame(
    adopt = ad,
    cum_adopt = ca[,1],
    cum_adopt_pcent = ca[,2],
    hazard = hr,
    density=d
  )
  if (!skip.moran) {
    out <- cbind(out, m)
  }
  if (no.print) return(out)
  # Pretty printing ------------------------------------------------------
  header <- c(" Period "," Adopters "," Cum Adopt. (%) ",
              " Hazard Rate "," Density ",
              if (!skip.moran) c(" Moran's I (sd) ") else NULL
  )
  slen <- nchar(header)
  hline <- paste(sapply(sapply(slen, rep.int, x="-"), paste0, collapse=""),
                 collapse=" ")
  rule <- paste0(rep("-", sum(slen) + length(slen) - 1), collapse="")
  # Quick fixed-decimal formatter.
  qf <- function(x, digits=2) sprintf(paste0("%.",digits,"f"), x)
  cat("Diffusion network summary statistics\n",
      "Name : ", meta$name, "\n",
      "Behavior : ", meta$behavior, "\n",
      rule,"\n",sep="")
  cat(header,"\n")
  cat(hline, "\n")
  for (i in 1:nrow(out)) {
    cat(sprintf(
      paste0("%",slen,"s", collapse=" "),
      qf(meta$pers[slices[i]],0), qf(out[i,1],0),
      sprintf("%s (%s)",
              qf(out$cum_adopt[i],0),
              qf(out$cum_adopt_pcent[i])
      ),
      # The hazard rate is undefined for the first printed period.
      ifelse(i==1, "-",qf(out$hazard[i])), qf(out$density[i]),
      if (!skip.moran) {
        if (is.nan(out$moran_sd[i]))
          " - "
        else
          sprintf("%s (%s) %-3s",
                  qf(out$moran_obs[i]),
                  qf(out$moran_sd[i]),
                  ifelse(out$moran_pval[i] <= .01, "***",
                         ifelse(out$moran_pval[i] <= .05, "**",
                                ifelse(out$moran_pval[i] <= .10, "*", ""
                                )))
          )
      } else ""
    ), "\n")
  }
  # Footer. (Bug fix: corrected the misspelled output strings
  # "Right centoring" -> "Right censoring" and "Significane" -> "Significance".)
  cat(
    rule,
    paste(" Left censoring :", sprintf("%3.2f (%d)", lc/meta$n, lc)),
    paste(" Right censoring :", sprintf("%3.2f (%d)", rc/meta$n, rc)),
    paste(" # of nodes :", sprintf("%d",meta$n)),
    "\n Moran's I was computed on contemporaneous autocorrelation using 1/geodesic",
    " values. Significance levels *** <= .01, ** <= .05, * <= .1.",
    sep="\n"
  )
  invisible(out)
}
#' Plot the diffusion process
#'
#' Creates a colored network plot showing the structure of the graph through time
#' (one network plot for each time period) and the set of adopter and non-adopters
#' in the network.
#'
#' @templateVar dynamic TRUE
#' @template graph_template
#' @param cumadopt \eqn{n\times T}{n*T} matrix.
#' @param slices Integer vector. Indicates what slices to plot. By default all are plotted.
#' @param vertex.color A character vector of size 3 with colors names.
#' @param vertex.shape A character vector of size 3 with shape names.
#' @template plotting_template
#' @param mfrow.par Vector of size 2 with number of rows and columns to be passed to \code{\link{par}.}
#' @param main Character scalar. A title template to be passed to \code{\link{sprintf}.}
#' @param ... Further arguments to be passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param legend.args List of arguments to be passed to \code{\link{legend}}.
#' @param background Either a function to be called before plotting each slice, a color
#' to specify the backgroupd color, or \code{NULL} (in which case nothing is done).
#'
#' @details Plotting is done via the function \code{\link[igraph:plot.igraph]{plot.igraph}}.
#'
#' In order to center the attention on the diffusion process itself, the
#' positions of each vertex are computed only once by aggregating the networks
#' through time, this is, instead of computing the layout for each time \eqn{t},
#' the function creates a new graph accumulating links through time.
#'
#' The \code{mfrow.par} sets how to arrange the plots on the device. If \eqn{T=5}
#' and \code{mfrow.par=c(2,3)}, the first three networks will be in the top
#' of the device and the last two in the bottom.
#'
#' The argument \code{vertex.color} contains the colors of non-adopters, new-adopters,
#' and adopters respectively. The new adopters (default color \code{"tomato"}) have a different
#' color that the adopters when the graph is at their time of adoption, hence,
#' when the graph been plotted is in \eqn{t=2} and \eqn{toa=2} the vertex will
#' be plotted in red.
#'
#' \code{legend.args} has the following default parameter:
#' \tabular{ll}{
#' \code{x} \tab \code{"bottom"} \cr
#' \code{legend} \tab \code{c("Non adopters", "New adopters","Adopters")} \cr
#' \code{pch} \tab \code{sapply(vertex.shape, switch, circle = 21, square = 22, 21)} \cr
#' \code{bty} \tab \code{"n"} \cr
#' \code{horiz} \tab \code{TRUE} \cr
#' }
#'
#'
#' @examples
#' # Generating a random graph
#' set.seed(1234)
#' n <- 6
#' nper <- 5
#' graph <- rgraph_er(n,nper, p=.3, undirected = FALSE)
#' toa <- sample(2000:(2000+nper-1), n, TRUE)
#' adopt <- toa_mat(toa)
#'
#' plot_diffnet(graph, adopt$cumadopt)
#' @return Calculated coordinates for the grouped graph (invisible).
#' @family visualizations
#' @keywords hplot
#' @export
#' @author George G. Vega Yon
# S3 generic for plot_diffnet(); dispatch happens on the class of the first
# argument.
plot_diffnet <- function(...) UseMethod("plot_diffnet")
#' @export
#' @rdname plot_diffnet
plot_diffnet.diffnet <- function(graph, ...) {
  # Method for diffnet objects: unpack the object into the pieces the default
  # method expects (graph slices coerced via as_dgCMatrix plus the cumulative
  # adoption matrix) and forward every remaining argument untouched.
  forwarded <- c(
    list(graph = as_dgCMatrix(graph), cumadopt = graph$cumadopt),
    list(...)
  )
  do.call(plot_diffnet.default, forwarded)
}
#' @rdname plot_diffnet
#' @export
plot_diffnet.default <- function(
    graph, cumadopt,
    slices = NULL,
    vertex.color = c("white", "tomato", "steelblue"),
    vertex.shape = c("square", "circle", "circle"),
    vertex.size = "degree",
    mfrow.par = NULL,
    main = c("Network in period %s", "Diffusion Network"),
    legend.args = list(),
    minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)),
    background = NULL,
    ...) {
  # NOTE: this helper fills in the variable named "background" in this frame
  # by name (non-standard evaluation); do not rename the variable.
  set_plotting_defaults("background")
  # Save and restore the graphical parameters we are about to change.
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  # Fill in legend defaults. The condition is TRUE for a non-empty list or an
  # empty *list* (the default); passing a non-list disables the legend setup.
  if (length(legend.args) || (!length(legend.args) && is.list(legend.args))) {
    if (!length(legend.args$x)) legend.args$x <- "bottom"
    if (!length(legend.args$legend))
      legend.args$legend <- c("Non adopters", "New adopters","Adopters")
    if (!length(legend.args$pch)) {
      # Filled point symbols matching the vertex shapes (21 = circle, 22 = square).
      legend.args$pch <- sapply(vertex.shape, switch, circle = 21, square = 22, 21)
    }
    if (!length(legend.args$bty)) legend.args$bty <- "n"
    if (!length(legend.args$horiz)) legend.args$horiz <- TRUE
  }
  igraph.args <- list(...)
  # Coercing into a dgCMatrix list
  graph <- as_dgCMatrix(graph)
  if (!is.list(graph))
    stopifnot_graph(graph)
  # Make sure slices and the adoption matrix share names.
  add_dimnames.list(graph)
  colnames(cumadopt) <- names(graph)
  t <- nslices(graph)
  n <- nrow(graph[[1]])
  # Default slices: at most four, evenly spaced; numeric indices are mapped
  # to slice names.
  if (!length(slices)) {
    slices <- names(graph)[unique(floor(seq(1, t, length.out = min(t, 4))))]
  } else if (is.numeric(slices)) {
    slices <- names(graph)[slices]
  }
  t <- length(slices)
  # Panel grid (rows x columns) when not user-specified.
  if (!length(mfrow.par)) {
    if (t<4) mfrow.par <- c(1,t)
    else if (t==4) mfrow.par <- c(2,2)
    else if (t==5) mfrow.par <- c(2,3)
    else if (t==6) mfrow.par <- c(2,3)
    else if (t==7) mfrow.par <- c(2,4)
    else if (t==8) mfrow.par <- c(2,4)
    else if (t==9) mfrow.par <- c(3,4)
    else if (t==10) mfrow.par <- c(3,4)
    else if (t==11) mfrow.par <- c(3,4)
    else if (t==12) mfrow.par <- c(3,4)
    else mfrow.par <- c(ceiling(t/4),4)
  }
  # Reserve outer margins for the legend (bottom) and overall title (top).
  legend_height_i <- 0
  if (length(legend.args) && length(legend.args$legend)) {
    legend_height_i <- max(sapply(
      legend.args$legend,
      graphics::strheight,
      units="inches",
      cex = if (length(legend.args$cex)) legend.args$cex else NULL
    ))*2.5
  }
  # (Bug fix) The membership test must inspect names(igraph.args) -- `%in%`
  # on the list itself compares against the *values* -- and the element is
  # "cex.main", not "main.cex". Previously a user-supplied cex.main was
  # silently ignored when sizing the title margin.
  main_height_i <- graphics::strheight(
    main[2],
    units = "inches",
    cex = if ("cex.main" %in% names(igraph.args)) igraph.args$cex.main else NULL
  )*1.5
  graphics::par(
    mfrow = mfrow.par, mar = rep(.25,4),
    omi = c(legend_height_i, 0, main_height_i, 0),
    xpd = NA, xaxs = "i", yaxs="i"
  )
  # NOTE: mutates the list named "igraph.args" in this frame by name (NSE).
  set_igraph_plotting_defaults("igraph.args")
  # 3. Plotting ----------------------------------------------------------------
  # Classify each vertex at slice s:
  # - 1: Non adopter
  # - 2: Adopter in s (new adopter)
  # - 3: Adopter prior to s
  set_type <- function() {
    i <- match(s, colnames(cumadopt))
    j <- match(s, slices)
    # First period AND first plotted slice: any adopter is a "new" adopter.
    if (i==1 & j ==1)
      return(ifelse(!cumadopt[,s], 1L, 2L))
    type <- ifelse(!cumadopt[,s] , 1L, NA)
    if (j > 1) {
      # Compare against the previously *plotted* slice.
      type <- ifelse(!is.na(type), type,
                     ifelse(cumadopt[,slices[j-1]], 3L, 2L))
    } else if (i > 1) {
      # First plotted slice but not the first period: compare with period i-1.
      type <- ifelse(!is.na(type), type,
                     ifelse(cumadopt[, i-1], 3L, 2L))
    }
    type
  }
  for (s in slices) {
    # Colors/shapes follow the vertex type (new adopters painted differently).
    type   <- set_type()
    cols   <- vertex.color[type]
    shapes <- vertex.shape[type]
    ig <- igraph::graph_from_adjacency_matrix(graph[[s]], weighted = TRUE)
    # The layout is computed once (on the first slice) and reused afterwards
    # so vertices keep their positions across panels.
    if (!length(igraph.args$layout)) {
      igraph.args$layout <- igraph::layout_nicely(ig)
    } else if (length(igraph.args$layout) && is.function(igraph.args$layout)) {
      igraph.args$layout <- igraph.args$layout(ig)
    }
    graphics::plot.new()
    graphics::plot.window(xlim=c(-1.15,1.15), ylim=c(-1.15,1.15))
    # Background: either a user-supplied function or a fill color.
    if (is.function(background)) background()
    else if (length(background))
      graphics::rect(-1.15,-1.15,1.15,1.15, col=background, border=background)
    do.call(
      igraph::plot.igraph,
      c(
        list(
          ig,
          vertex.color = cols,
          vertex.size = rescale_vertex_igraph(
            compute_vertex_size(graph, vertex.size, match(s, names(graph))),
            minmax.relative.size = minmax.relative.size
          ),
          vertex.shape = shapes
        ),
        igraph.args)
    )
    # Per-panel subtitle, filled with the slice label.
    if (length(main))
      subtitle(x = sprintf(main[1], names(graph[s])))
  }
  # Overall title and legend, drawn on a full-device overlay.
  graphics::par(
    mfrow = c(1,1), mai = rep(0,4), new = TRUE, xpd=NA,
    omi = c(0, 0, main_height_i, 0)
  )
  graphics::plot.new()
  graphics::plot.window(c(0,1), c(0,1))
  if (length(main) > 1)
    title(main = main[2], outer=TRUE)
  if (length(legend.args))
    do.call(graphics::legend, c(legend.args, list(pt.bg=vertex.color)))
  # Return the layout invisibly so callers can reuse the coordinates.
  invisible(igraph.args$layout)
}
#' Threshold levels through time
#'
#' Draws a graph where the coordinates are given by time of adoption, x-axis,
#' and threshold level, y-axis.
#'
#' @templateVar dynamic TRUE
#' @templateVar toa TRUE
#' @templateVar undirected TRUE
#' @template graph_template
#' @param expo \eqn{n\times T}{n * T} matrix. Exposure to the innovation obtained from \code{\link{exposure}}.
#' @param t0 Integer scalar. Passed to \code{\link{threshold}}.
#' @param include_censored Logical scalar. Passed to \code{\link{threshold}}.
#' @param attrs Passed to \code{\link{exposure}} (via threshold).
#' @param no.contemporary Logical scalar. When TRUE, edges for vertices with the same
#' \code{toa} won't be plotted.
#' @param main Character scalar. Title of the plot.
#' @param xlab Character scalar. x-axis label.
#' @param ylab Character scalar. y-axis label.
#' @param vertex.size Numeric vector of size \eqn{n}. Relative size of the vertices.
#' @param vertex.color Either a vector of size \eqn{n} or a scalar indicating colors of the vertices.
#' @param vertex.label Character vector of size \eqn{n}. Labels of the vertices.
#' @param vertex.label.pos Integer value to be passed to \code{\link{text}} via \code{pos}.
#' @param vertex.label.cex Either a numeric scalar or vector of size \eqn{n}. Passed to \code{text}.
#' @param vertex.label.adj Passed to \code{\link{text}}.
#' @param vertex.label.color Passed to \code{\link{text}}.
#' @param jitter.amount Numeric vector of size 2 (for x and y) passed to \code{\link{jitter}}.
#' @param jitter.factor Numeric vector of size 2 (for x and y) passed to \code{\link{jitter}}.
#' @param vertex.frame.color Either a vector of size \eqn{n} or a scalar indicating colors of vertices' borders.
#' @param vertex.sides Either a vector of size \eqn{n} or a scalar indicating the
#' number of sides of each vertex (see details).
#' @param vertex.rot Either a vector of size \eqn{n} or a scalar indicating the
#' rotation in radians of each vertex (see details).
#' @param edge.width Numeric. Width of the edges.
#' @param edge.color Character. Color of the edges.
#' @param arrow.width Numeric value to be passed to \code{\link{arrows}}.
#' @param arrow.length Numeric value to be passed to \code{\link{arrows}}.
#' @param arrow.color Color.
#' @param include.grid Logical. When TRUE, the grid of the graph is drawn.
#' @param bty See \code{\link{par}}.
#' @param xlim Passed to \code{\link{plot}}.
#' @param ylim Passed to \code{\link{plot}}.
#' @param ... Additional arguments passed to \code{\link{plot}}.
#' @param edge.curved Logical scalar. When curved, generates curved edges.
#' @param background TBD
#' @family visualizations
#' @seealso Use \code{\link{threshold}} to retrieve the corresponding threshold
#' obtained returned by \code{\link{exposure}}.
#' @keywords hplot
#'
#' @details When \code{vertex.label=NULL} the function uses vertices ids as labels.
#' By default \code{vertex.label=""} plots no labels.
#'
#' Vertices are drawn using an internal function for generating polygons.
#' Polygons are inscribed in a circle of radius \code{vertex.size}, and can be
#' rotated using \code{vertex.rot}. The number of sides of each polygon
#' is set via \code{vertex.sides}.
#'
#' @examples
#'
#' # Generating a random graph
#' set.seed(1234)
#' n <- 6
#' nper <- 5
#' graph <- rgraph_er(n,nper, p=.3, undirected = FALSE)
#' toa <- sample(2000:(2000+nper-1), n, TRUE)
#' adopt <- toa_mat(toa)
#'
#' # Computing exposure
#' expos <- exposure(graph, adopt$cumadopt)
#'
#' plot_threshold(graph, expos, toa)
#'
#' # Calculating degree (for sizing the vertices)
#' plot_threshold(graph, expos, toa, vertex.size = "indegree")
#'
#' @export
#' @author George G. Vega Yon
plot_threshold <- function(graph, expo, ...) UseMethod("plot_threshold") # S3 generic: dispatches on the class of -graph- (diffnet, array, or default)
#' @export
#' @rdname plot_threshold
plot_threshold.diffnet <- function(graph, expo, ...) {
  # A diffnet object carries its own metadata (toa, directedness, first time
  # period), so any of those arguments the user did not pass explicitly are
  # filled in from the object before delegating to the default method.
  # Note: the first toa may not be the first one, as toa may be stacked to
  # the right (see ?as_diffnet).
  if (missing(expo))
    expo <- exposure(graph)

  dots <- list(...)

  if (!length(dots$undirected))
    dots$undirected <- graph$meta$undirected

  if (!length(dots$t0))
    dots$t0 <- graph$meta$pers[1]

  # The object already knows its times of adoption; a user-supplied -toa-
  # takes precedence but deserves a warning.
  if (length(dots$toa)) {
    warning("While -graph- has its own toa variable, the user is providing one.")
  } else {
    dots$toa <- graph$toa
  }

  do.call(plot_threshold.default, c(list(graph = graph$graph, expo = expo), dots))
}
#' @export
#' @rdname plot_threshold
plot_threshold.array <- function(graph, expo, ...) {
  # Coerce the 3D array into a list of sparse matrices, then delegate.
  sparse_slices <- as_dgCMatrix(graph)
  plot_threshold.default(sparse_slices, expo = expo, ...)
}
#' @export
#' @rdname plot_threshold
plot_threshold.default <- function(
  graph,
  expo,
  toa,
  include_censored = FALSE,
  t0 = min(toa, na.rm = TRUE),
  attrs = NULL,
  undirected = getOption("diffnet.undirected"),
  no.contemporary = TRUE,
  main = "Time of Adoption by\nNetwork Threshold",
  xlab = "Time",
  ylab = "Threshold",
  vertex.size = "degree",
  vertex.color = NULL,
  vertex.label = "",
  vertex.label.pos = NULL,
  vertex.label.cex = 1,
  vertex.label.adj = c(.5,.5),
  vertex.label.color = NULL,
  vertex.sides = 40L,
  vertex.rot = 0,
  edge.width = 2,
  edge.color = NULL,
  arrow.width = NULL,
  arrow.length = NULL,
  arrow.color = NULL,
  include.grid = FALSE,
  vertex.frame.color = NULL,
  bty = "n",
  jitter.factor = c(1,1),
  jitter.amount = c(.25,.025),
  xlim = NULL,
  ylim = NULL,
  edge.curved = NULL,
  background = NULL,
  ...
) {
  # Fill in the NULL graphical arguments from package-level defaults
  # (project helper: appears to assign the listed variables in this frame).
  set_plotting_defaults(c("edge.color", "vertex.frame.color", "vertex.label.color", "edge.curved", "vertex.color", "background", "arrow.color"))
  # # Checking out defaults
  # if (!length(edge.color)) edge.color <- igraph_plotting_defaults$edge.color
  # if (!length(edge.color)) edge.color <- igraph_plotting_defaults$vertex.frame.color
  # Checking if exposure was provided; there is no sensible default for it
  if (missing(expo))
    stop("expo should be provided")
  # Checking the type of graph: normalize to a list of dgCMatrix slices
  graph <- as_dgCMatrix(graph)
  # Step 0: Getting basic info (t = number of slices, n = number of vertices)
  t <- length(graph)
  n <- nrow(graph[[1]])
  # Step 1: Creating the cumulative graph by summing every time slice onto an
  # empty n x n sparse matrix
  # Matrix::sparseMatrix(i={}, j={}, dims=c(n, n))
  cumgraph <- methods::new("dgCMatrix", Dim=c(n,n), p=rep(0L, n+1L))
  for(i in 1:t) {
    cumgraph <- cumgraph + graph[[i]]
  }
  # Creating the pos vector: y = threshold levels, jittered so coincident
  # points remain visible
  y0 <- threshold(expo, toa, t0, attrs=attrs, include_censored=include_censored)
  y <- jitter(y0, factor=jitter.factor[2], amount = jitter.amount[2])
  # Jitter to the xaxis and limits (x = times of adoption)
  jit <- jitter(toa, factor=jitter.factor[1], amount = jitter.amount[1])
  xran <- range(toa, na.rm = TRUE)
  if (!length(xlim)) xlim <- xran + c(-1,1)
  yran <- c(0,1)
  if (!length(ylim)) ylim <- yran + (yran[2] - yran[1])*.1*c(-1,1)
  # Step 2: Checking colors and sizes
  # Computing sizes (vertex.size may name a degree measure, e.g. "degree")
  vertex.size <- compute_vertex_size(graph, vertex.size)
  # Checking sides: validate type and recycle a scalar to length n
  test <- length(vertex.sides)
  if (!inherits(vertex.sides, c("integer", "numeric"))) {
    stop("-vertex.sides- must be integer.")
  } else if (inherits(vertex.sides, "numeric")) {
    warning("-vertex.sides- will be coerced to integer.")
    vertex.sides <- as.integer(vertex.sides)
  }
  if (test == 1) {
    vertex.sides <- rep(vertex.sides, n)
  } else if (test != n) {
    stop("-vertex.sides- must be of the same length as nnodes(graph).")
  }
  # Checking Rotation: same validate/recycle scheme as vertex.sides
  test <- length(vertex.rot)
  if (!inherits(vertex.rot, "integer") & !inherits(vertex.rot, "numeric")) {
    stop("-vertex.rot- must be numeric.")
  } else if (test == 1) {
    vertex.rot <- rep(vertex.rot, n)
  } else if (test != n) {
    stop("-vertex.rot- must be of the same length as nnodes(graph).")
  }
  # Plotting: open an empty canvas with the requested limits
  # oldpar <- par(no.readonly = TRUE)
  graphics::plot(NULL, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, main=main,
    xaxs="i", yaxs="i",...)
  # Should we paint or do something else?
  if (is.function(background)) background()
  else if (length(background))
    graphics::rect(xlim[1], ylim[1], xlim[2], ylim[2], col=background, border=background)
  # Checking: default arrow dimensions are 1/75 of the user coordinate width
  if (!length(arrow.width))
    arrow.width <- with(graphics::par(), (usr[2] - usr[1])/75)
  if (!length(arrow.length))
    arrow.length <- with(graphics::par(), (usr[2] - usr[1])/75)
  # Should there be a grid??
  if (include.grid)
    grid()
  # Now, for y (it should be different)
  xran <- range(xlim, na.rm = TRUE)
  yran <- range(ylim, na.rm = TRUE)
  # Drawing arrows, first we calculate the coordinates of the edges, for this we
  # use the function edges_coords. This considers aspect ratio of the plot.
  vertex.size <- igraph_vertex_rescale(vertex.size, adjust=1)
  edges <- edges_coords(cumgraph, toa, jit, y, vertex.size, undirected, no.contemporary,
    dev=par("pin"), ran=c(xlim[2]-xlim[1], ylim[2]-ylim[1]))
  edges <- as.data.frame(edges)
  ran <- c(xlim[2]-xlim[1], ylim[2]-ylim[1])
  # Plotting the edges: each edge is either an xspline (curved) or a polygon
  # (straight), plus a small polygon arrow head
  mapply(function(x0, y0, x1, y1, col, edge.curved, arrow.color) {
    y <- edges_arrow(x0, y0, x1, y1, width=arrow.width, height=arrow.length,
      beta=pi*(2/3), dev=par("pin"), ran=ran, curved = edge.curved)
    # Drawing arrow
    if (edge.curved) {
      # Edge
      graphics::xspline(
        y$edge[,1],y$edge[,2],
        shape = c(0, 1, 0),
        open=TRUE, border = col, lwd=edge.width)
      # Arrow
      graphics::polygon(y$arrow[,1], y$arrow[,2], col = arrow.color, border = arrow.color)
    } else {
      # Edge
      graphics::polygon(y$edge[,1],y$edge[,2], col = col, border = col, lwd=edge.width)
      # Arrow
      graphics::polygon(y$arrow[,1], y$arrow[,2], col = arrow.color, border = arrow.color)
    }
  }, x0 = edges[,"x0"], y0 = edges[,"y0"], x1 = edges[,"x1"], y1 = edges[,"y1"],
  col = edge.color, edge.curved = edge.curved, arrow.color=arrow.color)
  # Drawing the vertices and its labels
  # Computing the coordinates: polygons inscribed in circles of radius
  # vertex.size, rotated by vertex.rot
  pol <- vertices_coords(jit, y, vertex.size, vertex.sides, vertex.rot, par("pin"), ran)
  # Plotting
  mapply(function(coords,border,col)
    graphics::polygon(coords[,1], coords[,2], border = border, col=col),
    coords = pol, border = vertex.frame.color, col=vertex.color)
  # Positioning labels can be harsh, so we try with this algorithm
  if (!length(vertex.label)) vertex.label <- 1:n
  graphics::text(x=jit, y=y, labels = vertex.label,
    pos = vertex.label.pos,
    cex = vertex.label.cex,
    col = vertex.label.color,
    adj = vertex.label.adj
  )
  # par(oldpar)
  # Invisibly return the plotted coordinates for further use by the caller
  invisible(data.frame(toa=toa,threshold=y0, jit=jit))
}
#' Plot distribution of infect/suscep
#'
#' After calculating infectiousness and susceptibility of each individual on the
#' network, it creates an \code{nlevels} by \code{nlevels} matrix indicating the
#' number of individuals that lie within each cell, and draws a heatmap.
#'
#' @templateVar dynamic TRUE
#' @templateVar toa TRUE
#' @template graph_template
#' @param t0 Integer scalar. See \code{\link{toa_mat}}.
#' @param normalize Logical scalar. Passed to infection/susceptibility.
#' @param K Integer scalar. Passed to infection/susceptibility.
#' @param r Numeric scalar. Passed to infection/susceptibility.
#' @param expdiscount Logical scalar. Passed to infection/susceptibility.
#' @param bins Integer scalar. Size of the grid (\eqn{n}).
#' @param nlevels Integer scalar. Number of levels to plot (see \code{\link{filled.contour}}).
#' @param h Numeric vector of length 2. Passed to \code{\link[MASS:kde2d]{kde2d}} in the \pkg{MASS} package.
#' @param logscale Logical scalar. When TRUE the axis of the plot will be presented in log-scale.
#' @param main Character scalar. Title of the graph.
#' @param xlab Character scalar. Title of the x-axis.
#' @param ylab Character scalar. Title of the y-axis.
#' @param sub Character scalar. Subtitle of the graph.
#' @param color.palette a color palette function to be used to assign colors in the plot (see \code{\link{filled.contour}}).
#' @param include.grid Logical scalar. When TRUE, the grid of the graph is drawn.
#' @param ... Additional parameters to be passed to \code{\link{filled.contour}}.
#' @param exclude.zeros Logical scalar. When TRUE, observations with zero values
#' in infect or suscept are excluded from the graph. This is done explicitly when \code{logscale=TRUE}.
#' @param valued Logical scalar. When FALSE non-zero values in the adjmat are set to one.
#' @details
#'
#' This plotting function was inspired by Aral, S., & Walker, D. (2012).
#'
#' By default the function will try to apply a kernel smooth function via
#' \code{kde2d}. If not possible (because not enough data points), then
#' the user should try changing the parameter \code{h} or set it equal to zero.
#'
#' \code{toa} is passed to \code{infection/susceptibility}.
#'
#' @return A list with three elements:
#' \item{infect}{A numeric vector of size \eqn{n} with infectiousness levels}
#' \item{suscep}{A numeric vector of size \eqn{n} with susceptibility levels}
#' \item{coords}{A list containing the class marks and counts used to draw the
#' plot via \code{\link{filled.contour}} (see \code{\link{grid_distribution}})}
#' \item{complete}{A logical vector with \code{TRUE} when the case was included in
#' the plot. (this is relevant whenever \code{logscale=TRUE})}
#' @family visualizations
#' @seealso Infectiousness and susceptibility are computed via \code{\link{infection}} and
#' \code{\link{susceptibility}}.
#' @keywords hplot
#' @references
#' Aral, S., & Walker, D. (2012). "Identifying Influential and Susceptible Members
#' of Social Networks". Science, 337(6092), 337--341.
#' \url{http://doi.org/10.1126/science.1215842}
#' @export
#' @examples
#' # Generating a random graph -------------------------------------------------
#' set.seed(1234)
#' n <- 100
#' nper <- 20
#' graph <- rgraph_er(n,nper, p=.2, undirected = FALSE)
#' toa <- sample(1:(1+nper-1), n, TRUE)
#'
#' # Visualizing distribution of suscep/infect
#' out <- plot_infectsuscep(graph, toa, K=3, logscale = FALSE)
#' @author George G. Vega Yon
plot_infectsuscep <- function(
  graph,
  toa,
  t0 = NULL,
  normalize = TRUE,
  K = 1L,
  r = 0.5,
  expdiscount = FALSE,
  bins = 20,
  nlevels = round(bins/2),
  h = NULL,
  logscale = TRUE,
  main = "Distribution of Infectiousness and\nSusceptibility",
  xlab = "Infectiousness of ego",
  ylab = "Susceptibility of ego",
  sub = ifelse(logscale, "(in log-scale)", NA),
  color.palette = function(n) viridisLite::viridis(n),
  include.grid = TRUE,
  exclude.zeros = FALSE,
  valued = getOption("diffnet.valued",FALSE),
  ...
) {
  # -toa- can only be omitted when -graph- is a diffnet object, which carries
  # both the times of adoption and the first time period.
  if (missing(toa)) {
    if (!inherits(graph, "diffnet"))
      stop("-toa- should be provided when -graph- is not of class 'diffnet'")
    toa <- graph$toa
    t0  <- min(graph$meta$pers)
  }

  # Fall back to the earliest observed time of adoption
  if (!length(t0))
    t0 <- min(toa, na.rm = TRUE)

  # Manual dispatch on the graph representation
  cls <- class(graph)
  if ("array" %in% cls)
    return(plot_infectsuscep.array(
      graph, toa, t0, normalize, K, r, expdiscount, bins, nlevels, h, logscale,
      main, xlab, ylab, sub, color.palette, include.grid, exclude.zeros,
      valued, ...))

  if ("list" %in% cls)
    return(plot_infectsuscep.list(
      graph, toa, t0, normalize, K, r, expdiscount, bins, nlevels, h, logscale,
      main, xlab, ylab, sub, color.palette, include.grid, exclude.zeros,
      valued, ...))

  if ("diffnet" %in% cls)
    return(plot_infectsuscep.list(
      graph$graph, graph$toa, t0, normalize, K, r, expdiscount, bins, nlevels,
      h, logscale, main, xlab, ylab, sub, color.palette, include.grid,
      exclude.zeros, valued, ...))

  stopifnot_graph(graph)
}
# @export
# @rdname plot_infectsuscep
plot_infectsuscep.array <- function(graph, ...) {
  # Coerce each slice of the 3D array into a sparse matrix and delegate.
  slices <- apply(graph, 3, methods::as, Class = "dgCMatrix")
  plot_infectsuscep.list(slices, ...)
}
# @export
# @rdname plot_infectsuscep
# Workhorse for plot_infectsuscep: computes infectiousness/susceptibility per
# vertex and draws their joint distribution as a filled contour plot.
plot_infectsuscep.list <- function(graph, toa, t0, normalize,
  K, r, expdiscount,
  bins,nlevels,
  h,
  logscale,
  main,
  xlab,
  ylab,
  sub,
  color.palette,
  include.grid, exclude.zeros, valued,
  ...) {
  # Computing infect and suscept (project functions, one value per vertex)
  infect <- infection(graph, toa, t0, normalize, K, r, expdiscount, valued)
  suscep <- susceptibility(graph, toa, t0, normalize, K, r, expdiscount, valued)
  # Keep only vertices with both measures defined
  complete <- complete.cases(infect, suscep)
  # Performing classification (linear)
  if (logscale) {
    infectp<-log(infect)
    suscepp<-log(suscep)
    # Only keeping complete cases: log of zero/negative yields -Inf/NaN
    complete <- complete & is.finite(infectp) & is.finite(suscepp)
    if (any(!complete)) warning("When applying logscale some observations are missing.")
  }
  else {
    infectp <- infect
    suscepp <- suscep
  }
  # NOTE(review): the row-subsetting below suggests infect/suscep are
  # one-column matrices -- confirm against infection()'s return value.
  infectp <- infectp[complete,]
  suscepp <- suscepp[complete,]
  if ((!length(infectp) | !length(suscepp)) & logscale)
    stop("Can't apply logscale (undefined values).")
  # If excluding zeros: drop observations where either measure is exactly zero
  include <- rep(TRUE,length(infectp))
  if (exclude.zeros) {
    include[!infectp | !suscepp] <- FALSE
  }
  # Computing infect & suscept: either a raw 2D histogram (h == 0) or a
  # kernel density estimate via MASS::kde2d
  if (length(h) && h==0) {
    coords <- grid_distribution(infectp[include], suscepp[include], bins)
  } else {
    # Default bandwidths via the normal reference rule, zeros excluded
    if (!length(h)) h <- c(
      MASS::bandwidth.nrd(infectp[include & infectp!=0]),
      MASS::bandwidth.nrd(suscepp[include & suscepp!=0])
    )
    # Cant use smoother
    if (any((h==0) | is.na(h)))
      stop('Not enought data to perform smooth. Try choosing another value for -h-,',
        ' or set h=0 (no kernel smooth).')
    coords <- MASS::kde2d(infectp[include], suscepp[include], n = bins, h = h)
  }
  # Nice plot: cell values normalized so they sum to one
  n <- sum(coords$z)
  with(coords, filled.contour(
    x,y,
    z/n, bty="n", main=main, xlab=xlab, ylab=ylab, sub=sub, color.palette =color.palette,
    xlim=range(x), ylim=range(y),
    plot.axes={
      # Preparing the tickmarks for the axis: ticks are placed in (possibly
      # log) plotting units, but labels show the original scale
      xticks <- pretty(x)
      yticks <- pretty(y)
      if (logscale) {
        xlticks <- exp(xticks)
        ylticks <- exp(yticks)
      } else {
        xlticks <- xticks
        ylticks <- yticks
      }
      # Drawing the axis
      axis(1, xticks, sprintf("%.2f",xlticks))
      axis(2, yticks, sprintf("%.2f",ylticks))
      # Putting the grid
      if (include.grid) grid()
    }, nlevels=nlevels, ...))
  # if (include.grid) grid()
  # Adding some reference: how many observations made it into the plot
  legend("topleft", legend=
    sprintf('\n%d out of %d obs.\nincluded', sum(include), length(complete)),
    bty="n")
  invisible(list(infect=infect, suscept=suscep, coords=coords,
    complete=complete))
}
#' Visualize adopters and cumulative adopters
#' @param obj Either a diffnet object or a cumulative adoption matrix.
#' @param freq Logical scalar. When TRUE frequencies are plotted instead of proportions.
#' @param what Character vector of length 2. What to plot.
#' @param add Logical scalar. When TRUE lines and dots are added to the current graph.
#' @param include.legend Logical scalar. When TRUE a legend of the graph is plotted.
#' @param pch Integer vector of length 2. See \code{\link{matplot}}.
#' @param type Character vector of length 2. See \code{\link{matplot}}.
#' @param ylim Numeric vector of length 2. Sets the plotting limit for the y-axis.
#' @param lty Numeric vector of length 2. See \code{\link{matplot}}.
#' @param col Character vector of length 2. See \code{\link{matplot}}.
#' @param bg Character vector of length 2. See \code{\link{matplot}}.
#' @param xlab Character scalar. Name of the x-axis.
#' @param ylab Character scalar. Name of the y-axis.
#' @param main Character scalar. Title of the plot
#' @param ... Further arguments passed to \code{\link{matplot}}.
#' @param include.grid Logical scalar. When TRUE, the grid of the graph is drawn
#' @family visualizations
#' @examples
#' # Generating a random diffnet -----------------------------------------------
#' set.seed(821)
#' diffnet <- rdiffnet(100, 5, seed.graph="small-world", seed.nodes="central")
#'
#' plot_adopters(diffnet)
#'
#' # Alternatively, we can use a TOA Matrix
#' toa <- sample(c(NA, 2010L,2015L), 20, TRUE)
#' mat <- toa_mat(toa)
#' plot_adopters(mat$cumadopt)
#' @return A matrix as described in \code{\link{cumulative_adopt_count}}.
#' @export
#' @author George G. Vega Yon
plot_adopters <- function(
  obj,
  freq = FALSE,
  what = c("adopt","cumadopt"),
  add = FALSE,
  include.legend = TRUE,
  include.grid = TRUE,
  pch = c(21,24),
  type = c("b", "b"),
  ylim = if (!freq) c(0,1) else NULL,
  lty = c(1,1),
  col = c("black","black"),
  bg = c("tomato","gray"),
  xlab = "Time",
  ylab = ifelse(freq, "Frequency", "Proportion"),
  main = "Adopters and Cumulative Adopters",
  ...
) {
  # Checking what: only these two curves are supported
  if (any(!(what %in% c("adopt", "cumadopt"))))
    stop("Invalid curve to plot. -what- must be in c(\"adopt\",\"cumadopt\").")
  # Computing the TOA mat: cumulative counts plus per-period adopter counts
  if (inherits(obj, "diffnet")) {
    cumadopt <- cumulative_adopt_count(obj)
    adopt <- colSums(obj$adopt)
    n <- obj$meta$n
  }
  else {
    cumadopt <- cumulative_adopt_count(obj)
    # First differences of the cumulative count recover per-period adopters
    adopt <- cumadopt["num",] - c(0,cumadopt["num",1:(ncol(cumadopt)-1)])
    n <- nrow(obj)
  }
  # Keep the raw cumulative matrix to return invisibly at the end
  out <- cumadopt
  # In the case that the user wants pcent (the default)
  if (!freq) {
    cumadopt <- cumadopt/n
    adopt <- adopt/n
  }
  # Time names...
  times <- colnames(cumadopt)
  # A single NA in ylim means "let matplot choose the limits"
  if ((length(ylim) == 1) && is.na(ylim))
    ylim <- NULL
  # Building matrix to plot: one column per requested curve.
  # Note: -n- is reused here as the number of time periods.
  k <- length(what)
  n <- length(times)
  mat <- matrix(ncol=k, nrow=n)
  if ("cumadopt" %in% what) mat[,1] <- cumadopt["num",]
  if ("adopt" %in% what) mat[,k] <- adopt
  # Fixing parameters: when a single curve is requested, subset the length-2
  # graphical defaults so they line up with the selected curve
  test <- c("cumadopt" %in% what, "adopt" %in% what)
  if (length(type) > k) type <- type[test]
  if (length(lty) > k) lty <- lty[test]
  if (length(col) > k) col <- col[test]
  if (length(bg) > k) bg <- bg[test]
  if (length(pch) > k) pch <- pch[test]
  matplot(times, y=mat, ylim=ylim, add=add, type=type,
    lty=lty, col=col, xlab=xlab, ylab=ylab, main=main, pch=pch,
    bg=bg,...)
  # If not been added (i.e. this call created a fresh plot), decorate it
  if (!add) {
    if (include.legend)
      legend("topleft", bty="n", pch=pch,
        legend = c("Cumulative adopters", "Adopters")[test], pt.bg = bg, col=col)
    if (include.grid)
      grid()
  }
  invisible(out)
}
# x <- cumulative_adopt_count(diffnet)
# z <- x["num",] - c(0,x["num",1:(ncol(x)-1)])
# cumsum(z)
# x["num",]
#' \code{diffnet} Arithmetic and Logical Operators
#'
#' Addition, subtraction, network power of diffnet and logical operators such as
#' \code{&} and \code{|} as objects
#'
#' @param x A \code{diffnet} class object.
#' @param y Integer scalar. Power of the network
#' @param valued Logical scalar. When FALSE all non-zero entries of the adjacency
#' matrices are set to one.
#'
#' @details Binary operators ease the data management process when working
#' with diffnet objects.
#'
#' By default the binary operator \code{^} assumes that the graph is valued,
#' hence the power is computed using weighted edges. Otherwise, if more control
#' is needed, the user can use \code{graph_power} instead.
#'
#' @return A diffnet class object
#'
#' @examples
#' # Computing two-steps away threshold with the Brazilian farmers data --------
#' data(brfarmersDiffNet)
#'
#' expo1 <- threshold(brfarmersDiffNet)
#' expo2 <- threshold(brfarmersDiffNet^2)
#'
#' # Computing correlation
#' cor(expo1,expo2)
#'
#' # Drawing a qqplot
#' qqplot(expo1, expo2)
#'
#' # Working with inverse ------------------------------------------------------
#' brf2_step <- brfarmersDiffNet^2
#' brf2_step <- 1/brf2_step
#'
#' @export
#' @name diffnet-arithmetic
#' @family diffnet methods
`^.diffnet` <- function(x, y) {
  # Network power: multiply each time slice by itself (y - 1) times.
  # Powers below two leave the object untouched.
  if (y < 2)
    return(x)
  for (s in seq_len(x$meta$nper)) {
    slice0 <- x$graph[[s]]
    acc <- slice0
    for (p in seq_len(y - 1))
      acc <- acc %*% slice0
    x$graph[[s]] <- acc
  }
  x
}
#' @rdname diffnet-arithmetic
#' @export
graph_power <- function(x, y, valued=getOption("diffnet.valued", FALSE)) {
  # Unless the graph is valued, flatten every stored edge weight to one
  # before taking the network power.
  if (!valued) {
    for (s in seq_len(x$meta$nper)) {
      nnz <- length(x$graph[[s]]@x)
      x$graph[[s]]@x <- rep(1, nnz)
    }
  }
  x^y
}
#' @rdname diffnet-arithmetic
#' @export
`/.diffnet` <- function(y, x) {
  # Scalar / diffnet (or diffnet / scalar): divide element-wise over the
  # non-zero entries (@x slot) of every time slice.
  x_is_net <- inherits(x, "diffnet")
  y_is_net <- inherits(y, "diffnet")

  if (x_is_net && (inherits(y, "numeric") | inherits(y, "integer"))) {
    for (s in seq_len(x$meta$nper))
      x$graph[[s]]@x <- y/(x$graph[[s]]@x)
    return(x)
  }

  if (y_is_net && (inherits(x, "numeric") | inherits(x, "integer"))) {
    for (s in seq_len(y$meta$nper))
      y$graph[[s]]@x <- x/(y$graph[[s]]@x)
    return(y)
  }

  stop("No method for x:", class(x), " and y:", class(y))
}
#' @rdname diffnet-arithmetic
#' @export
#' @examples
#' # Removing the first 3 vertices of medInnovationsDiffNet --------------------
#' data(medInnovationsDiffNet)
#'
#' # Using a diffnet object
#' first3Diffnet <- medInnovationsDiffNet[1:3,,]
#' medInnovationsDiffNet - first3Diffnet
#'
#' # Using indexes
#' medInnovationsDiffNet - 1:3
#'
#' # Using ids
#' medInnovationsDiffNet - as.character(1001:1003)
`-.diffnet` <- function(x, y) {
  if (inherits(x, "diffnet") & inherits(y, "diffnet")) {
    # diffnet - diffnet: drop from -x- every vertex whose id appears in -y-
    drop_these <- which(x$meta$ids %in% y$meta$ids)
    x[-drop_these, , drop=FALSE]
  } else if (inherits(x, "diffnet") & any(class(y) %in% c("integer", "numeric"))) {
    # diffnet - indices: drop vertices by position
    x[-y,, drop=FALSE]
  } else if (inherits(x, "diffnet") & inherits(y, "character")) {
    # diffnet - ids: validate the labels first, then drop by position
    unknown <- which(!(y %in% x$meta$ids))
    if (length(unknown))
      stop("Some elements in -y- (right-hand side of the expression) are not ",
        "in the set of ids of the diffnet object:\n\t",
        paste0(y[unknown], collapse=", "),".")
    pos <- which(x$meta$ids %in% y)
    x[-pos,,drop=FALSE]
  } else
    stop("Subtraction between -",class(x),"- and -", class(y), "- not supported.")
}
#' @export
#' @rdname diffnet-arithmetic
`*.diffnet` <- function(x, y) {
  if (inherits(x, "diffnet") & inherits(y, "diffnet")) {
    # Element-wise product of two diffnets requires conformable graphs
    if (!all(dim(x) == dim(y)))
      stop('Both -x- and -y- must have the same dimensions.')
    x$graph <- mapply(`*`, x$graph, y$graph)
    return(x)
  }

  if (inherits(x, "diffnet") & is.numeric(y)) {
    # diffnet scaled by a numeric value
    x$graph <- mapply(`*`, x$graph, y)
    return(x)
  }

  stop("Multiplication between -",class(x),"- and -", class(y), "- not supported.")
}
#' @export
#' @rdname diffnet-arithmetic
`&.diffnet` <- function(x, y) {
  # Slice-wise logical AND, coercing each result back to sparse form
  and_slice <- function(a, b) methods::as(a & b, "dgCMatrix")
  x$graph <- mapply(and_slice, x$graph, y$graph)
  x
}
#' @export
#' @rdname diffnet-arithmetic
`|.diffnet` <- function(x, y) {
  # Slice-wise logical OR, coercing each result back to sparse form
  or_slice <- function(a, b) methods::as(a | b, "dgCMatrix")
  x$graph <- mapply(or_slice, x$graph, y$graph)
  x
}
#' Matrix multiplication
#'
#' Matrix multiplication methods, including \code{\link{diffnet}}
#' objects. This function creates a generic method for \code{\link[base:matmult]{\%*\%}}
#' allowing for multiplying diffnet objects.
#'
#' @param x Numeric or complex matrices or vectors, or \code{diffnet} objects.
#' @param y Numeric or complex matrices or vectors, or \code{diffnet} objects.
#'
#' @details This function can be useful to generate alternative graphs, for
#' example, users could compute the n-steps graph by doing \code{net \%*\% net}
#' (see examples).
#'
#' @return In the case of \code{diffnet} objects performs matrix multiplication
#' via \code{\link{mapply}} using \code{x$graph} and \code{y$graph} as arguments,
#' returning a \code{diffnet}. Otherwise returns the default according to
#' \code{\link[base:matmult]{\%*\%}}.
#'
#' @examples
#' # Finding the Simmelian Ties network ----------------------------------------
#'
#' # Random diffnet graph
#' set.seed(773)
#' net <- rdiffnet(100, 4, seed.graph='small-world', rgraph.args=list(k=8))
#' netsim <- net
#'
#' # According to Dekker (2006), Simmelian ties can be computed as follows
#' netsim <- net * t(net) # Keeping mutual
#' netsim <- netsim * (netsim %*% netsim)
#'
#' # Checking out differences (netsim should have less)
#' nlinks(net)
#' nlinks(netsim)
#'
#' mapply(`-`, nlinks(net), nlinks(netsim))
#'
#' @export
#' @rdname diffnetmatmult
#' @family diffnet methods
`%*%` <- function(x, y) UseMethod("%*%") # Masks base's %*% with an S3 generic so diffnet methods can dispatch
#' @export
#' @rdname diffnetmatmult
`%*%.default` <- function(x, y) {
  # Manual dispatch: a diffnet on the right-hand side goes to the diffnet
  # method; anything else falls back to the ordinary matrix product.
  if (inherits(y, "diffnet"))
    return(`%*%.diffnet`(x, y))
  base::`%*%`(x = x, y = y)
}
#' @export
#' @rdname diffnetmatmult
`%*%.diffnet` <- function(x, y) {
  # Replicate a single (constant) matrix into one sparse slice per time
  # period of -z-, carrying over the slice names.
  as_slice_list <- function(w, z) {
    out <- lapply(seq_len(nslices(z)), function(u) methods::as(w, "dgCMatrix"))
    names(out) <- dimnames(z)[[3]]
    out
  }

  x_net <- inherits(x, "diffnet")
  y_net <- inherits(y, "diffnet")

  if (x_net && y_net) {
    # Slice-wise matrix product of two diffnets
    x$graph <- mapply(base::`%*%`, x$graph, y$graph)
  } else if (x_net && !y_net) {
    # diffnet %*% matrix: the matrix must be square and n x n
    if (identical(rep(dim(x)[1],2), dim(y)))
      x$graph <- mapply(base::`%*%`, x$graph, as_slice_list(y, x))
    else stop("-y- must have the same dimension as -x-")
  } else if (y_net && !x_net) {
    # matrix %*% diffnet
    if (identical(rep(dim(y)[1],2), dim(x))) {
      y$graph <- mapply(base::`%*%`, as_slice_list(x, y), y$graph)
      return(y)
    }
    else stop("-y- must have the same dimension as -x-")
  }
  x
}
#' Coerce a diffnet graph into an array
#'
#' @param x A diffnet object.
#' @param ... Ignored.
#' @details
#' The function takes the list of sparse matrices stored in \code{x} and creates
#' an array with them. Attributes and other elements from the diffnet object are
#' dropped.
#'
#' \code{dimnames} are obtained from the metadata of the diffnet object.
#'
#' @return A three-dimensional array of \eqn{T} matrices of size \eqn{n\times n}{n * n}.
#' @seealso \code{\link{diffnet}}.
#' @family diffnet methods
#' @examples
#' # Creating a random diffnet object
#' set.seed(84117)
#' mydiffnet <- rdiffnet(30, 5)
#'
#' # Coercing it into an array
#' as.array(mydiffnet)
#' @export
as.array.diffnet <- function(x, ...) {
  # Densify every time slice, then stack them into an n x n x nper array.
  dense <- lapply(x$graph, as.matrix)
  out <- array(
    dim = c(x$meta$n, x$meta$n, x$meta$nper),
    dimnames = list(x$meta$ids, x$meta$ids, x$meta$pers)
  )
  for (s in seq_along(dense))
    out[, , s] <- dense[[s]]
  out
}
#' Count the number of vertices/edges/slices in a graph
#'
#' @template graph_template
#' @return For \code{nvertices} and \code{nslices}, an integer scalar equal to the number
#' of vertices and slices in the graph. Otherwise, from \code{nedges}, either a list
#' of size \eqn{t} with the counts of edges (non-zero elements in the adjacency matrices) at
#' each time period, or, when \code{graph} is static, a single scalar with
#' such number.
#' @details
#' \code{nnodes} and \code{nlinks} are just aliases for \code{nvertices} and
#' \code{nedges} respectively.
#' @export
#' @examples
#' # Creating a dynamic graph (we will use this for all the classes) -----------
#' set.seed(13133)
#' diffnet <- rdiffnet(100, 4)
#'
#' # Lets use the first time period as a static graph
#' graph_mat <- diffnet$graph[[1]]
#' graph_dgCMatrix <- methods::as(graph_mat, "dgCMatrix")
#'
#' # Now lets generate the other dynamic graphs
#' graph_list <- diffnet$graph
#' graph_array <- as.array(diffnet) # using the as.array method for diffnet objects
#'
#' # Now we can compare vertices counts
#' nvertices(diffnet)
#' nvertices(graph_list)
#' nvertices(graph_array)
#'
#' nvertices(graph_mat)
#' nvertices(graph_dgCMatrix)
#'
#' # ... and edges count
#' nedges(diffnet)
#' nedges(graph_list)
#' nedges(graph_array)
#'
#' nedges(graph_mat)
#' nedges(graph_dgCMatrix)
nvertices <- function(graph) {
  # Vertex count for any supported graph representation.
  kind <- class(graph)
  if (any(kind %in% c("array", "matrix", "dgCMatrix")))
    return(nrow(graph))
  if ("list" %in% kind)
    return(nrow(graph[[1]]))
  if ("diffnet" %in% kind)
    return(graph$meta$n)
  if ("igraph" %in% kind)
    return(igraph::vcount(graph))
  if ("network" %in% kind)
    return(network::network.size(graph))
  stopifnot_graph(graph)
}
#' @rdname nvertices
#' @export
nnodes <- nvertices # Alias of nvertices, kept for backward compatibility
#' @export
#' @rdname nvertices
nedges <- function(graph) {
  # Edge count: a scalar for static graphs, a named list (one element per
  # time slice) for dynamic ones.
  kind <- class(graph)
  if ("matrix" %in% kind)
    return(sum(graph != 0))
  if ("array" %in% kind) {
    # Computing one count per slice, coercing into a list
    counts <- as.list(apply(graph, 3, function(s) sum(s != 0)))
    # Naming
    if (!length(names(counts)))
      names(counts) <- 1:length(counts)
    return(counts)
  }
  if ("dgCMatrix" %in% kind)
    return(length(graph@i))
  if ("list" %in% kind) {
    # Stored-entry count per sparse slice
    counts <- lapply(graph, function(s) length(s@i))
    # Naming
    if (!length(names(counts)))
      names(counts) <- 1:length(counts)
    return(counts)
  }
  if ("diffnet" %in% kind)
    return(lapply(graph$graph, function(s) sum(s@x != 0)))
  if ("igraph" %in% kind)
    return(igraph::ecount(graph))
  if ("network" %in% kind)
    return(network::network.edgecount(graph))
  stopifnot_graph(graph)
}
#' @export
#' @rdname nvertices
# -nlinks- is a plain alias of -nedges- (same function object).
nlinks <- nedges
#' @export
#' @rdname nvertices
# Number of time slices spanned by -graph-: 1 for static representations
# (matrix/dgCMatrix), the 3rd dimension for arrays, the number of periods
# for diffnet objects, and the list length for lists of slices.
nslices <- function(graph) {
  kls <- class(graph)
  if ("matrix" %in% kls)
    return(1L)
  if ("array" %in% kls)
    return(dim(graph)[3])
  if ("dgCMatrix" %in% kls)
    return(1L)
  if ("diffnet" %in% kls)
    return(graph$meta$nper)
  if ("list" %in% kls)
    return(length(graph))
  stopifnot_graph(graph)
}
#' @export
#' @rdname diffnet-class
# Retrieve the vertex ids of -graph-: the stored ids for diffnet objects,
# otherwise the rownames of the (first) adjacency matrix. Errors when the
# graph carries no vertex names.
nodes <- function(graph) {
  cls <- class(graph)
  if ("diffnet" %in% cls)
    return(graph$meta$ids)
  else if ("list" %in% cls) {
    # Dynamic graph: names are taken from the first slice.
    ans <- rownames(graph[[1]])
    # Grammar fix in user-facing message: was "There are not names to fetch".
    if (!length(ans)) stop("There are no names to fetch")
    else return(ans)
  } else if (any(c("matrix", "dgCMatrix", "array") %in% cls)) {
    ans <- rownames(graph)
    if (!length(ans)) stop("There are no names to fetch")
    else return(ans)
  }
  else stopifnot_graph(graph)
}
#' @export
#' @rdname diffnet-class
#' @param FUN a function to be passed to lapply
# Apply FUN over each time slice of a diffnet object. FUN receives the slice
# index plus that slice's graph, toa, static/dynamic attributes, adoption
# columns, and the shared metadata; extra arguments in ... are forwarded.
diffnetLapply <- function(graph, FUN, ...) {
  slice_call <- function(i, graph, ...) {
    FUN(
      i,
      graph               = graph$graph[[i]],
      toa                 = graph$toa,
      vertex.static.attrs = graph$vertex.static.attrs,
      vertex.dyn.attrs    = graph$vertex.dyn.attrs[[i]],
      adopt               = graph$adopt[, i, drop = FALSE],
      cumadopt            = graph$cumadopt[, i, drop = FALSE],
      meta                = graph$meta
    )
  }
  lapply(seq_len(nslices(graph)), slice_call, graph = graph, ...)
}
# debug(diffnetLapply)
# diffnetLapply(medInnovationsDiffNet, function(x, graph, cumadopt, ...) {
# sum(cumadopt)
# })
#' @export
#' @rdname diffnet-class
# Display the internal structure of a diffnet object; the class is stripped
# so utils::str shows the underlying list rather than re-dispatching.
str.diffnet <- function(object, ...) {
  plain <- unclass(object)
  utils::str(plain)
}
#' @export
#' @rdname diffnet-class
# dimnames for a diffnet object: vertex ids, attribute names (static columns
# followed by first-period dynamic names), and time period labels.
dimnames.diffnet <- function(x) {
  ids   <- x$meta$ids
  attrs <- c(colnames(x$vertex.static.attrs), names(x$vertex.dyn.attrs[[1]]))
  pers  <- x$meta$pers
  list(ids, attrs, pers)
}
#' @export
#' @rdname diffnet-class
#' @method t diffnet
# Transpose every time slice of the diffnet's graph (reverses edge
# direction), using the S4 "t" method registered for dgCMatrix.
t.diffnet <- function(x) {
  transpose <- getMethod("t", "dgCMatrix")
  x$graph <- lapply(x$graph, transpose)
  x
}
#' @rdname diffnet-class
#' @export
# dim for a diffnet object: c(n vertices, k attributes, t periods), where k
# counts static columns plus first-period dynamic attribute names.
dim.diffnet <- function(x) {
  nattrs <- length(colnames(x$vertex.static.attrs)) +
    length(names(x$vertex.dyn.attrs[[1]]))
  as.integer(c(x$meta$n, nattrs, x$meta$nper))
}
#' S3 plotting method for diffnet objects.
#'
#' @param x An object of class \code{\link[=diffnet-class]{diffnet}}
#' @param t Integer scalar indicating the time slice to plot.
#' @param vertex.color Character scalar/vector. Color of the vertices.
#' @template plotting_template
#' @param main Character. A title template to be passed to sprintf.
#' @param ... Further arguments passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param y Ignored.
#' @export
#'
#' @family diffnet methods
#'
#' @return A matrix with the coordinates of the vertices.
#' @author George G. Vega Yon
#' @examples
#'
#' data(medInnovationsDiffNet)
#' plot(medInnovationsDiffNet)
#'
#'
plot.diffnet <- function(
    x, y = NULL, t = 1,
    vertex.color = c(adopt = "steelblue", noadopt = "white"),
    vertex.size = "degree",
    main = "Diffusion network in time %d",
    minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)),
    ...) {
  # Collect the extra arguments; these are forwarded to igraph::plot.igraph.
  igraph.args <- list(...)
  # Validate the requested slice against the object's period count.
  if (!(t %in% 1:x$meta$nper))
    stop("-t- must be an integer within 1 and ", x$meta$nper, ".")
  # Extract slice -t- as an igraph object (project helper).
  graph <- diffnet_to_igraph(x)[[t]]
  # Vertex colors: first element ("adopt") for cumulative adopters at time t,
  # second element ("noadopt") for everyone else.
  cols <- with(x, ifelse(cumadopt[, t], vertex.color[1], vertex.color[2]))
  # NOTE(review): this helper appears to fill -igraph.args- by name in the
  # calling frame (it receives the variable's name, not its value) -- confirm.
  set_igraph_plotting_defaults("igraph.args")
  # Compute a layout only when the caller did not supply one.
  if (!length(igraph.args$layout))
    igraph.args$layout <- igraph::layout_nicely(graph)
  igraph.args$vertex.color <- cols
  # Open a fresh plotting frame with a fixed [-1, 1] coordinate system.
  graphics::plot.new()
  graphics::plot.window(
    xlim = c(-1, 1),
    ylim = c(-1, 1)
  )
  # Vertex sizes: compute (e.g. from degree) then rescale to relative bounds.
  igraph.args$vertex.size <-
    rescale_vertex_igraph(
      compute_vertex_size(x$graph[[t]], vertex.size),
      minmax.relative.size = minmax.relative.size
    )
  # Draw the network with all accumulated arguments.
  do.call(igraph::plot.igraph, c(
    list(
      x = graph
    ), igraph.args))
  # -main- is an sprintf template receiving the period label.
  if (length(main))
    graphics::title(main = sprintf(main, x$meta$pers[t]))
  # Return the layout invisibly so callers can reuse the coordinates.
  invisible(igraph.args$layout)
}
#' @export
#' @rdname diffnet-class
# Print a one-screen, human-readable summary of a diffnet object.
print.diffnet <- function(x, ...) {
  with(x, {
    # Static attribute names, truncated to ~50 chars; "-" when there are none;
    # the count in parentheses is appended when at least one exists.
    vsa <- paste0(colnames(vertex.static.attrs), collapse = ", ")
    if (nchar(vsa) > 50) vsa <- paste0(strtrim(vsa, 50), "...")
    else if (!nchar(vsa)) vsa <- '-'
    nsa <- ncol(vertex.static.attrs)
    if (nsa) vsa <- paste0(vsa, " (", nsa, ")")
    # Dynamic attribute names (taken from the first period), same treatment.
    vda <- paste0(colnames(vertex.dyn.attrs[[1]]), collapse = ", ")
    if (nchar(vda) > 50) vda <- paste0(strtrim(vda, 50), "...")
    else if (!nchar(vda)) vda <- '-'
    nda <- ncol(vertex.dyn.attrs[[1]])
    if (nda) vda <- paste0(vda, " (", nda, ")")
    # Node count plus the first eight ids ("..." marks truncation).
    nodesl <- paste0(meta$n, " (",
                     paste(head(meta$ids, 8), collapse = ", "),
                     ifelse(meta$n > 8, ", ...", ""), ")")
    cat(
      "Dynamic network of class -diffnet-",
      paste(" Name :", meta$name),
      paste(" Behavior :", meta$behavior),
      paste(" # of nodes :", nodesl),
      paste(" # of time periods :", meta$nper, sprintf("(%d - %d)", meta$pers[1], meta$pers[meta$nper])),
      paste(" Type :", ifelse(meta$undirected, "undirected", "directed")),
      # Final prevalence: share of nodes that adopted by the last period.
      paste(" Final prevalence :",
            formatC(sum(cumadopt[, meta$nper])/meta$n, digits = 2, format = "f")
      ),
      paste(" Static attributes :", vsa),
      paste(" Dynamic attributes :", vda),
      sep = "\n"
    )
  })
  # Standard print-method contract: return the object invisibly.
  invisible(x)
}
#' Summary of diffnet objects
#'
#' @export
#' @param object An object of class \code{\link[=as_diffnet]{diffnet}}.
#' @param slices Either an integer or character vector. While integer vectors are used as
#' indexes, character vectors are used jointly with the time period labels.
#' @param valued Logical scalar. When \code{TRUE} weights will be considered.
#' Otherwise non-zero values will be replaced by ones.
#' @param no.print Logical scalar. When TRUE suppress screen messages.
#' @param skip.moran Logical scalar. When TRUE Moran's I is not reported (see details).
#' @param ... Further arguments to be passed to \code{\link{approx_geodesic}}.
#' @details
#' Moran's I is calculated over the
#' cumulative adoption matrix using as weighting matrix the inverse of the geodesic
#' distance matrix. All this via \code{\link{moran}}. For each time period \code{t},
#' this is calculated as:
#'
#' \preformatted{
#' m = moran(C[,t], G^(-1))
#' }
#'
#' Where \code{C[,t]} is the t-th column of the cumulative adoption matrix,
#' \code{G^(-1)} is the element-wise inverse of the geodesic matrix at time \code{t},
#' and \code{moran} is \pkg{netdiffuseR}'s moran's I routine. When \code{skip.moran=TRUE}
#' Moran's I is not reported. This can be useful for both: reducing computing
#' time and saving memory as geodesic distance matrix can become large. Since
#' version \code{1.18.0}, geodesic matrices are approximated using \code{approx_geodesic}
#' which, as a difference from \code{\link[sna:geodist]{geodist}} from the
#' \pkg{sna} package, and \code{\link[igraph:distances]{distances}} from the
#' \pkg{igraph} package returns a matrix of class \code{dgCMatrix} (more
#' details in \code{\link{approx_geodesic}}).
#'
#' @return A data frame with the following columns:
#' \item{adopt}{Integer. Number of adopters at each time point.}
#' \item{cum_adopt}{Integer. Number of cumulative adopters at each time point.}
#' \item{cum_adopt_pcent}{Numeric. Proportion of cumulative adopters at each time point.}
#' \item{hazard}{Numeric. Hazard rate at each time point.}
#' \item{density}{Numeric. Density of the network at each time point.}
#' \item{moran_obs}{Numeric. Observed Moran's I.}
#' \item{moran_exp}{Numeric. Expected Moran's I.}
#' \item{moran_sd}{Numeric. Standard error of Moran's I under the null.}
#' \item{moran_pval}{Numeric. P-value for the observed Moran's I.}
#' @author George G. Vega Yon
#'
#' @examples
#' data(medInnovationsDiffNet)
#' summary(medInnovationsDiffNet)
#'
#' @family diffnet methods
#'
summary.diffnet <- function(
    object,
    slices = NULL,
    no.print = FALSE,
    skip.moran = FALSE,
    valued = getOption("diffnet.valued", FALSE),
    ...) {
  # Default: summarize every time period.
  if (!length(slices)) slices <- seq_len(object$meta$nper)
  # When not valued, dichotomize: every stored edge weight becomes 1.
  if (!valued)
    for (i in seq_len(object$meta$nper))
      object$graph[[i]]@x <- rep(1, length(object$graph[[i]]@x))
  # The requested slices must all be valid period indexes.
  test <- !(slices %in% seq_len(object$meta$nper))
  if (any(test))
    stop("-slices- must be an integer range within 1 and ", object$meta$nper, ".")
  slices <- sort(slices)
  # To make notation nicer
  meta <- object$meta
  # Density per selected slice: edges over n*(n-1) possible directed ties.
  d <- unlist(lapply(object$graph[slices], function(x) {
    nlinks(x)/nnodes(x)/(nnodes(x) - 1)
  }))
  # Moran's I per slice, weighting by the elementwise inverse of the
  # approximate geodesic distance matrix (see @details above).
  if (!skip.moran) {
    m <- matrix(NA, nrow = length(slices), ncol = 4,
                dimnames = list(NULL, c("moran_obs", "moran_exp", "moran_sd", "moran_pval")))
    for (i in seq_along(slices)) {
      # Computing distances
      g <- approx_geodesic(object$graph[[slices[i]]], ...)
      # Inverting it (only the diagonal may have 0)
      g@x <- 1/g@x
      m[i, ] <- unlist(moran(object$cumadopt[, slices[i]], g))
    }
  }
  # Adopters, cumulative adopters (count and %), and hazard rate per slice.
  ad <- colSums(object$adopt[, slices, drop = FALSE])
  ca <- t(cumulative_adopt_count(object$cumadopt))[slices, -3, drop = FALSE]
  hr <- t(hazard_rate(object$cumadopt, no.plot = TRUE))[slices, , drop = FALSE]
  # Censoring: adopters at the very first period (left) / never-adopters (right).
  lc <- sum(object$toa == meta$pers[1], na.rm = TRUE)
  rc <- sum(is.na(object$toa), na.rm = TRUE)
  out <- data.frame(
    adopt = ad,
    cum_adopt = ca[, 1],
    cum_adopt_pcent = ca[, 2],
    hazard = hr,
    density = d
  )
  if (!skip.moran) {
    out <- cbind(out, m)
  }
  if (no.print) return(out)
  # Fixed-width header; each entry's width drives its column format below.
  header <- c(" Period ", " Adopters ", " Cum Adopt. (%) ",
              " Hazard Rate ", " Density ",
              if (!skip.moran) c(" Moran's I (sd) ") else NULL
  )
  slen <- nchar(header)
  hline <- paste(sapply(sapply(slen, rep.int, x = "-"), paste0, collapse = ""),
                 collapse = " ")
  rule <- paste0(rep("-", sum(slen) + length(slen) - 1), collapse = "")
  # Quick formatting helper: fixed number of decimal places.
  qf <- function(x, digits = 2) sprintf(paste0("%.", digits, "f"), x)
  cat("Diffusion network summary statistics\n",
      "Name : ", meta$name, "\n",
      "Behavior : ", meta$behavior, "\n",
      rule, "\n", sep = "")
  cat(header, "\n")
  cat(hline, "\n")
  for (i in seq_len(nrow(out))) {
    cat(sprintf(
      paste0("%", slen, "s", collapse = " "),
      qf(meta$pers[slices[i]], 0), qf(out[i, 1], 0),
      sprintf("%s (%s)",
              qf(out$cum_adopt[i], 0),
              qf(out$cum_adopt_pcent[i])
      ),
      # Hazard is undefined at the first reported period.
      ifelse(i == 1, "-", qf(out$hazard[i])), qf(out$density[i]),
      if (!skip.moran) {
        if (is.nan(out$moran_sd[i]))
          " - "
        else
          sprintf("%s (%s) %-3s",
                  qf(out$moran_obs[i]),
                  qf(out$moran_sd[i]),
                  ifelse(out$moran_pval[i] <= .01, "***",
                         ifelse(out$moran_pval[i] <= .05, "**",
                                ifelse(out$moran_pval[i] <= .10, "*", ""
                                )))
          )
      } else ""
    ), "\n")
  }
  cat(
    rule,
    paste(" Left censoring :", sprintf("%3.2f (%d)", lc/meta$n, lc)),
    # Typo fix: was "Right centoring".
    paste(" Right censoring :", sprintf("%3.2f (%d)", rc/meta$n, rc)),
    paste(" # of nodes :", sprintf("%d", meta$n)),
    "\n Moran's I was computed on contemporaneous autocorrelation using 1/geodesic",
    # Typo fix: was "Significane".
    " values. Significance levels *** <= .01, ** <= .05, * <= .1.",
    sep = "\n"
  )
  invisible(out)
}
#' Plot the diffusion process
#'
#' Creates a colored network plot showing the structure of the graph through time
#' (one network plot for each time period) and the set of adopter and non-adopters
#' in the network.
#'
#' @templateVar dynamic TRUE
#' @template graph_template
#' @param cumadopt \eqn{n\times T}{n*T} matrix.
#' @param slices Integer vector. Indicates what slices to plot. By default all are plotted.
#' @param vertex.color A character vector of size 3 with colors names.
#' @param vertex.shape A character vector of size 3 with shape names.
#' @template plotting_template
#' @param mfrow.par Vector of size 2 with number of rows and columns to be passed to \code{\link{par}.}
#' @param main Character scalar. A title template to be passed to \code{\link{sprintf}.}
#' @param ... Further arguments to be passed to \code{\link[igraph:plot.igraph]{plot.igraph}}.
#' @param legend.args List of arguments to be passed to \code{\link{legend}}.
#' @param background Either a function to be called before plotting each slice, a color
#' to specify the backgroupd color, or \code{NULL} (in which case nothing is done).
#'
#' @details Plotting is done via the function \code{\link[igraph:plot.igraph]{plot.igraph}}.
#'
#' In order to center the attention on the diffusion process itself, the
#' positions of each vertex are computed only once by aggregating the networks
#' through time, this is, instead of computing the layout for each time \eqn{t},
#' the function creates a new graph accumulating links through time.
#'
#' The \code{mfrow.par} sets how to arrange the plots on the device. If \eqn{T=5}
#' and \code{mfrow.par=c(2,3)}, the first three networks will be in the top
#' of the device and the last two in the bottom.
#'
#' The argument \code{vertex.color} contains the colors of non-adopters, new-adopters,
#' and adopters respectively. The new adopters (default color \code{"tomato"}) have a different
#' color that the adopters when the graph is at their time of adoption, hence,
#' when the graph been plotted is in \eqn{t=2} and \eqn{toa=2} the vertex will
#' be plotted in red.
#'
#' \code{legend.args} has the following default parameter:
#' \tabular{ll}{
#' \code{x} \tab \code{"bottom"} \cr
#' \code{legend} \tab \code{c("Non adopters", "New adopters","Adopters")} \cr
#' \code{pch} \tab \code{sapply(vertex.shape, switch, circle = 21, square = 22, 21)} \cr
#' \code{bty} \tab \code{"n"} \cr
#' \code{horiz} \tab \code{TRUE} \cr
#' }
#'
#'
#' @examples
#' # Generating a random graph
#' set.seed(1234)
#' n <- 6
#' nper <- 5
#' graph <- rgraph_er(n,nper, p=.3, undirected = FALSE)
#' toa <- sample(2000:(2000+nper-1), n, TRUE)
#' adopt <- toa_mat(toa)
#'
#' plot_diffnet(graph, adopt$cumadopt)
#' @return Calculated coordinates for the grouped graph (invisible).
#' @family visualizations
#' @keywords hplot
#' @export
#' @author George G. Vega Yon
# S3 generic: dispatch on the class of -graph- (explicit first argument for
# consistency with the plot_threshold generic defined later in this file).
plot_diffnet <- function(graph, ...) UseMethod("plot_diffnet")
#' @export
#' @rdname plot_diffnet
# diffnet method: coerce the slices to dgCMatrix form and delegate to the
# default method together with the cumulative adoption matrix; any extra
# plotting arguments are forwarded untouched.
plot_diffnet.diffnet <- function(
  graph, ...
) {
  plot_diffnet.default(
    graph    = as_dgCMatrix(graph),
    cumadopt = graph$cumadopt,
    ...
  )
}
#' @rdname plot_diffnet
#' @export
# Default method: draws one network panel per requested slice, coloring
# vertices as non-adopters / new adopters / previous adopters, with a shared
# legend and overall title. Returns the computed layout invisibly.
plot_diffnet.default <- function(
    graph, cumadopt,
    slices = NULL,
    vertex.color = c("white", "tomato", "steelblue"),
    vertex.shape = c("square", "circle", "circle"),
    vertex.size = "degree",
    mfrow.par = NULL,
    main = c("Network in period %s", "Diffusion Network"),
    legend.args = list(),
    minmax.relative.size = getOption("diffnet.minmax.relative.size", c(0.01, 0.04)),
    background = NULL,
    ...) {
  set_plotting_defaults("background")
  # The device state is modified below (par); restore it on exit.
  oldpar <- graphics::par(no.readonly = TRUE)
  on.exit(graphics::par(oldpar))
  # Fill legend defaults when a legend is wanted: either the user supplied
  # entries, or the (default) empty list still asks for the standard legend.
  # Scalar conditions, so use the short-circuit operators.
  if (length(legend.args) || (!length(legend.args) && is.list(legend.args))) {
    if (!length(legend.args$x)) legend.args$x <- "bottom"
    if (!length(legend.args$legend))
      legend.args$legend <- c("Non adopters", "New adopters","Adopters")
    if (!length(legend.args$pch)) {
      # Legend symbols matching the vertex shapes (21 = circle, 22 = square).
      legend.args$pch <- sapply(vertex.shape, switch, circle = 21, square = 22, 21)
    }
    if (!length(legend.args$bty)) legend.args$bty <- "n"
    if (!length(legend.args$horiz)) legend.args$horiz <- TRUE
  }
  igraph.args <- list(...)
  # Coercing into a dgCMatrix list
  graph <- as_dgCMatrix(graph)
  if (!is.list(graph))
    stopifnot_graph(graph)
  # NOTE(review): helper appears to add dimnames by reference/frame -- confirm.
  add_dimnames.list(graph)
  colnames(cumadopt) <- names(graph)
  # Number of available slices.
  t <- nslices(graph)
  # Default: up to four slices evenly spread over the observed periods;
  # numeric -slices- are translated into slice labels.
  if (!length(slices)) {
    slices <- names(graph)[unique(floor(seq(1, t, length.out = min(t, 4))))]
  } else if (is.numeric(slices)) {
    slices <- names(graph)[slices]
  }
  t <- length(slices)
  # Panel arrangement: hand-tuned grids up to 12 panels, 4 columns beyond.
  if (!length(mfrow.par)) {
    if (t < 4) mfrow.par <- c(1, t)
    else if (t == 4) mfrow.par <- c(2, 2)
    else if (t == 5) mfrow.par <- c(2, 3)
    else if (t == 6) mfrow.par <- c(2, 3)
    else if (t == 7) mfrow.par <- c(2, 4)
    else if (t == 8) mfrow.par <- c(2, 4)
    else if (t == 9) mfrow.par <- c(3, 4)
    else if (t == 10) mfrow.par <- c(3, 4)
    else if (t == 11) mfrow.par <- c(3, 4)
    else if (t == 12) mfrow.par <- c(3, 4)
    else mfrow.par <- c(ceiling(t/4), 4)
  }
  # Reserve outer margins (in inches) for the legend and the overall title.
  legend_height_i <- 0
  if (length(legend.args) && length(legend.args$legend)) {
    legend_height_i <- max(sapply(
      legend.args$legend,
      graphics::strheight,
      units = "inches",
      cex = if (length(legend.args$cex)) legend.args$cex else NULL
    ))*2.5
  }
  # Bug fix: the original tested "cex.main" %in% igraph.args (list *values*,
  # not names) and then read the non-existent igraph.args$main.cex.
  main_height_i <- graphics::strheight(
    main[2],
    units = "inches",
    cex = if ("cex.main" %in% names(igraph.args)) igraph.args$cex.main else NULL
  )*1.5
  graphics::par(
    mfrow = mfrow.par, mar = rep(.25, 4),
    omi = c(legend_height_i, 0, main_height_i, 0),
    xpd = NA, xaxs = "i", yaxs = "i"
  )
  # Setting igraph defaults
  set_igraph_plotting_defaults("igraph.args")
  # 3. Plotting ----------------------------------------------------------------
  # Vertex types per slice:
  # - 1: non-adopter at s
  # - 2: adopter whose adoption shows up at s (new adopter)
  # - 3: adopter from an earlier slice/period
  set_type <- function() {
    i <- match(s, colnames(cumadopt))
    j <- match(s, slices)
    # First slice of both the data and the selection: no "previous adopters".
    if (i == 1 && j == 1)
      return(ifelse(!cumadopt[, s], 1L, 2L))
    type <- ifelse(!cumadopt[, s], 1L, NA)
    if (j > 1) {
      # Compare against the previously *plotted* slice.
      type <- ifelse(!is.na(type), type,
                     ifelse(cumadopt[, slices[j - 1]], 3L, 2L))
    } else if (i > 1) {
      # First plotted slice but not the first period: compare with period i-1.
      type <- ifelse(!is.na(type), type,
                     ifelse(cumadopt[, i - 1], 3L, 2L))
    }
    type
  }
  for (s in slices) {
    # Colors/shapes follow the vertex type; new adopters stand out.
    type <- set_type()
    cols <- vertex.color[type]
    shapes <- vertex.shape[type]
    # Creating igraph object
    ig <- igraph::graph_from_adjacency_matrix(graph[[s]], weighted = TRUE)
    # Layout is computed once (first slice) and reused so vertices do not
    # move between panels; a user-supplied layout function is applied once.
    if (!length(igraph.args$layout)) {
      igraph.args$layout <- igraph::layout_nicely(ig)
    } else if (length(igraph.args$layout) && is.function(igraph.args$layout)) {
      igraph.args$layout <- igraph.args$layout(ig)
    }
    graphics::plot.new()
    graphics::plot.window(xlim = c(-1.15, 1.15), ylim = c(-1.15, 1.15))
    # Background: either a user hook or a solid fill.
    if (is.function(background)) background()
    else if (length(background))
      graphics::rect(-1.15, -1.15, 1.15, 1.15, col = background, border = background)
    # Plotting
    do.call(
      igraph::plot.igraph,
      c(
        list(
          ig,
          vertex.color = cols,
          vertex.size = rescale_vertex_igraph(
            compute_vertex_size(graph, vertex.size, match(s, names(graph))),
            minmax.relative.size = minmax.relative.size
          ),
          vertex.shape = shapes
        ),
        igraph.args)
    )
    # Per-panel subtitle: -main[1]- is an sprintf template for the label.
    if (length(main))
      subtitle(x = sprintf(main[1], names(graph[s])))
  }
  # Overlay a full-device frame for the shared title and legend.
  graphics::par(
    mfrow = c(1, 1), mai = rep(0, 4), new = TRUE, xpd = NA,
    omi = c(0, 0, main_height_i, 0)
  )
  graphics::plot.new()
  graphics::plot.window(c(0, 1), c(0, 1))
  if (length(main) > 1)
    title(main = main[2], outer = TRUE)
  if (length(legend.args))
    do.call(graphics::legend, c(legend.args, list(pt.bg = vertex.color)))
  invisible(igraph.args$layout)
}
#' Threshold levels through time
#'
#' Draws a graph where the coordinates are given by time of adoption, x-axis,
#' and threshold level, y-axis.
#'
#' @templateVar dynamic TRUE
#' @templateVar toa TRUE
#' @templateVar undirected TRUE
#' @template graph_template
#' @param expo \eqn{n\times T}{n * T} matrix. Exposure to the innovation obtained from \code{\link{exposure}}
#' @param t0 Integer scalar. Passed to \code{\link{threshold}}.
#' @param include_censored Logical scalar. Passed to \code{\link{threshold}}.
#' @param attrs Passed to \code{\link{exposure}} (via threshold).
#' @param no.contemporary Logical scalar. When TRUE, edges for vertices with the same
#' \code{toa} won't be plotted.
#' @param main Character scalar. Title of the plot.
#' @param xlab Character scalar. x-axis label.
#' @param ylab Character scalar. y-axis label.
#' @param vertex.size Numeric vector of size \eqn{n}. Relative size of the vertices.
#' @param vertex.color Either a vector of size \eqn{n} or a scalar indicating colors of the vertices.
#' @param vertex.label Character vector of size \eqn{n}. Labels of the vertices.
#' @param vertex.label.pos Integer value to be passed to \code{\link{text}} via \code{pos}.
#' @param vertex.label.cex Either a numeric scalar or vector of size \eqn{n}. Passed to \code{text}.
#' @param vertex.label.adj Passed to \code{\link{text}}.
#' @param vertex.label.color Passed to \code{\link{text}}.
#' @param jitter.amount Numeric vector of size 2 (for x and y) passed to \code{\link{jitter}}.
#' @param jitter.factor Numeric vector of size 2 (for x and y) passed to \code{\link{jitter}}.
#' @param vertex.frame.color Either a vector of size \eqn{n} or a scalar indicating colors of vertices' borders.
#' @param vertex.sides Either a vector of size \eqn{n} or a scalar indicating the
#' number of sides of each vertex (see details).
#' @param vertex.rot Either a vector of size \eqn{n} or a scalar indicating the
#' rotation in radians of each vertex (see details).
#' @param edge.width Numeric. Width of the edges.
#' @param edge.color Character. Color of the edges.
#' @param arrow.width Numeric value to be passed to \code{\link{arrows}}.
#' @param arrow.length Numeric value to be passed to \code{\link{arrows}}.
#' @param arrow.color Color.
#' @param include.grid Logical. When TRUE, the grid of the graph is drawn.
#' @param bty See \code{\link{par}}.
#' @param xlim Passed to \code{\link{plot}}.
#' @param ylim Passed to \code{\link{plot}}.
#' @param ... Additional arguments passed to \code{\link{plot}}.
#' @param edge.curved Logical scalar. When curved, generates curved edges.
#' @param background TBD
#' @family visualizations
#' @seealso Use \code{\link{threshold}} to retrieve the corresponding threshold
#' obtained returned by \code{\link{exposure}}.
#' @keywords hplot
#'
#' @details When \code{vertex.label=NULL} the function uses vertices ids as labels.
#' By default \code{vertex.label=""} plots no labels.
#'
#' Vertices are drawn using an internal function for generating polygons.
#' Polygons are inscribed in a circle of radius \code{vertex.size}, and can be
#' rotated using \code{vertex.rot}. The number of sides of each polygon
#' is set via \code{vertex.sides}.
#'
#' @examples
#'
#' # Generating a random graph
#' set.seed(1234)
#' n <- 6
#' nper <- 5
#' graph <- rgraph_er(n,nper, p=.3, undirected = FALSE)
#' toa <- sample(2000:(2000+nper-1), n, TRUE)
#' adopt <- toa_mat(toa)
#'
#' # Computing exposure
#' expos <- exposure(graph, adopt$cumadopt)
#'
#' plot_threshold(graph, expos, toa)
#'
#' # Calculating degree (for sizing the vertices)
#' plot_threshold(graph, expos, toa, vertex.size = "indegree")
#'
#' @export
#' @author George G. Vega Yon
# S3 generic for threshold plots; dispatch occurs on the class of -graph-.
plot_threshold <- function(graph, expo, ...) UseMethod("plot_threshold")
#' @export
#' @rdname plot_threshold
# diffnet method: fills exposure/directionality/origin-period defaults from
# the object's own data and metadata, then delegates to the default method.
plot_threshold.diffnet <- function(graph, expo, ...) {
  # Exposure defaults to the one implied by the diffnet object itself.
  if (missing(expo))
    expo <- exposure(graph)
  dots <- list(...)
  # Metadata-driven defaults, only when the user did not set them.
  if (!length(dots$undirected))
    dots$undirected <- graph$meta$undirected
  if (!length(dots$t0))
    dots$t0 <- graph$meta$pers[1]
  # The object already carries times of adoption; warn on a user override.
  if (length(dots$toa))
    warning("While -graph- has its own toa variable, the user is providing one.")
  else
    dots$toa <- graph$toa
  do.call(plot_threshold.default, c(list(graph = graph$graph, expo = expo), dots))
}
#' @export
#' @rdname plot_threshold
# Array method: coerce the 3-D array into a list of dgCMatrix slices, then
# delegate to the default method.
plot_threshold.array <- function(graph, expo, ...) {
  graph_ls <- as_dgCMatrix(graph)
  plot_threshold.default(graph_ls, expo = expo, ...)
}
#' @export
#' @rdname plot_threshold
# Default method: scatter-like network plot with x = (jittered) time of
# adoption and y = (jittered) threshold, edges drawn from the time-cumulated
# graph, and vertices drawn as inscribed polygons.
plot_threshold.default <- function(
    graph,
    expo,
    toa,
    include_censored = FALSE,
    t0 = min(toa, na.rm = TRUE),
    attrs = NULL,
    undirected = getOption("diffnet.undirected"),
    no.contemporary = TRUE,
    main = "Time of Adoption by\nNetwork Threshold",
    xlab = "Time",
    ylab = "Threshold",
    vertex.size = "degree",
    vertex.color = NULL,
    vertex.label = "",
    vertex.label.pos = NULL,
    vertex.label.cex = 1,
    vertex.label.adj = c(.5,.5),
    vertex.label.color = NULL,
    vertex.sides = 40L,
    vertex.rot = 0,
    edge.width = 2,
    edge.color = NULL,
    arrow.width = NULL,
    arrow.length = NULL,
    arrow.color = NULL,
    include.grid = FALSE,
    vertex.frame.color = NULL,
    bty = "n",
    jitter.factor = c(1,1),
    jitter.amount = c(.25,.025),
    xlim = NULL,
    ylim = NULL,
    edge.curved = NULL,
    background = NULL,
    ...
) {
  # Fill package-level defaults for the NULL-valued graphical arguments.
  # NOTE(review): -bty- is accepted but not forwarded anywhere below -- confirm
  # whether it should be passed to graphics::plot.
  set_plotting_defaults(c("edge.color", "vertex.frame.color", "vertex.label.color", "edge.curved", "vertex.color", "background", "arrow.color"))
  # Checking if exposure was provided
  if (missing(expo))
    stop("expo should be provided")
  # Coerce any supported representation into a list of dgCMatrix slices.
  graph <- as_dgCMatrix(graph)
  # Step 0: Getting basic info
  t <- length(graph)
  n <- nrow(graph[[1]])
  # Step 1: accumulate all slices into a single (weighted) graph, starting
  # from an empty n x n sparse matrix.
  cumgraph <- methods::new("dgCMatrix", Dim=c(n,n), p=rep(0L, n+1L))
  for(i in 1:t) {
    cumgraph <- cumgraph + graph[[i]]
  }
  # Vertical positions: thresholds, jittered so ties remain visible.
  y0 <- threshold(expo, toa, t0, attrs=attrs, include_censored=include_censored)
  y <- jitter(y0, factor=jitter.factor[2], amount = jitter.amount[2])
  # Horizontal positions: jittered times of adoption; default limits pad the
  # observed range by one period (x) and 10% (y).
  jit <- jitter(toa, factor=jitter.factor[1], amount = jitter.amount[1])
  xran <- range(toa, na.rm = TRUE)
  if (!length(xlim)) xlim <- xran + c(-1,1)
  yran <- c(0,1)
  if (!length(ylim)) ylim <- yran + (yran[2] - yran[1])*.1*c(-1,1)
  # Step 2: vertex sizes (e.g. from degree), shapes, and rotations.
  vertex.size <- compute_vertex_size(graph, vertex.size)
  # -vertex.sides-: integer (or coercible numeric), scalar or length n.
  test <- length(vertex.sides)
  if (!inherits(vertex.sides, c("integer", "numeric"))) {
    stop("-vertex.sides- must be integer.")
  } else if (inherits(vertex.sides, "numeric")) {
    warning("-vertex.sides- will be coerced to integer.")
    vertex.sides <- as.integer(vertex.sides)
  }
  if (test == 1) {
    # Scalar: recycle for every vertex.
    vertex.sides <- rep(vertex.sides, n)
  } else if (test != n) {
    stop("-vertex.sides- must be of the same length as nnodes(graph).")
  }
  # -vertex.rot-: numeric rotation (radians), scalar or length n.
  test <- length(vertex.rot)
  if (!inherits(vertex.rot, "integer") & !inherits(vertex.rot, "numeric")) {
    stop("-vertex.rot- must be numeric.")
  } else if (test == 1) {
    vertex.rot <- rep(vertex.rot, n)
  } else if (test != n) {
    stop("-vertex.rot- must be of the same length as nnodes(graph).")
  }
  # Open the empty plotting canvas.
  graphics::plot(NULL, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, main=main,
                 xaxs="i", yaxs="i",...)
  # Background: either a user hook or a solid fill over the plot region.
  if (is.function(background)) background()
  else if (length(background))
    graphics::rect(xlim[1], ylim[1], xlim[2], ylim[2], col=background, border=background)
  # Arrow head defaults scale with the user-coordinate width of the device.
  if (!length(arrow.width))
    arrow.width <- with(graphics::par(), (usr[2] - usr[1])/75)
  if (!length(arrow.length))
    arrow.length <- with(graphics::par(), (usr[2] - usr[1])/75)
  # Should there be a grid??
  if (include.grid)
    grid()
  # Now, for y (it should be different)
  xran <- range(xlim, na.rm = TRUE)
  yran <- range(ylim, na.rm = TRUE)
  # Edge coordinates are precomputed (edges_coords) so that arrows respect
  # vertex sizes and the device aspect ratio.
  vertex.size <- igraph_vertex_rescale(vertex.size, adjust=1)
  edges <- edges_coords(cumgraph, toa, jit, y, vertex.size, undirected, no.contemporary,
                        dev=par("pin"), ran=c(xlim[2]-xlim[1], ylim[2]-ylim[1]))
  edges <- as.data.frame(edges)
  ran <- c(xlim[2]-xlim[1], ylim[2]-ylim[1])
  # Draw every edge (curved spline or straight polygon) plus its arrow head.
  mapply(function(x0, y0, x1, y1, col, edge.curved, arrow.color) {
    y <- edges_arrow(x0, y0, x1, y1, width=arrow.width, height=arrow.length,
                     beta=pi*(2/3), dev=par("pin"), ran=ran, curved = edge.curved)
    # Drawing arrow
    if (edge.curved) {
      # Edge
      graphics::xspline(
        y$edge[,1],y$edge[,2],
        shape = c(0, 1, 0),
        open=TRUE, border = col, lwd=edge.width)
      # Arrow
      graphics::polygon(y$arrow[,1], y$arrow[,2], col = arrow.color, border = arrow.color)
    } else {
      # Edge
      graphics::polygon(y$edge[,1],y$edge[,2], col = col, border = col, lwd=edge.width)
      # Arrow
      graphics::polygon(y$arrow[,1], y$arrow[,2], col = arrow.color, border = arrow.color)
    }
  }, x0 = edges[,"x0"], y0 = edges[,"y0"], x1 = edges[,"x1"], y1 = edges[,"y1"],
  col = edge.color, edge.curved = edge.curved, arrow.color=arrow.color)
  # Vertices: polygons inscribed in circles of radius vertex.size.
  pol <- vertices_coords(jit, y, vertex.size, vertex.sides, vertex.rot, par("pin"), ran)
  mapply(function(coords,border,col)
    graphics::polygon(coords[,1], coords[,2], border = border, col=col),
    coords = pol, border = vertex.frame.color, col=vertex.color)
  # Vertex labels: NULL means "use vertex numbers"; "" (default) means none.
  if (!length(vertex.label)) vertex.label <- 1:n
  graphics::text(x=jit, y=y, labels = vertex.label,
                 pos = vertex.label.pos,
                 cex = vertex.label.cex,
                 col = vertex.label.color,
                 adj = vertex.label.adj
  )
  # Return the plotted coordinates invisibly for reuse by the caller.
  invisible(data.frame(toa=toa,threshold=y0, jit=jit))
}
#' Plot distribution of infect/suscep
#'
#' After calculating infectiousness and susceptibility of each individual on the
#' network, it creates an \code{nlevels} by \code{nlevels} matrix indicating the
#' number of individuals that lie within each cell, and draws a heatmap.
#'
#' @templateVar dynamic TRUE
#' @templateVar toa TRUE
#' @template graph_template
#' @param t0 Integer scalar. See \code{\link{toa_mat}}.
#' @param normalize Logical scalar. Passed to infection/susceptibility.
#' @param K Integer scalar. Passed to infection/susceptibility.
#' @param r Numeric scalar. Passed to infection/susceptibility.
#' @param expdiscount Logical scalar. Passed to infection/susceptibility.
#' @param bins Integer scalar. Size of the grid (\eqn{n}).
#' @param nlevels Integer scalar. Number of levels to plot (see \code{\link{filled.contour}}).
#' @param h Numeric vector of length 2. Passed to \code{\link[MASS:kde2d]{kde2d}} in the \pkg{MASS} package.
#' @param logscale Logical scalar. When TRUE the axis of the plot will be presented in log-scale.
#' @param main Character scalar. Title of the graph.
#' @param xlab Character scalar. Title of the x-axis.
#' @param ylab Character scalar. Title of the y-axis.
#' @param sub Character scalar. Subtitle of the graph.
#' @param color.palette a color palette function to be used to assign colors in the plot (see \code{\link{filled.contour}}).
#' @param include.grid Logical scalar. When TRUE, the grid of the graph is drawn.
#' @param ... Additional parameters to be passed to \code{\link{filled.contour}.}
#' @param exclude.zeros Logical scalar. When TRUE, observations with zero values
#' @param valued Logical scalar. When FALSE non-zero values in the adjmat are set to one.
#' in infect or suscept are excluded from the graph. This is done explicitly when \code{logscale=TRUE}.
#' @details
#'
#' This plotting function was inspired by Aral, S., & Walker, D. (2012).
#'
#' By default the function will try to apply a kernel smooth function via
#' \code{kde2d}. If not possible (because not enought data points), then
#' the user should try changing the parameter \code{h} or set it equal to zero.
#'
#' \code{toa} is passed to \code{infection/susceptibility}.
#'
#' @return A list with three elements:
#' \item{infect}{A numeric vector of size \eqn{n} with infectiousness levels}
#' \item{suscep}{A numeric vector of size \eqn{n} with susceptibility levels}
#' \item{coords}{A list containing the class marks and counts used to draw the
#' plot via \code{\link{filled.contour}} (see \code{\link{grid_distribution}})}
#' \item{complete}{A logical vector with \code{TRUE} when the case was included in
#' the plot. (this is relevant whenever \code{logscale=TRUE})}
#' @family visualizations
#' @seealso Infectiousness and susceptibility are computed via \code{\link{infection}} and
#' \code{\link{susceptibility}}.
#' @keywords hplot
#' @references
#' Aral, S., & Walker, D. (2012). "Identifying Influential and Susceptible Members
#' of Social Networks". Science, 337(6092), 337-341.
#' \url{http://doi.org/10.1126/science.1215842}
#' @export
#' @examples
#' # Generating a random graph -------------------------------------------------
#' set.seed(1234)
#' n <- 100
#' nper <- 20
#' graph <- rgraph_er(n,nper, p=.2, undirected = FALSE)
#' toa <- sample(1:(1+nper-1), n, TRUE)
#'
#' # Visualizing distribution of suscep/infect
#' out <- plot_infectsuscep(graph, toa, K=3, logscale = FALSE)
#' @author George G. Vega Yon
plot_infectsuscep <- function(
  graph,
  toa,
  t0 = NULL,
  normalize = TRUE,
  K = 1L,
  r = 0.5,
  expdiscount = FALSE,
  bins = 20,
  nlevels = round(bins/2),
  h = NULL,
  logscale = TRUE,
  main = "Distribution of Infectiousness and\nSusceptibility",
  xlab = "Infectiousness of ego",
  ylab = "Susceptibility of ego",
  sub = ifelse(logscale, "(in log-scale)", NA),
  color.palette = function(n) viridisLite::viridis(n),
  include.grid = TRUE,
  exclude.zeros = FALSE,
  valued = getOption("diffnet.valued",FALSE),
  ...
) {

  # When -toa- is not supplied it can only be recovered from a diffnet object.
  if (missing(toa)) {
    if (!inherits(graph, "diffnet"))
      stop("-toa- should be provided when -graph- is not of class 'diffnet'")
    toa <- graph$toa
    t0  <- min(graph$meta$pers)
  }

  # Default starting time: earliest observed time of adoption.
  if (!length(t0))
    t0 <- min(toa, na.rm = TRUE)

  # Dispatch on the graph representation.
  cls <- class(graph)
  if ("array" %in% cls)
    return(plot_infectsuscep.array(
      graph, toa, t0, normalize, K, r, expdiscount, bins, nlevels, h, logscale,
      main, xlab, ylab, sub, color.palette, include.grid, exclude.zeros,
      valued, ...))

  if ("list" %in% cls)
    return(plot_infectsuscep.list(
      graph, toa, t0, normalize, K, r, expdiscount, bins, nlevels, h, logscale,
      main, xlab, ylab, sub, color.palette, include.grid, exclude.zeros,
      valued, ...))

  if ("diffnet" %in% cls)
    return(plot_infectsuscep.list(
      graph$graph, graph$toa, t0, normalize, K, r, expdiscount, bins, nlevels,
      h, logscale, main, xlab, ylab, sub, color.palette, include.grid,
      exclude.zeros, valued, ...))

  stopifnot_graph(graph)
}
# @export
# @rdname plot_infectsuscep
plot_infectsuscep.array <- function(graph, ...) {
  # Coerce every time slice of the array to a sparse matrix and delegate
  # to the list method.
  sparse_slices <- apply(graph, 3, methods::as, Class = "dgCMatrix")
  plot_infectsuscep.list(sparse_slices, ...)
}
# @export
# @rdname plot_infectsuscep
# Workhorse for plot_infectsuscep(): computes infectiousness/susceptibility,
# estimates their joint distribution (grid counts or 2D kernel density) and
# draws it with filled.contour().
#
# Fixes over the previous version:
#  * scalar `if` conditions now use the short-circuit `||` / `&&` instead of
#    the vectorized `|` / `&`;
#  * `length(h) && h == 0` errored under R >= 4.3 when -h- has length 2
#    (`&&` no longer accepts conditions of length > 1); only the first
#    element is inspected now, matching the historical `&&` semantics;
#  * typo in the "Not enough data" error message.
plot_infectsuscep.list <- function(graph, toa, t0, normalize,
                                   K, r, expdiscount,
                                   bins, nlevels,
                                   h,
                                   logscale,
                                   main,
                                   xlab,
                                   ylab,
                                   sub,
                                   color.palette,
                                   include.grid, exclude.zeros, valued,
                                   ...) {
  # Computing infectiousness and susceptibility per vertex
  infect <- infection(graph, toa, t0, normalize, K, r, expdiscount, valued)
  suscep <- susceptibility(graph, toa, t0, normalize, K, r, expdiscount, valued)

  complete <- complete.cases(infect, suscep)

  # Optionally move to the log scale, dropping observations whose log is not
  # finite (zeros and negatives).
  if (logscale) {
    infectp <- log(infect)
    suscepp <- log(suscep)

    # Only keeping complete cases
    complete <- complete & is.finite(infectp) & is.finite(suscepp)
    if (any(!complete)) warning("When applying logscale some observations are missing.")
  }
  else {
    infectp <- infect
    suscepp <- suscep
  }

  infectp <- infectp[complete,]
  suscepp <- suscepp[complete,]

  # Scalar condition: short-circuit operators.
  if ((!length(infectp) || !length(suscepp)) && logscale)
    stop("Can't apply logscale (undefined values).")

  # If excluding zeros
  include <- rep(TRUE, length(infectp))
  if (exclude.zeros) {
    include[!infectp | !suscepp] <- FALSE
  }

  # h == 0 (first element) means "no kernel smoothing": use plain grid counts.
  # Inspecting only h[1] keeps this valid under R >= 4.3 where `&&` rejects
  # length-2 conditions.
  if (length(h) && h[1] == 0) {
    coords <- grid_distribution(infectp[include], suscepp[include], bins)
  } else {
    # Default bandwidths, computed on the non-zero included observations
    if (!length(h)) h <- c(
      MASS::bandwidth.nrd(infectp[include & infectp!=0]),
      MASS::bandwidth.nrd(suscepp[include & suscepp!=0])
    )

    # Can't use the smoother with degenerate (zero/NA) bandwidths
    if (any((h==0) | is.na(h)))
      stop('Not enough data to perform smooth. Try choosing another value for -h-,',
           ' or set h=0 (no kernel smooth).')
    coords <- MASS::kde2d(infectp[include], suscepp[include], n = bins, h = h)
  }

  # Nice plot: z is rescaled so that the cells sum to one.
  n <- sum(coords$z)
  with(coords, filled.contour(
    x, y,
    z/n, bty="n", main=main, xlab=xlab, ylab=ylab, sub=sub, color.palette =color.palette,
    xlim=range(x), ylim=range(y),
    plot.axes={
      # Tick positions live on the plotting (possibly log) scale, while the
      # labels are shown on the original scale.
      xticks <- pretty(x)
      yticks <- pretty(y)
      if (logscale) {
        xlticks <- exp(xticks)
        ylticks <- exp(yticks)
      } else {
        xlticks <- xticks
        ylticks <- yticks
      }
      # Drawing the axis
      axis(1, xticks, sprintf("%.2f",xlticks))
      axis(2, yticks, sprintf("%.2f",ylticks))
      # Putting the grid
      if (include.grid) grid()
    }, nlevels=nlevels, ...))

  # Reference showing how many observations were actually drawn
  legend("topleft", legend=
           sprintf('\n%d out of %d obs.\nincluded', sum(include), length(complete)),
         bty="n")

  invisible(list(infect=infect, suscept=suscep, coords=coords,
                 complete=complete))
}
#' Visualize adopters and cumulative adopters
#' @param obj Either a diffnet object or a cumulative adoption matrix.
#' @param freq Logical scalar. When TRUE frequencies are plotted instead of proportions.
#' @param what Character vector of length 2. What to plot.
#' @param add Logical scalar. When TRUE lines and dots are added to the current graph.
#' @param include.legend Logical scalar. When TRUE a legend of the graph is plotted.
#' @param pch Integer vector of length 2. See \code{\link{matplot}}.
#' @param type Character vector of length 2. See \code{\link{matplot}}.
#' @param ylim Numeric vector of length 2. Sets the plotting limit for the y-axis.
#' @param lty Numeric vector of length 2. See \code{\link{matplot}}.
#' @param col Character vector of length 2. See \code{\link{matplot}}.
#' @param bg Character vector of length 2. See \code{\link{matplot}}.
#' @param xlab Character scalar. Name of the x-axis.
#' @param ylab Character scalar. Name of the y-axis.
#' @param main Character scalar. Title of the plot
#' @param ... Further arguments passed to \code{\link{matplot}}.
#' @param include.grid Logical scalar. When TRUE, the grid of the graph is drawn
#' @family visualizations
#' @examples
#' # Generating a random diffnet -----------------------------------------------
#' set.seed(821)
#' diffnet <- rdiffnet(100, 5, seed.graph="small-world", seed.nodes="central")
#'
#' plot_adopters(diffnet)
#'
#' # Alternatively, we can use a TOA Matrix
#' toa <- sample(c(NA, 2010L,2015L), 20, TRUE)
#' mat <- toa_mat(toa)
#' plot_adopters(mat$cumadopt)
#' @return A matrix as described in \code{\link{cumulative_adopt_count}}.
#' @export
#' @author George G. Vega Yon
# Plot the per-period ("adopt") and cumulative ("cumadopt") adoption curves
# of a diffusion process, either as frequencies or proportions.
# Returns (invisibly) the unscaled cumulative adoption matrix.
plot_adopters <- function(
obj,
freq = FALSE,
what = c("adopt","cumadopt"),
add = FALSE,
include.legend = TRUE,
include.grid = TRUE,
pch = c(21,24),
type = c("b", "b"),
ylim = if (!freq) c(0,1) else NULL,
lty = c(1,1),
col = c("black","black"),
bg = c("tomato","gray"),
xlab = "Time",
ylab = ifelse(freq, "Frequency", "Proportion"),
main = "Adopters and Cumulative Adopters",
...
) {
# Only the two known curves can be requested.
if (any(!(what %in% c("adopt", "cumadopt"))))
stop("Invalid curve to plot. -what- must be in c(\"adopt\",\"cumadopt\").")
# Per-period and cumulative adoption counts. For a diffnet object the
# per-period counts come from the adoption matrix; for a cumulative
# adoption matrix they are recovered by differencing the cumulative row.
if (inherits(obj, "diffnet")) {
cumadopt <- cumulative_adopt_count(obj)
adopt <- colSums(obj$adopt)
n <- obj$meta$n
}
else {
cumadopt <- cumulative_adopt_count(obj)
adopt <- cumadopt["num",] - c(0,cumadopt["num",1:(ncol(cumadopt)-1)])
n <- nrow(obj)
}
# Kept aside so the unscaled matrix can be returned invisibly.
out <- cumadopt
# Proportions (the default): divide counts by the number of vertices.
if (!freq) {
cumadopt <- cumadopt/n
adopt <- adopt/n
}
# Time labels for the x-axis
times <- colnames(cumadopt)
# A single NA in -ylim- means "let matplot pick the limits".
if ((length(ylim) == 1) && is.na(ylim))
ylim <- NULL
# One column per requested curve: cumulative first (if requested), then
# per-period adoption. NOTE: -n- is reused here as the number of time
# periods, not the number of vertices.
k <- length(what)
n <- length(times)
mat <- matrix(ncol=k, nrow=n)
if ("cumadopt" %in% what) mat[,1] <- cumadopt["num",]
if ("adopt" %in% what) mat[,k] <- adopt
# Trim per-curve graphical parameters down to the requested curves.
test <- c("cumadopt" %in% what, "adopt" %in% what)
if (length(type) > k) type <- type[test]
if (length(lty) > k) lty <- lty[test]
if (length(col) > k) col <- col[test]
if (length(bg) > k) bg <- bg[test]
if (length(pch) > k) pch <- pch[test]
matplot(times, y=mat, ylim=ylim, add=add, type=type,
lty=lty, col=col, xlab=xlab, ylab=ylab, main=main, pch=pch,
bg=bg,...)
# Legend and grid only make sense when drawing a fresh plot.
if (!add) {
if (include.legend)
legend("topleft", bty="n", pch=pch,
legend = c("Cumulative adopters", "Adopters")[test], pt.bg = bg, col=col)
if (include.grid)
grid()
}
invisible(out)
}
# x <- cumulative_adopt_count(diffnet)
# z <- x["num",] - c(0,x["num",1:(ncol(x)-1)])
# cumsum(z)
# x["num",]
#' \code{diffnet} Arithmetic and Logical Operators
#'
#' Addition, subtraction, network power of \code{diffnet} objects, and logical
#' operators such as \code{&} and \code{|} between them
#'
#' @param x A \code{diffnet} class object.
#' @param y Integer scalar. Power of the network
#' @param valued Logical scalar. When FALSE all non-zero entries of the adjacency
#' matrices are set to one.
#'
#' @details These binary operators ease the data management process with diffnet objects.
#'
#' By default the binary operator \code{^} assumes that the graph is valued,
#' hence the power is computed using weighted edges. If more control
#' is needed, the user can use \code{graph_power} instead.
#'
#' @return A diffnet class object
#'
#' @examples
#' # Computing two-steps away threshold with the Brazilian farmers data --------
#' data(brfarmersDiffNet)
#'
#' expo1 <- threshold(brfarmersDiffNet)
#' expo2 <- threshold(brfarmersDiffNet^2)
#'
#' # Computing correlation
#' cor(expo1,expo2)
#'
#' # Drawing a qqplot
#' qqplot(expo1, expo2)
#'
#' # Working with inverse ------------------------------------------------------
#' brf2_step <- brfarmersDiffNet^2
#' brf2_step <- 1/brf2_step
#'
#' @export
#' @name diffnet-arithmetic
#' @family diffnet methods
`^.diffnet` <- function(x, y) {
  # Powers below two leave the graph untouched.
  if (y < 2)
    return(x)

  # Multiply each time slice by itself (y - 1) additional times.
  for (period in seq_len(x$meta$nper)) {
    base_graph <- x$graph[[period]]
    for (step in seq_len(y - 1))
      x$graph[[period]] <- x$graph[[period]] %*% base_graph
  }

  x
}
#' @rdname diffnet-arithmetic
#' @export
graph_power <- function(x, y, valued=getOption("diffnet.valued", FALSE)) {
  # Unless the graph is valued, binarize every slice (set all stored entries
  # of the sparse matrices to one) before taking the power.
  if (!valued) {
    for (period in seq_len(x$meta$nper))
      x$graph[[period]]@x <- rep(1, length(x$graph[[period]]@x))
  }

  x^y
}
#' @rdname diffnet-arithmetic
#' @export
# Division involving a diffnet object. In BOTH mixed cases the stored result
# for a non-zero edge value e is scalar/e (the edge-wise inverse scaled by
# the scalar operand).
# NOTE(review): for diffnet / numeric this therefore computes numeric/graph
# rather than graph/numeric -- presumably intentional, since the documented
# use case is 1/diffnet (edge-wise inverse); confirm before relying on
# diffnet / k for plain scaling.
`/.diffnet` <- function(y, x) {
if (inherits(x, "diffnet") && (inherits(y, "numeric") | inherits(y, "integer"))) {
# numeric / diffnet: each non-zero edge value e becomes y/e.
for (i in 1:x$meta$nper)
x$graph[[i]]@x <- y/(x$graph[[i]]@x)
return(x)
} else if (inherits(y, "diffnet") && (inherits(x, "numeric") | inherits(x, "integer"))) {
# diffnet / numeric: each non-zero edge value e becomes x/e (see note above).
for (i in 1:y$meta$nper)
y$graph[[i]]@x <- x/(y$graph[[i]]@x)
return(y)
} else stop("No method for x:", class(x), " and y:", class(y))
}
#' @rdname diffnet-arithmetic
#' @export
#' @examples
#' # Removing the first 3 vertex of medInnovationsDiffnet ----------------------
#' data(medInnovationsDiffNet)
#'
#' # Using a diffnet object
#' first3Diffnet <- medInnovationsDiffNet[1:3,,]
#' medInnovationsDiffNet - first3Diffnet
#'
#' # Using indexes
#' medInnovationsDiffNet - 1:3
#'
#' # Using ids
#' medInnovationsDiffNet - as.character(1001:1003)
# Subtraction removes vertices from -x-:
#  * diffnet - diffnet   : drops from -x- the vertices whose ids appear in -y-
#  * diffnet - numeric   : drops vertices by position
#  * diffnet - character : drops vertices by id label
# Fix: scalar `if` conditions now use the short-circuit `&&` instead of the
# vectorized `&`.
`-.diffnet` <- function(x, y) {
  if (inherits(x, "diffnet") && inherits(y, "diffnet")) {
    # Positions (in x) of the ids that will be removed
    ids.to.remove <- y$meta$ids
    ids.to.remove <- which(x$meta$ids %in% ids.to.remove)
    x[-ids.to.remove, , drop=FALSE]
  } else if (inherits(x, "diffnet") && any(class(y) %in% c("integer", "numeric"))) {
    # Dropping using positional indexes
    x[-y,, drop=FALSE]
  } else if (inherits(x, "diffnet") && inherits(y, "character")) {
    # All labels must exist in the set of ids
    test <- which(!(y %in% x$meta$ids))
    if (length(test))
      stop("Some elements in -y- (right-hand side of the expression) are not ",
           "in the set of ids of the diffnet object:\n\t",
           paste0(y[test], collapse=", "),".")
    y <- which(x$meta$ids %in% y)
    x[-y,,drop=FALSE]
  } else
    stop("Subtraction between -",class(x),"- and -", class(y), "- not supported.")
}
#' @export
#' @rdname diffnet-arithmetic
# Element-wise (Hadamard) product of the adjacency matrices.
# Fixes: scalar `if` condition now uses `&&`; mapply() is called with
# SIMPLIFY = FALSE so x$graph is guaranteed to remain a list of matrices
# (the default SIMPLIFY = TRUE may collapse the result).
`*.diffnet` <- function(x,y) {
  if (inherits(x, "diffnet") && inherits(y, "diffnet")) {
    # Both operands must span the same vertices and time periods.
    test <- all(dim(x) == dim(y))
    if (!test)
      stop('Both -x- and -y- must have the same dimensions.')

    x$graph <- mapply(`*`, x$graph, y$graph, SIMPLIFY = FALSE)
    return(x)
  } else if (inherits(x, "diffnet") && is.numeric(y)) {
    # diffnet * scalar (or numeric recycled across slices)
    x$graph <- mapply(`*`, x$graph, y, SIMPLIFY = FALSE)
    return(x)
  } else
    stop("Multiplication between -",class(x),"- and -", class(y), "- not supported.")
}
#' @export
#' @rdname diffnet-arithmetic
# Element-wise logical AND of the adjacency matrices, coerced back to sparse
# numeric (dgCMatrix) form. SIMPLIFY = FALSE guarantees mapply() returns a
# list, so x$graph keeps its list-of-matrices structure.
`&.diffnet` <- function(x,y) {
  x$graph <- mapply(function(a,b) methods::as(a & b, "dgCMatrix"),
                    x$graph, y$graph, SIMPLIFY = FALSE)
  x
}
#' @export
#' @rdname diffnet-arithmetic
# Element-wise logical OR of the adjacency matrices, coerced back to sparse
# numeric (dgCMatrix) form. SIMPLIFY = FALSE guarantees mapply() returns a
# list, so x$graph keeps its list-of-matrices structure.
`|.diffnet` <- function(x,y) {
  x$graph <- mapply(function(a,b) methods::as(a | b, "dgCMatrix"),
                    x$graph, y$graph, SIMPLIFY = FALSE)
  x
}
#' Matrix multiplication
#'
#' Matrix multiplication methods, including \code{\link{diffnet}}
#' objects. This function creates a generic method for \code{\link[base:matmult]{\%*\%}}
#' allowing for multiplying diffnet objects.
#'
#' @param x Numeric or complex matrices or vectors, or \code{diffnet} objects.
#' @param y Numeric or complex matrices or vectors, or \code{diffnet} objects.
#'
#' @details This function can be useful to generate alternative graphs, for
#' example, users could compute the n-steps graph by doing \code{net \%*\% net}
#' (see examples).
#'
#' @return In the case of \code{diffnet} objects performs matrix multiplication
#' via \code{\link{mapply}} using \code{x$graph} and \code{y$graph} as arguments,
#' returning a \code{diffnet}. Otherwise returns the default according to
#' \code{\link[base:matmult]{\%*\%}}.
#'
#' @examples
#' # Finding the Simmelian Ties network ----------------------------------------
#'
#' # Random diffnet graph
#' set.seed(773)
#' net <- rdiffnet(100, 4, seed.graph='small-world', rgraph.args=list(k=8))
#' netsim <- net
#'
#' # According to Dekker (2006), Simmelian ties can be computed as follows
#' netsim <- net * t(net) # Keeping mutual ties
#' netsim <- netsim * (netsim %*% netsim)
#'
#' # Checking out differences (netsim should have less)
#' nlinks(net)
#' nlinks(netsim)
#'
#' mapply(`-`, nlinks(net), nlinks(netsim))
#'
#' @export
#' @rdname diffnetmatmult
#' @family diffnet methods
`%*%` <- function(x, y) UseMethod("%*%")
#' @export
#' @rdname diffnetmatmult
`%*%.default` <- function(x, y) {
  # Use the diffnet method when only the right operand is a diffnet;
  # otherwise defer to base matrix multiplication.
  if (inherits(y, "diffnet"))
    return(`%*%.diffnet`(x, y))
  base::`%*%`(x = x, y = y)
}
#' @export
#' @rdname diffnetmatmult
# Matrix multiplication involving at least one diffnet object, applied slice
# by slice over the time periods.
`%*%.diffnet` <- function(x, y) {
# Recycle a single (square) matrix into a list with one dgCMatrix per time
# slice of the diffnet -z-, named after its time periods.
mat2dgCList <- function(w,z) {
w <- lapply(seq_len(nslices(z)), function(u) methods::as(w, "dgCMatrix"))
names(w) <- dimnames(z)[[3]]
w
}
# Case 1: diffnet %*% diffnet -- slice-wise product.
# NOTE(review): mapply() runs with the default SIMPLIFY = TRUE; presumably
# it always yields a list for sparse-matrix results -- confirm.
if (inherits(x, "diffnet") && inherits(y, "diffnet")) {
x$graph <- mapply(base::`%*%`, x$graph, y$graph)
} else if (inherits(x, "diffnet") && !inherits(y, "diffnet")) {
# Case 2: diffnet %*% matrix -- the matrix must be n x n.
if (identical(rep(dim(x)[1],2), dim(y)))
x$graph <- mapply(base::`%*%`, x$graph, mat2dgCList(y, x))
else stop("-y- must have the same dimension as -x-")
} else if (inherits(y, "diffnet") && !inherits(x, "diffnet")) {
# Case 3: matrix %*% diffnet -- symmetric to case 2, returns -y-.
if (identical(rep(dim(y)[1],2), dim(x))) {
y$graph <- mapply(base::`%*%`, mat2dgCList(x, y), y$graph)
return(y)
}
else stop("-y- must have the same dimension as -x-")
}
x
}
#' Coerce a diffnet graph into an array
#'
#' @param x A diffnet object.
#' @param ... Ignored.
#' @details
#' The function takes the list of sparse matrices stored in \code{x} and creates
#' an array with them. Attributes and other elements from the diffnet object are
#' dropped.
#'
#' \code{dimnames} are obtained from the metadata of the diffnet object.
#'
#' @return A three-dimensional array of \eqn{T} matrices of size \eqn{n\times n}{n * n}.
#' @seealso \code{\link{diffnet}}.
#' @family diffnet methods
#' @examples
#' # Creating a random diffnet object
#' set.seed(84117)
#' mydiffnet <- rdiffnet(30, 5)
#'
#' # Coercing it into an array
#' as.array(mydiffnet)
#' @export
as.array.diffnet <- function(x, ...) {
  # Stack the dense version of every time slice into an n x n x nper array,
  # labelled with the vertex ids and time periods from the metadata.
  dims <- with(x$meta, c(n, n, nper))
  out  <- array(dim = dims)

  for (k in seq_along(x$graph))
    out[, , k] <- as.matrix(x$graph[[k]])

  dimnames(out) <- with(x$meta, list(ids, ids, pers))
  out
}
#' Count the number of vertices/edges/slices in a graph
#'
#' @template graph_template
#' @return For \code{nvertices} and \code{nslices}, an integer scalar equal to the number
#' of vertices and slices in the graph. Otherwise, from \code{nedges}, either a list
#' of size \eqn{t} with the counts of edges (non-zero elements in the adjacency matrices) at
#' each time period, or, when \code{graph} is static, a single scalar with
#' such number.
#' @details
#' \code{nnodes} and \code{nlinks} are just aliases for \code{nvertices} and
#' \code{nedges} respectively.
#' @export
#' @examples
#' # Creating a dynamic graph (we will use this for all the classes) -----------
#' set.seed(13133)
#' diffnet <- rdiffnet(100, 4)
#'
#' # Lets use the first time period as a static graph
#' graph_mat <- diffnet$graph[[1]]
#' graph_dgCMatrix <- methods::as(graph_mat, "dgCMatrix")
#'
#' # Now lets generate the other dynamic graphs
#' graph_list <- diffnet$graph
#' graph_array <- as.array(diffnet) # using the as.array method for diffnet objects
#'
#' # Now we can compare vertices counts
#' nvertices(diffnet)
#' nvertices(graph_list)
#' nvertices(graph_array)
#'
#' nvertices(graph_mat)
#' nvertices(graph_dgCMatrix)
#'
#' # ... and edges count
#' nedges(diffnet)
#' nedges(graph_list)
#' nedges(graph_array)
#'
#' nedges(graph_mat)
#' nedges(graph_dgCMatrix)
nvertices <- function(graph) {
  # Vertex count for any supported graph representation.
  graph_class <- class(graph)

  if (any(c("array", "matrix", "dgCMatrix") %in% graph_class))
    return(nrow(graph))

  if ("list" %in% graph_class)
    return(nrow(graph[[1]]))

  if ("diffnet" %in% graph_class)
    return(graph$meta$n)

  if ("igraph" %in% graph_class)
    return(igraph::vcount(graph))

  if ("network" %in% graph_class)
    return(network::network.size(graph))

  stopifnot_graph(graph)
}
#' @rdname nvertices
#' @export
# Alias of nvertices(), kept for convenience/backward compatibility.
nnodes <- nvertices
#' @export
#' @rdname nvertices
nedges <- function(graph) {
  # Edge (non-zero entry) count; a single scalar for static graphs, a named
  # list with one count per time period for dynamic ones.
  graph_class <- class(graph)

  if ("matrix" %in% graph_class)
    return(sum(graph != 0))

  if ("array" %in% graph_class) {
    # One count per slice, named by period (defaulting to 1..T).
    counts <- as.list(apply(graph, 3, function(slice) sum(slice != 0)))
    if (!length(names(counts)))
      names(counts) <- seq_along(counts)
    return(counts)
  }

  if ("dgCMatrix" %in% graph_class)
    return(length(graph@i))

  if ("list" %in% graph_class) {
    # Stored (structural) entries of each sparse slice.
    counts <- lapply(graph, function(slice) length(slice@i))
    if (!length(names(counts)))
      names(counts) <- seq_along(counts)
    return(counts)
  }

  if ("diffnet" %in% graph_class)
    return(lapply(graph$graph, function(slice) sum(slice@x != 0)))

  if ("igraph" %in% graph_class)
    return(igraph::ecount(graph))

  if ("network" %in% graph_class)
    return(network::network.edgecount(graph))

  stopifnot_graph(graph)
}
#' @export
#' @rdname nvertices
# Alias of nedges(), kept for convenience/backward compatibility.
nlinks <- nedges
#' @export
#' @rdname nvertices
nslices <- function(graph) {
  # Number of time periods (slices) in the graph representation.
  graph_class <- class(graph)

  if ("matrix" %in% graph_class) return(1L)
  if ("array" %in% graph_class) return(dim(graph)[3])
  if ("dgCMatrix" %in% graph_class) return(1L)
  if ("diffnet" %in% graph_class) return(graph$meta$nper)
  if ("list" %in% graph_class) return(length(graph))

  stopifnot_graph(graph)
}
#' @export
#' @rdname diffnet-class
nodes <- function(graph) {
  # Vertex id labels of the graph; errors when none are available.
  graph_class <- class(graph)

  if ("diffnet" %in% graph_class)
    return(graph$meta$ids)

  if ("list" %in% graph_class) {
    ids <- rownames(graph[[1]])
    if (!length(ids)) stop("There are not names to fetch")
    return(ids)
  }

  if (any(c("matrix", "dgCMatrix", "array") %in% graph_class)) {
    ids <- rownames(graph)
    if (!length(ids)) stop("There are not names to fetch")
    return(ids)
  }

  stopifnot_graph(graph)
}
#' @export
#' @rdname diffnet-class
#' @param FUN a function to be passed to lapply
diffnetLapply <- function(graph, FUN, ...) {
  # Apply FUN to every time slice, handing it the slice-specific pieces of
  # the diffnet object (adjacency matrix, dynamic attributes, adoption
  # columns) plus the static attributes and metadata.
  slice_call <- function(i, graph, ...) {
    FUN(i,
        graph = graph$graph[[i]],
        toa = graph$toa,
        vertex.static.attrs = graph$vertex.static.attrs,
        vertex.dyn.attrs = graph$vertex.dyn.attrs[[i]],
        adopt = graph$adopt[, i, drop = FALSE],
        cumadopt = graph$cumadopt[, i, drop = FALSE],
        meta = graph$meta)
  }
  lapply(seq_len(nslices(graph)), slice_call, graph = graph, ...)
}
# debug(diffnetLapply)
# diffnetLapply(medInnovationsDiffNet, function(x, graph, cumadopt, ...) {
# sum(cumadopt)
# })
#' @export
#' @rdname diffnet-class
# str() method for diffnet objects: show the underlying list structure.
# unclass() prevents dispatch back into diffnet methods.
str.diffnet <- function(object, ...) {
utils::str(unclass(object))
}
#' @export
#' @rdname diffnet-class
dimnames.diffnet <- function(x) {
  # Dimension names: vertex ids, attribute names (static attributes followed
  # by the dynamic attributes of the first period), and time periods.
  attr_names <- c(colnames(x$vertex.static.attrs),
                  names(x$vertex.dyn.attrs[[1]]))
  list(x$meta$ids, attr_names, x$meta$pers)
}
#' @export
#' @rdname diffnet-class
#' @method t diffnet
t.diffnet <- function(x) {
  # Transpose every adjacency matrix using the sparse-matrix method.
  transpose <- getMethod("t", "dgCMatrix")
  x$graph <- lapply(x$graph, transpose)
  x
}
#' @rdname diffnet-class
#' @export
dim.diffnet <- function(x) {
  # c(#vertices, #attributes, #time periods); attributes are the static
  # ones plus the dynamic attributes of the first period.
  n_attrs <- length(c(colnames(x$vertex.static.attrs),
                      names(x$vertex.dyn.attrs[[1]])))
  as.integer(c(x$meta$n, n_attrs, x$meta$nper))
}
|
library(iECAT)
### Name: iECAT
### Title: Integrating External Controls to Association Tests
### Aliases: iECAT iECAT.SSD.OneSet_SetIndex
### ** Examples
library(SKAT)
data(Example, package="iECAT")
# Pull the components of the example data set explicitly instead of
# attach()ing it, so the search path is left untouched.
Y <- Example$Y
Z.list <- Example$Z.list
tbl.external.all <- Example$tbl.external.all.list
tbl.external.all.list <- Example$tbl.external.all.list
# iECAT-O
# test the first gene: fit the null model, then run the optimal test
obj <- SKAT_Null_Model(Y ~ 1, out_type="D")
Z <- Z.list[[1]]
tbl.external.all <- tbl.external.all.list[[1]]
iECAT(Z, obj, tbl.external.all, method="optimal")
# test for the first 3 genes in the Example dataset
p.value.all <- rep(0, 3)
p.value.internal.all <- rep(0, 3)
for(i in 1:3){
  re <- iECAT(Z.list[[i]], obj, tbl.external.all.list[[i]], method="optimal")
  p.value.all[i] <- re$p.value
  p.value.internal.all[i] <- re$p.value.internal
}
# iECAT-O p-values
p.value.all
# SKAT-O p-values (internal data only)
p.value.internal.all
| /data/genthat_extracted_code/iECAT/examples/iECAT.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 760 | r | library(iECAT)
### Name: iECAT
### Title: Integrating External Controls to Association Tests
### Aliases: iECAT iECAT.SSD.OneSet_SetIndex
### ** Examples
# Auto-extracted example; assumes the iECAT package is already loaded.
library(SKAT)
data(Example, package="iECAT")
# NOTE(review): attach() puts Y, Z.list and tbl.external.all.list on the
# search path; explicit Example$... access would avoid polluting it.
attach(Example)
# iECAT-O
# test the first gene: fit the null model, then run the optimal test
obj<-SKAT_Null_Model(Y ~ 1, out_type="D")
Z = Z.list[[1]]
tbl.external.all = tbl.external.all.list[[1]]
iECAT(Z, obj, tbl.external.all, method="optimal")
# test for the first 3 genes in the Example dataset
p.value.all<-rep(0,3)
p.value.internal.all<-rep(0,3)
for(i in 1:3){
re<-iECAT(Z.list[[i]], obj, tbl.external.all.list[[i]], method="optimal")
p.value.all[i]<-re$p.value
p.value.internal.all[i]<-re$p.value.internal
}
# iECAT-O p-values
p.value.all
# SKAT-O p-values (internal data only)
p.value.internal.all
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logdensity2loglik.R
\name{logdensity2loglik}
\alias{logdensity2loglik}
\title{Compute the log likelihood function of the diffusion model}
\usage{
logdensity2loglik(logdensity, x, del, param, args = NULL)
}
\arguments{
\item{logdensity}{the model log likelihood function}
\item{x}{Time series of the observed state variables}
\item{del}{The uniform time step between observations}
\item{param}{The parameter vector}
\item{args}{Specify whether to use implied vol}
}
\description{
Compute the log likelihood function of the diffusion model
}
\examples{
logdensity2loglik(ModelU1,c(0.1,0.2,0.13,0.14),0.1,c(0.01,0.2))
}
| /man/logdensity2loglik.Rd | no_license | radovankavicky/MLEMVD | R | false | true | 702 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logdensity2loglik.R
\name{logdensity2loglik}
\alias{logdensity2loglik}
\title{Compute the log likelihood function of the diffusion model}
\usage{
logdensity2loglik(logdensity, x, del, param, args = NULL)
}
\arguments{
\item{logdensity}{the model log likelihood function}
\item{x}{Time series of the observed state variables}
\item{del}{The uniform time step between observations}
\item{param}{The parameter vector}
\item{args}{Specify whether to use implied vol}
}
\description{
Compute the log likelihood function of the diffusion model
}
\examples{
logdensity2loglik(ModelU1,c(0.1,0.2,0.13,0.14),0.1,c(0.01,0.2))
}
|
#
# Author: Lee Sang-seob
# Written: 2019-12-16
# Submitted: 2019-12-16
#
# (Comments translated to English; the original Korean comment lines were
# broken by encoding/extraction, leaving bare text outside of comments.)

# Q1)
# Using the state.x77 data set, build a simple linear regression model that
# predicts the murder rate (Murder) from illiteracy (Illiteracy), then
# predict the murder rate for illiteracy values of 0.5, 1.0 and 1.5.
st <- data.frame(state.x77)
st_model <- lm(Murder ~ Illiteracy, data = st)
st_model

df <- data.frame(Illiteracy = c(0.5, 1.0, 1.5))
predict(st_model, df)
plot(df$Illiteracy, predict(st_model, df), col = 'red', cex = 2, pch = 20)
abline(st_model)

# Q2)
# Using the trees data set, build a simple linear regression model that
# predicts tree volume (Volume) from girth (Girth), then predict the volume
# for girth values of 8.5, 9.0 and 9.5.
trees_model <- lm(Volume ~ Girth, data = trees)
trees_model

df <- data.frame(Girth = c(8.5, 9.0, 9.5))
predict(trees_model, df)
plot(df$Girth, predict(trees_model, df), col = 'red', cex = 2, pch = 20)
abline(trees_model)

# Q3)
# Using the pressure data set, build a simple linear regression model that
# predicts vapor pressure (pressure) from temperature (temperature), then
# predict the pressure for temperatures 65, 95 and 155.
pr_model <- lm(pressure ~ temperature, data = pressure)
pr_model

df <- data.frame(temperature = c(65, 95, 155))
predict(pr_model, df)
plot(df$temperature, predict(pr_model, df), col = 'red', cex = 2, pch = 20)
abline(pr_model)
| /LSJ_1216.R | no_license | seonggegun/workR | R | false | false | 1,421 | r | #
# Author: Lee Sang-seob
# Written: 2019-12-16
# Submitted: 2019-12-16
#
# (Comments translated to English; the original Korean comment lines were
# broken by encoding/extraction, leaving bare text outside of comments.)

# Q1)
# Using the state.x77 data set, build a simple linear regression model that
# predicts the murder rate (Murder) from illiteracy (Illiteracy), then
# predict the murder rate for illiteracy values of 0.5, 1.0 and 1.5.
st <- data.frame(state.x77)
st_model <- lm(Murder ~ Illiteracy, data = st)
st_model

df <- data.frame(Illiteracy = c(0.5, 1.0, 1.5))
predict(st_model, df)
plot(df$Illiteracy, predict(st_model, df), col = 'red', cex = 2, pch = 20)
abline(st_model)

# Q2)
# Using the trees data set, build a simple linear regression model that
# predicts tree volume (Volume) from girth (Girth), then predict the volume
# for girth values of 8.5, 9.0 and 9.5.
trees_model <- lm(Volume ~ Girth, data = trees)
trees_model

df <- data.frame(Girth = c(8.5, 9.0, 9.5))
predict(trees_model, df)
plot(df$Girth, predict(trees_model, df), col = 'red', cex = 2, pch = 20)
abline(trees_model)

# Q3)
# Using the pressure data set, build a simple linear regression model that
# predicts vapor pressure (pressure) from temperature (temperature), then
# predict the pressure for temperatures 65, 95 and 155.
pr_model <- lm(pressure ~ temperature, data = pressure)
pr_model

df <- data.frame(temperature = c(65, 95, 155))
predict(pr_model, df)
plot(df$temperature, predict(pr_model, df), col = 'red', cex = 2, pch = 20)
abline(pr_model)
|
# Exploratory script: simulating Hawkes processes and inspecting their
# log-likelihood surface.
# NOTE(review): relies on Hawkes.sim / Hawkes.sim2 / Hawkes.ll /
# estimated_intensity / compensator / Hawkes.plot defined elsewhere in the
# package, plus the CRAN packages 'hawkes' and 'plotly'.
# Simulate process
mu <- 1
Y <- matrix(0.5)
dist <- matrix("Constant")
delta <- matrix(2)
N <- c(0)
t <- 10000
test <- Hawkes.sim(mu,Y,dist,delta,N,t)
# Evaluate the log-likelihood on an alpha x beta grid (mu held at 1).
# true alpha is 1 and beta is 2
alpha <- seq(0.1,0.8,length.out = 100)
beta <- seq(0.1,0.8, length.out = 100)
tmp <- matrix(0, ncol = 100, nrow = 100)
for(i in 1:length(alpha)){
for(j in 1:length(beta)){
tmp[j,i] <- Hawkes.ll(t, 1, alpha[i], beta[j])
}
}
# Interactive surface plot of the likelihood grid.
library(plotly)
q <- plot_ly(x = alpha, y = beta, z = tmp, color = I("red")) %>% add_surface()
q %>% layout(scene = list(xaxis = list(title = "Alpha"), yaxis = list(title = "Beta"),
               zaxis = list(title = "Log-Likelihood")))
# Value and location of the grid maximum.
max(tmp)
which(tmp == max(tmp), arr.ind = TRUE)
# Univariate (M = 1) simulation through the general interface.
M <- 1
m <- c(1)
y <- matrix(0.5, ncol = M, nrow = M)
d <- matrix("Constant", ncol = M, nrow = M)
del <- matrix(2, ncol = M, nrow = M)
n <- c(0)
test <- Hawkes.sim(mu = m, Y = y, dist = d,
                   delta = del, N = n, t = 100, params = list())
# Bivariate (M = 2) simulation with cross-excitation entries.
M <- 2
m <- c(1,2)
y <- matrix(c(1,0.1,0.1,1), ncol = M, nrow = M)
d <- matrix("Constant", ncol = M, nrow = M)
del <- matrix(c(1,0.1,0.1,1), ncol = M, nrow = M)
n <- c(0,0)
test1 <- Hawkes.sim(mu = m, Y = y, dist = d,
                    delta = del, N = n, t = 100, params = list())
# MCMC and optim - here's how
# optim
t <- hawkes::simulateHawkes(1, 0.5, 5, 10000)[[1]]
# Negative log-likelihood, so optim() (a minimizer) maximizes the likelihood.
f <- function(params){
  - Hawkes.ll(t, params[1], params[2], params[3])
}
params <- optim(c(1,0.1,0.1), f)
paste( c("mu", "alpha", "beta"), round(params$par,2), sep=" = ")
# M is 1 case
M <- 1
mu <- 1
a <- matrix(0.5)
b <- matrix(2)
test <- Hawkes.sim2(mu, a, b, 10000, 0)
# M is 2 case
M <- 2
mu <- c(1,2)
a <- matrix(c(0.5, 0.2, 0.2, 0.5), ncol = 2)
b <- matrix(c(2, 1.1, 1.1, 2.2), ncol = 2)
test <- Hawkes.sim2(mu, a, b, 10000, 0)
# Split event times by component index.
# NOTE(review): this loop reads tmp$t and tmp$N, but -tmp- currently holds
# the numeric likelihood grid from above -- it seems to expect a simulation
# result instead; confirm before reusing.
q <- list()
for(i in 1:M){
tmpvec <- c()
for(j in 1:length(tmp$t)){
if(tmp$N[j] == i){
tmpvec <- c(tmpvec, tmp$t[j])
}
}
q[[i]] <- tmpvec
}
# Plots
# Estimated intensity (red) and compensator (blue) of a fresh simulation.
t <- hawkes::simulateHawkes(1, 0.5, 2, 100)[[1]]
ei <- estimated_intensity(c(0.5,2,1), t)
cmp <- compensator(c(0.5,2,1), t)
plot(t, ei, type = "l",
     col = "red", xlab = "t", ylab = "Intensity", ylim = c(0, 10))
cols <- c("red")
lines(t, cmp, col = "blue")
grid(20,20)
Hawkes.plot(test)
# Step plot of the counting process of the first component.
plot(test$r, test$N[,1], type = "s",
     col = "red", xlab = "Time", ylab = "Count",
     ylim = c(10^floor(log10(min(test$N))), max(test$N)+1), lwd = 3,
     cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5)
grid(25,25)
| /R/Extras/oldthoughts.R | no_license | AndrewC1998/HawkesProcesses | R | false | false | 2,475 | r | # Simulate process
# Exploratory script: simulate a Hawkes process and inspect its
# log-likelihood surface.
# NOTE(review): relies on Hawkes.sim / Hawkes.sim2 / Hawkes.ll /
# estimated_intensity / compensator / Hawkes.plot defined elsewhere in the
# package, plus the CRAN packages 'hawkes' and 'plotly'.
mu <- 1
Y <- matrix(0.5)
dist <- matrix("Constant")
delta <- matrix(2)
N <- c(0)
t <- 10000
test <- Hawkes.sim(mu,Y,dist,delta,N,t)
# Evaluate the log-likelihood on an alpha x beta grid (mu held at 1).
# true alpha is 1 and beta is 2
alpha <- seq(0.1,0.8,length.out = 100)
beta <- seq(0.1,0.8, length.out = 100)
tmp <- matrix(0, ncol = 100, nrow = 100)
for(i in 1:length(alpha)){
for(j in 1:length(beta)){
tmp[j,i] <- Hawkes.ll(t, 1, alpha[i], beta[j])
}
}
# Interactive surface plot of the likelihood grid.
library(plotly)
q <- plot_ly(x = alpha, y = beta, z = tmp, color = I("red")) %>% add_surface()
q %>% layout(scene = list(xaxis = list(title = "Alpha"), yaxis = list(title = "Beta"),
               zaxis = list(title = "Log-Likelihood")))
# Value and location of the grid maximum.
max(tmp)
which(tmp == max(tmp), arr.ind = TRUE)
# Univariate (M = 1) simulation through the general interface.
M <- 1
m <- c(1)
y <- matrix(0.5, ncol = M, nrow = M)
d <- matrix("Constant", ncol = M, nrow = M)
del <- matrix(2, ncol = M, nrow = M)
n <- c(0)
test <- Hawkes.sim(mu = m, Y = y, dist = d,
                   delta = del, N = n, t = 100, params = list())
# Bivariate (M = 2) simulation with cross-excitation entries.
M <- 2
m <- c(1,2)
y <- matrix(c(1,0.1,0.1,1), ncol = M, nrow = M)
d <- matrix("Constant", ncol = M, nrow = M)
del <- matrix(c(1,0.1,0.1,1), ncol = M, nrow = M)
n <- c(0,0)
test1 <- Hawkes.sim(mu = m, Y = y, dist = d,
                    delta = del, N = n, t = 100, params = list())
# MCMC and optim - here's how
# optim
t <- hawkes::simulateHawkes(1, 0.5, 5, 10000)[[1]]
# Negative log-likelihood, so optim() (a minimizer) maximizes the likelihood.
f <- function(params){
  - Hawkes.ll(t, params[1], params[2], params[3])
}
params <- optim(c(1,0.1,0.1), f)
paste( c("mu", "alpha", "beta"), round(params$par,2), sep=" = ")
# M is 1 case
M <- 1
mu <- 1
a <- matrix(0.5)
b <- matrix(2)
test <- Hawkes.sim2(mu, a, b, 10000, 0)
# M is 2 case
M <- 2
mu <- c(1,2)
a <- matrix(c(0.5, 0.2, 0.2, 0.5), ncol = 2)
b <- matrix(c(2, 1.1, 1.1, 2.2), ncol = 2)
test <- Hawkes.sim2(mu, a, b, 10000, 0)
# Split event times by component index.
# NOTE(review): this loop reads tmp$t and tmp$N, but -tmp- currently holds
# the numeric likelihood grid from above -- it seems to expect a simulation
# result instead; confirm before reusing.
q <- list()
for(i in 1:M){
tmpvec <- c()
for(j in 1:length(tmp$t)){
if(tmp$N[j] == i){
tmpvec <- c(tmpvec, tmp$t[j])
}
}
q[[i]] <- tmpvec
}
# Plots
# Estimated intensity (red) and compensator (blue) of a fresh simulation.
t <- hawkes::simulateHawkes(1, 0.5, 2, 100)[[1]]
ei <- estimated_intensity(c(0.5,2,1), t)
cmp <- compensator(c(0.5,2,1), t)
plot(t, ei, type = "l",
     col = "red", xlab = "t", ylab = "Intensity", ylim = c(0, 10))
cols <- c("red")
lines(t, cmp, col = "blue")
grid(20,20)
Hawkes.plot(test)
# Step plot of the counting process of the first component.
plot(test$r, test$N[,1], type = "s",
     col = "red", xlab = "Time", ylab = "Count",
     ylim = c(10^floor(log10(min(test$N))), max(test$N)+1), lwd = 3,
     cex.lab = 1.5, cex.axis = 1.5, cex.main = 1.5)
grid(25,25)
|
# Plot the distribution of zero-coverage blocks extracted from a sampled BAM.
# NOTE(review): input/output paths are relative; run from the project root.
lens<-read.table("data/results_SRR061958.sample50K.fa.bam_block_lens.csv")
nums<-read.table("data/results_SRR061958.sample50K.fa.bam_block_nums.csv")
library(lattice)
# Histogram of zero-block lengths, written to a PNG file.
p1<-histogram(~lens,xlab="Lengths of zero blocks")
png("plots/zero_block_lengths.png")
plot(p1)
dev.off()
# Histogram of the number of zero blocks per read (bins 0..15).
p1<-histogram(~nums,
breaks=0:15,
xlab="Number of zero blocks in a read")
png("plots/zero_block_nums.png")
plot(p1)
dev.off() | /scripts/get_zero_contigs.R | no_license | macieksk/ithaka-experiments | R | false | false | 403 | r |
# Plot the distribution of zero-coverage blocks extracted from a sampled BAM.
# NOTE(review): input/output paths are relative; run from the project root.
lens<-read.table("data/results_SRR061958.sample50K.fa.bam_block_lens.csv")
nums<-read.table("data/results_SRR061958.sample50K.fa.bam_block_nums.csv")
library(lattice)
# Histogram of zero-block lengths, written to a PNG file.
p1<-histogram(~lens,xlab="Lengths of zero blocks")
png("plots/zero_block_lengths.png")
plot(p1)
dev.off()
# Histogram of the number of zero blocks per read (bins 0..15).
p1<-histogram(~nums,
breaks=0:15,
xlab="Number of zero blocks in a read")
png("plots/zero_block_nums.png")
plot(p1)
dev.off() |
## Read the household power consumption file: ";"-separated, with header,
## explicit column names/classes, and "?" treated as NA
data <- read.table("./data/power.txt",
                   sep=";", header = TRUE,
                   col.names=c("date","time","actpo","reactpo","voltage",
                               "intensity","sub1","sub2","sub3"),
                   colClasses=c("character","character","numeric",
                                "numeric","numeric","numeric",
                                "numeric","numeric","numeric"),
                   na.strings = "?"
                   )
## Keep only the two days of interest (1-2 Feb 2007)
sub <- subset(data,data$date %in% c("1/2/2007","2/2/2007"))
## Convert date to Date format first...
sub$date <- as.Date(sub$date,format="%d/%m/%Y")
## ...then combine date and time into a POSIXlt date-time; paste() renders
## the Date as %Y-%m-%d, matching the strptime format below
combine <- paste(sub$date,sub$time)
sub$date <- strptime(combine,format="%Y-%m-%d %H:%M:%S")
## Build plot 3
library(datasets)
## Change locale to English ("C") so the weekday labels on the x-axis are
## not localized
Sys.setlocale(category="LC_ALL","C")
## Create plot 3 in PNG device
png(file="plot3.png", bg="transparent")
with(sub,plot(date,sub1,type = "l" ,ylab="Energy sub metering",
              xlab =" ")
     )
with(sub,points(date,sub2, type="l", col="red"))
with(sub,points(date,sub3,type="l",col="blue"))
## Create legend for the three sub-metering series
legend("topright", pch="-", cex=0.8, col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## Close the PNG device, flushing the plot to disk
dev.off() | /figure/plot3.R | no_license | John5on-lin/ExData_Plotting1 | R | false | false | 1,365 | r | ## read txt file, remove header, rename col, convert classes, convert ? to NA
data <- read.table("./data/power.txt",
sep=";", header = TRUE,
col.names=c("date","time","actpo","reactpo","voltage",
"intensity","sub1","sub2","sub3"),
colClasses=c("character","character","numeric",
"numeric","numeric","numeric",
"numeric","numeric","numeric"),
na.strings = "?"
)
## Subset the dataset
sub <- subset(data,data$date %in% c("1/2/2007","2/2/2007"))
## Conver date to Date format
sub$date <- as.Date(sub$date,format="%d/%m/%Y")
combine <- paste(sub$date,sub$time)
sub$date <- strptime(combine,format="%Y-%m-%d %H:%M:%S")
## Build plot 3
library(datasets)
## Change locale to english before creating plot3, due to X-axis
Sys.setlocale(category="LC_ALL","C")
## Create plot 3 in PNG device
png(file="plot3.png", bg="transparent")
with(sub,plot(date,sub1,type = "l" ,ylab="Energy sub metering",
xlab =" ")
)
with(sub,points(date,sub2, type="l", col="red"))
with(sub,points(date,sub3,type="l",col="blue"))
## Create legend
legend("topright", pch="-", cex=0.8, col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## Copy to png device
dev.off() |
# censusVis: Shiny app drawing county-level choropleth maps of 2010
# US Census demographics (percentage of population by race/ethnicity).
library(shiny)
library(maps) ## added by Yongsu
library(mapproj) ## added by Yongsu
# County-level census data and the percent_map() plotting helper used
# by the server below.
counties <- readRDS("counties.rds")
source("helpers.R")
# User interface ----
ui <- fluidPage(
titlePanel("censusVis"),
sidebarLayout(
sidebarPanel(
helpText("Create demographic maps with
information from the 2010 US Census."),
# Demographic variable to display on the map.
selectInput("var",
label = "Choose a variable to display",
choices = c("Percent White", "Percent Black",
"Percent Hispanic", "Percent Asian"),
selected = "Percent White"),
# Percentage range mapped onto the colour scale.
sliderInput("range",
label = "Range of interest:",
min = 0, max = 100, value = c(0, 100))
),
mainPanel(plotOutput("map"))
)
)
# Server logic ----
server <- function(input, output) {
output$map <- renderPlot({
# Map the selected variable to (data column, map colour, legend
# title) -- the first three positional arguments of percent_map().
args <- switch(input$var,
"Percent White" = list(counties$white, "darkgreen", "% White"),
"Percent Black" = list(counties$black, "black", "% Black"),
"Percent Hispanic" = list(counties$hispanic, "darkorange", "% Hispanic"),
"Percent Asian" = list(counties$asian, "darkviolet", "% Asian"))
# Append the slider endpoints as named min/max arguments.
args$min <- input$range[1]
args$max <- input$range[2]
do.call(percent_map, args)
})
}
# Run app ----
shinyApp(ui, server)
| /app.R | no_license | brianchan4226/CensusApp | R | false | false | 1,404 | r | library(shiny)
library(maps) ## added by Yongsu
library(mapproj) ## added by Yongsu
counties <- readRDS("counties.rds")
source("helpers.R")
# User interface ----
ui <- fluidPage(
titlePanel("censusVis"),
sidebarLayout(
sidebarPanel(
helpText("Create demographic maps with
information from the 2010 US Census."),
selectInput("var",
label = "Choose a variable to display",
choices = c("Percent White", "Percent Black",
"Percent Hispanic", "Percent Asian"),
selected = "Percent White"),
sliderInput("range",
label = "Range of interest:",
min = 0, max = 100, value = c(0, 100))
),
mainPanel(plotOutput("map"))
)
)
# Server logic ----
server <- function(input, output) {
output$map <- renderPlot({
args <- switch(input$var,
"Percent White" = list(counties$white, "darkgreen", "% White"),
"Percent Black" = list(counties$black, "black", "% Black"),
"Percent Hispanic" = list(counties$hispanic, "darkorange", "% Hispanic"),
"Percent Asian" = list(counties$asian, "darkviolet", "% Asian"))
args$min <- input$range[1]
args$max <- input$range[2]
do.call(percent_map, args)
})
}
# Run app ----
shinyApp(ui, server)
|
###################### Obtain data ##################
# Read the activity dataset (steps / date / 5-minute interval).
path <- getwd()
name <- "activity.csv"
dt_activity <- read.csv(paste(path, name, sep="/"), sep = ",")

##################### Process data ##########################
# Part 1 - histogram and mean/median of total steps per day.
# Copy the data.frame, then drop rows containing NA.
dt_activity_1 <- dt_activity
dt_activity_1 <- dt_activity_1[complete.cases(dt_activity_1),]
# Sanity check: total steps after removing NAs.
# (FIX: this line previously ran BEFORE dt_activity_1 was created.)
sum(dt_activity_1$steps)
# Aggregate steps by day.
dt_steps_by_day <- as.data.frame(xtabs(steps ~ date , dt_activity_1))
# Histogram of daily step totals.
hist(dt_steps_by_day$Freq)
# Mean and median of the daily totals.
mean_activity_1 <- mean(dt_steps_by_day$Freq)
median_activity_1 <- median(dt_steps_by_day$Freq)

# Part 2 - average steps per 5-minute interval.
dt_activity_2 <- dt_activity
dt_activity_2 <- dt_activity_2[complete.cases(dt_activity_2),]
# Mean steps per interval. (FIX: dropped the stray `data =` argument --
# aggregate() does not take one in this call form; it was silently
# swallowed by mean()'s `...`.)
dt_steps_by_day2 <- as.data.frame(aggregate(dt_activity_2$steps, by=list(dt_activity_2$interval), FUN=mean))
# Interval with the maximum average step count.
maxvalue <- max(dt_steps_by_day2$x)
dt_steps_by_day2_max <- dt_steps_by_day2[dt_steps_by_day2$x == maxvalue, ]
# Time-series plot of the interval averages.
plot_part2 <- plot(x=dt_steps_by_day2$Group.1 , y= dt_steps_by_day2$x , type = "l")

# Part 3 - missing values.
# Report the number of rows with a missing step count.
dt_activity_na <- dt_activity[is.na(dt_activity[,1]),]
missing_na <- length(dt_activity_na[,1])
# Imputation value: mean of the non-missing step counts.
mean_not_missing <- mean(dt_activity_1[complete.cases(dt_activity_1),1])
# Replace NA step counts with the mean.
dt_replace_na <- dt_activity
dt_replace_na[is.na(dt_replace_na[,1]),1] <- mean_not_missing
# Sanity check: total steps after imputation.
# (FIX: this line previously ran BEFORE dt_replace_na was created.)
sum(dt_replace_na[complete.cases(dt_replace_na),1])
dt_steps_by_day_replaced_na <- as.data.frame(xtabs(steps ~ date , dt_replace_na))
# Average per 5-minute interval on the imputed data, then plot.
dt_replace_na_agg <- as.data.frame(aggregate(dt_replace_na$steps, by=list(dt_replace_na$interval), FUN=mean))
plot(x=dt_replace_na_agg$Group.1 , y= dt_replace_na_agg$x , type = "l")

# Part 4 - weekday vs weekend activity patterns.
# Add the day name. NOTE: weekdays() is locale-dependent and the weekend
# labels below are Dutch ("zaterdag"/"zondag"), so this split only works
# in a Dutch locale -- confirm the intended runtime locale.
dt_replace_na$day <- weekdays(as.Date(dt_replace_na$date), abbreviate = FALSE)
# Split the dataset into weekend days and weekdays.
weekend <- dt_replace_na[dt_replace_na$day %in% c("zaterdag","zondag"),]
week <- dt_replace_na[!(dt_replace_na$day %in% c("zaterdag","zondag")),]
# Interval averages for each subset.
dt_replace_na1_weekend <- as.data.frame(aggregate(weekend$steps, by=list(weekend$interval), FUN=mean))
dt_replace_na1_week <- as.data.frame(aggregate(week$steps, by=list(week$interval), FUN=mean))
# Side-by-side plots of weekend vs weekday interval averages.
par(mfrow = c(1,2))
plot(x=dt_replace_na1_weekend$Group.1 , y = dt_replace_na1_weekend$x , type = "l")
plot(x=dt_replace_na1_week$Group.1 , y = dt_replace_na1_week$x , type = "l")
| /Project 1 rep_res.R | no_license | paul-celen/Rep-Research-project-1 | R | false | false | 3,671 | r | ###################### Obttain data ##################
# Read data Activity dataset
path <- getwd()
name <- "activity.csv"
dt_activity <- read.csv(paste(path, name, sep="/"), sep = ",")
sum(dt_activity_1$steps)
##################### Proces Date ##########################
# Part 1 - Plot and mead/Medain off step by day
# Copy data.frame in new data.frame
dt_activity_1 <- dt_activity
# Subset, exclude NA rows.
dt_activity_1 <- dt_activity_1[complete.cases(dt_activity_1),]
# aggrate by day
dt_steps_by_day <- as.data.frame(xtabs(steps ~ date , dt_activity_1))
# plot Histogram
hist(dt_steps_by_day$Freq )
# Calcualete mean and Median
mean_activity_1 <- mean (dt_steps_by_day$Freq)
median_activity_1 <- median(dt_steps_by_day$Freq)
# Part 2 - plot average 5 Min interval
# Copy data.frame in new data.frame
dt_activity_2 <- dt_activity
# Subset, exclude NA rows.
dt_activity_2 <- dt_activity_2[complete.cases(dt_activity_2),]
# aggrate by day
dt_steps_by_day2 <- as.data.frame(aggregate(dt_activity_2$steps, by=list(dt_activity_2$interval), FUN=mean, data = dt_activity_2 ))
# Max Value
maxvalue <- max(dt_steps_by_day2$x)
dt_steps_by_day2_max <- dt_steps_by_day2[dt_steps_by_day2$x == maxvalue, ]
# plot
plot_part2 <- plot(x=dt_steps_by_day2$Group.1 , y= dt_steps_by_day2$x , type = "l")
# part 3 - missing Value's
# report number of missing NA
dt_activity_na <- dt_activity[is.na(dt_activity[,1]),]
missing_na <- length(dt_activity_na[,1])
# filing in missing Value's
# value for missing values
mean_not_missing <- mean(dt_activity_1[complete.cases(dt_activity_1),1])
sum(dt_replace_na[complete.cases(dt_replace_na),1])
# replece na with mean
dt_replace_na <- dt_activity
dt_replace_na[is.na(dt_replace_na[,1]),1] <- mean_not_missing
dt_steps_by_day_replaced_na <- as.data.frame(xtabs(steps ~ date , dt_replace_na))
# plot average 3 min interval
# aggrate by day
dt_replace_na_agg <- as.data.frame(aggregate(dt_replace_na$steps, by=list(dt_replace_na$interval), FUN=mean, data = dt_replace_na))
# plot
plot(x=dt_replace_na_agg$Group.1 , y= dt_replace_na_agg$x , type = "l")
# part 4 - week / weekend days
# Add day name to data.frame
dt_replace_na$day <- weekdays(as.Date(dt_replace_na$date), abbreviate = FALSE)
# split dataset into weekday and weekenddays
weekend <- dt_replace_na[dt_replace_na$day %in% c("zaterdag","zondag"),]
week <- dt_replace_na[!(dt_replace_na$day %in% c("zaterdag","zondag")),]
# aggrate by day
dt_replace_na1_weekend <- as.data.frame(aggregate(weekend$steps, by=list(weekend$interval), FUN=mean, data = weekend ))
dt_replace_na1_week <- as.data.frame(aggregate(week$steps, by=list(week$interval), FUN=mean, data = week ))
# plot
par(mfrow = c(1,2))
plot(x=dt_replace_na1_weekend$Group.1 , y = dt_replace_na1_weekend$x , type = "l")
plot(x=dt_replace_na1_week$Group.1 , y = dt_replace_na1_week$x , type = "l")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var.R
\name{forecast.VAR}
\alias{forecast.VAR}
\title{Forecast a model from the fable package}
\usage{
\method{forecast}{VAR}(
object,
new_data = NULL,
specials = NULL,
bootstrap = FALSE,
times = 5000,
...
)
}
\arguments{
\item{object}{A model for which forecasts are required.}
\item{new_data}{A tsibble containing the time points and exogenous regressors to produce forecasts for.}
\item{specials}{(passed by \code{\link[fabletools:forecast]{fabletools::forecast.mdl_df()}}).}
\item{bootstrap}{If \code{TRUE}, then forecast distributions are computed using simulation with resampled errors.}
\item{times}{The number of sample paths to use in estimating the forecast distribution when \code{bootstrap = TRUE}.}
\item{...}{Other arguments passed to methods}
}
\value{
A list of forecasts.
}
\description{
Produces forecasts from a trained model.
}
\examples{
lung_deaths <- cbind(mdeaths, fdeaths) \%>\%
as_tsibble(pivot_longer = FALSE)
lung_deaths \%>\%
model(VAR(vars(mdeaths, fdeaths) ~ AR(3))) \%>\%
forecast()
}
| /man/forecast.VAR.Rd | no_license | cran/fable | R | false | true | 1,119 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/var.R
\name{forecast.VAR}
\alias{forecast.VAR}
\title{Forecast a model from the fable package}
\usage{
\method{forecast}{VAR}(
object,
new_data = NULL,
specials = NULL,
bootstrap = FALSE,
times = 5000,
...
)
}
\arguments{
\item{object}{A model for which forecasts are required.}
\item{new_data}{A tsibble containing the time points and exogenous regressors to produce forecasts for.}
\item{specials}{(passed by \code{\link[fabletools:forecast]{fabletools::forecast.mdl_df()}}).}
\item{bootstrap}{If \code{TRUE}, then forecast distributions are computed using simulation with resampled errors.}
\item{times}{The number of sample paths to use in estimating the forecast distribution when \code{bootstrap = TRUE}.}
\item{...}{Other arguments passed to methods}
}
\value{
A list of forecasts.
}
\description{
Produces forecasts from a trained model.
}
\examples{
lung_deaths <- cbind(mdeaths, fdeaths) \%>\%
as_tsibble(pivot_longer = FALSE)
lung_deaths \%>\%
model(VAR(vars(mdeaths, fdeaths) ~ AR(3))) \%>\%
forecast()
}
|
# One-group mean test (one-sample t-test).
setwd("/Users/yuhayung/Desktop/coding/ํ์/Rtraining/dataset2")
data <- read.csv("one_sample.csv", header = T)
str(data) # 150 observations
head(data)
x<- data$time
head(x)
summary(x)
mean(x)
mean(x,na.rm = T) # data cleaning: mean with missing values removed
x1 <- na.omit(x) # drop the NA entries entirely
mean(x1)
# Normality test.
# Null hypothesis: the distribution of x follows a normal distribution.
shapiro.test(x1) # Shapiro-Wilk normality test on x1
# Shapiro-Wilk normality test
#
# data: x1
# W = 0.99137, p-value = 0.7242
# The p-value (0.7242) exceeds the significance level (0.05), i.e.
# normality is not rejected, so a t-test is used for the mean comparison.
# Visual check of normality: histogram plus normal Q-Q plot.
par(mfrow = c(1,2))
hist(x1)
qqnorm(x1)
qqline(x1, lty = 1, col = "blue" )
# Mean-difference test.
# t.test(x, y = NULL, alternative = c("two.sided"/"less"/"greater"), mu = 0, paired = F, var.equal = F, conf.level = 0.95, ...)
t.test(x1, mu = 5.2) # mu = hypothesized population mean
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 0.0001417 <= smaller than the 0.05 significance level, so the null (mean = 5.2) is rejected
# alternative hypothesis: true mean is not equal to 5.2
# 95 percent confidence interval:
# 5.377613 5.736148
# sample estimates:
# mean of x
# 5.556881
t.test(x1, mu = 5.2, alternative = "greater", conf.level = 0.95)
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 7.083e-05 <= far below 0.05, so the one-sided alternative (mean > 5.2) is supported
# alternative hypothesis: true mean is greater than 5.2
# 95 percent confidence interval:
# 5.406833 Inf
# sample estimates:
# mean of x
# 5.556881
qt(0.05, 108, lower.tail = F) # critical t value for the one-sided test at alpha = 0.05
# [1] 1.659085
# NOTE(review): the original Korean conclusion here (mojibake in this
# copy) appears to interpret a separate proportion test: 14 dissatisfied
# customers at the 95% confidence level gave p = 0.0006735 < 0.05,
# indicating a difference from the historical 20% dissatisfaction rate
# (2019 baseline vs. post-CS-training 2020). It also notes that a
# two-sided test gives no direction, so a one-sided test is needed to
# show the rate actually improved -- confirm against the original source.
| /์ง๋จ ๊ฒ์ /แแ
กแซแแ
ตแฏ แแ
ตแธแแ
กแซ แแ
งแผแแ
ฒแซ แแ
ฅแทแแ
ฅแผ (T-test).R | no_license | freegray/R_training | R | false | false | 2,439 | r |
# ๋จ์ผ ์ง๋จ ํ๊ท ๊ฒ์ (๋จ์ผ ํ๋ณธ T ๊ฒ์ )
setwd("/Users/yuhayung/Desktop/coding/ํ์/Rtraining/dataset2")
data <- read.csv("one_sample.csv", header = T)
str(data) # 150
head(data)
x<- data$time
head(x)
summary(x)
mean(x)
mean(x,na.rm = T) # ๋ฐ์ดํฐ ์ ์
x1 <- na.omit(x) # na ๋ฐ์ดํฐ (omit) ๋นผ๊ธฐ
mean(x1)
# ์ ๊ท๋ถํฌ ๊ฒ์
# ๊ท๋ฌด๊ฐ์ค - x์ ๋ฐ์ดํฐ ๋ถํฌ๋ ์ ๊ท๋ถํฌ์ด๋ค.
shapiro.test(x1) # x1 ์ ๋ํ ์ ๊ท๋ถํฌ ๊ฒ์
# Shapiro-Wilk normality test
#
# data: x1
# W = 0.99137, p-value = 0.7242
# p ๋ฒจ๋ฅ๊ฐ์ด ์ ์ ์์ค ๋ณด๋ค ํฌ๋ค ์ฆ, ์ ๊ท๋ถํฌ๋ฅผ ๋ฐ๋ฅธ๋ค. ๋ฐ๋ผ์ T ๊ฒ์ ์ผ๋ก ํ๊ท ์ฐจ์ด ๊ฒ์ ์ ์ํํ๋ค.
# ์ ๊ท๋ถํฌ ์๊ฐํ
par(mfrow = c(1,2))
hist(x1)
qqnorm(x1)
qqline(x1, lty = 1, col = "blue" )
# ํ๊ท ์ฐจ์ด ๊ฒ์
# t- test (x, y = NULL, alternative = c("two.sided"/"less"/"greater"), mu = 0, paired = F, var.equal = F, conf.level = 0.95, ...)
t.test(x1, mu = 5.2) # mu ๋ชจ์ง๋จ์ ํ๊ท ๊ฐ
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 0.0001417 <= p-value ์ ์์์ค 0.05 ๋ณด๋ค ์๊ธฐ ๋๋ฌธ์ ๊ท๋ฌด๊ฐ์ค ์ฑํ
# alternative hypothesis: true mean is not equal to 5.2
# 95 percent confidence interval:
# 5.377613 5.736148
# sample estimates:
# mean of x
# 5.556881
t.test(x1, mu = 5.2, alternative = "greater", conf.level = 0.95)
# One Sample t-test
# data: x1
# t = 3.9461, df = 108, p-value = 7.083e-05 <= p-value ์ ์์์ค 0.05 ๋ณด๋ค ๋งค์ฐ ์๊ธฐ ๋๋ฌธ์ ์ฑํ
# alternative hypothesis: true mean is greater than 5.2
# 95 percent confidence interval:
# 5.406833 Inf
# sample estimates:
# mean of x
# 5.556881
qt(0.05, 108, lower.tail = F) # ๊ท๋ฌด๊ฐ์ค ์๊ณ๊ฐ ํ์ธ
# [1] 1.659085
# ๋ถ๋ง์กฑ ๊ณ ๊ฐ 14๋ช
์ ๋์์ผ๋ก 95% ์ ๋ขฐ ์์ค์์ ์์ธก ๊ฒ์ ์ ์ํํ ๊ฒฐ๊ณผ ๊ฒ์ ํต๊ณ๋ p- value ๊ฐ์ 0.0006735๋ก ์ ์ ์์ค 0.05 ๋ณด๋ค
# ์์ ๊ธฐ์กด ๋ถ๋ง์จ 20%๊ณผ ์ฐจ์ด๊ฐ ์๋ค๊ณ ๋ณผ ์ ์๋ค. ์ฆ, ๊ธฐ์กด 2019๋
๋ ๊ณ ๊ฐ ๋ถ๋ง์จ๊ณผ 2020๋
๋ CS ๊ต์ก ํ ๋ถ๋ง์จ์ ์ฐจ์ด๊ฐ ์๋ค๊ณ ๋ณผ ์ ์๋ค.
#
# ํ์ง๋ง ์์ธก ๊ฒ์ ๊ฒฐ๊ณผ์์๋ ๊ธฐ์กด ๋ถ๋ง์จ๋ณด๋ค ํฌ๋ค, ํน์ ์๋ค๋ ๋ฐฉํฅ์ฑ์ ์ ์๋์ง ์๋๋ค.
# ๋ฐ๋ผ์ ๋ฐฉํฅ์ฑ์ ๊ฐ๋ ๋จ์ธก ๊ฐ์ค ๊ฒ์ ์ ํตํด์ ๊ธฐ์กด ์ง๋จ๊ณผ ๋น๊ตํ์ฌ ์ ๊ท ์ง๋จ์ ๋ถ๋ง์จ์ด ๊ฐ์ ๋์๋์ง๋ฅผ ํ์ธํด์ผ ํ๋ค.
|
# Author: Francois Aguet
library(peer, quietly=TRUE) # https://github.com/PMBio/peer
library(argparser, quietly=TRUE)
# Write a table to a TSV file with an explicit name for the row-index
# column (write.table alone cannot label the row-name column).
#
# data       - data.frame or matrix to write; row names become column 1
# filename   - output path
# index.name - header label for the row-name column
WriteTable <- function(data, filename, index.name) {
  out_con <- file(filename, open = "wt")
  on.exit(close(out_con))  # ensure the connection closes even on error
  # Header row: index column label followed by the data column names.
  writeLines(paste(c(index.name, colnames(data)), collapse = "\t"),
             con = out_con, sep = "\n")
  # Body rows: tab-separated, unquoted, with row names as the first column.
  write.table(data, out_con, sep = "\t", col.names = FALSE, quote = FALSE)
}
# Command-line interface.
p <- arg_parser("Run PEER factor estimation")
p <- add_argument(p, "expr.file", help="")
p <- add_argument(p, "prefix", help="")
p <- add_argument(p, "n", help="Number of hidden confounders to estimate")
p <- add_argument(p, "--covariates", help="Observed covariates")
p <- add_argument(p, "--alphaprior_a", help="", default=0.001)
p <- add_argument(p, "--alphaprior_b", help="", default=0.01)
p <- add_argument(p, "--epsprior_a", help="", default=0.1)
p <- add_argument(p, "--epsprior_b", help="", default=10)
# p <- add_argument(p, "--max_iter", help="", default=1000)
p <- add_argument(p, "--max_iter", help="", default=100)
p <- add_argument(p, "--output_dir", short="-o", help="Output directory", default=".")
argv <- parse_args(p)

cat("PEER: loading expression data ... ")
# Count data rows up front so read.table can preallocate via nrows=.
# FIX: the extension checks previously used unescaped dots (e.g. '.gz$'
# matches ANY character before "gz"); the dots are now escaped.
if (grepl('\\.gz$', argv$expr.file)) {
  nrows <- as.integer(system(paste0("zcat ", argv$expr.file, " | wc -l | cut -d' ' -f1 "), intern=TRUE, wait=TRUE))
} else {
  nrows <- as.integer(system(paste0("wc -l ", argv$expr.file, " | cut -d' ' -f1 "), intern=TRUE, wait=TRUE))
}
# BED input carries leading annotation columns; keep only sample columns (7+).
if (grepl('\\.bed$', argv$expr.file) || grepl('\\.bed\\.gz$', argv$expr.file)) {
  df <- read.table(argv$expr.file, sep="\t", nrows=nrows, header=TRUE, check.names=FALSE, comment.char="")
  df <- df[, 7:ncol(df)]
} else {
  df <- read.table(argv$expr.file, sep="\t", nrows=nrows, header=TRUE, check.names=FALSE, comment.char="", row.names=1)
}
M <- t(as.matrix(df))  # samples x genes, the orientation PEER expects
cat("done.\n")

# run PEER
cat(paste0("PEER: estimating hidden confounders (", argv$n, ")\n"))
model <- PEER()
invisible(PEER_setNk(model, argv$n))
invisible(PEER_setPhenoMean(model, M))
invisible(PEER_setPriorAlpha(model, argv$alphaprior_a, argv$alphaprior_b))
invisible(PEER_setPriorEps(model, argv$epsprior_a, argv$epsprior_b))
invisible(PEER_setNmax_iterations(model, argv$max_iter))
# Optional observed covariates (samples x covariates), coerced to numeric.
if (!is.null(argv$covariates) && !is.na(argv$covariates)) {
  covar.df <- read.table(argv$covariates, sep="\t", header=TRUE, row.names=1, as.is=TRUE)
  covar.df <- sapply(covar.df, as.numeric)
  cat(paste0(" * including ", dim(covar.df)[2], " covariates", "\n"))
  invisible(PEER_setCovariates(model, as.matrix(covar.df))) # samples x covariates
}
time <- system.time(PEER_update(model))

# Pull out the fitted quantities.
X <- PEER_getX(model) # samples x PEER factors
A <- PEER_getAlpha(model) # PEER factors x 1
R <- t(PEER_getResiduals(model)) # genes x samples

# Add relevant row/column names.
# FIX: the factor-name vector was previously stored in a variable named
# `c`, shadowing base::c(); renamed to factor_names.
factor_names <- paste0("InferredCov", seq_len(ncol(X)))
rownames(X) <- rownames(M)
colnames(X) <- factor_names
rownames(A) <- factor_names
colnames(A) <- "Alpha"
A <- as.data.frame(A)
A$Relevance <- 1.0 / A$Alpha  # relevance = inverse ARD precision
rownames(R) <- colnames(M)
colnames(R) <- rownames(M)

# write results
cat("PEER: writing results ... ")
WriteTable(t(X), file.path(argv$output_dir, paste0(argv$prefix, ".PEER_covariates.txt")), "ID") # format(X, digits=6)
WriteTable(A, file.path(argv$output_dir, paste0(argv$prefix, ".PEER_alpha.txt")), "ID")
WriteTable(R, file.path(argv$output_dir, paste0(argv$prefix, ".PEER_residuals.txt")), "ID")
cat("done.\n") | /MP_eQTL_QTLtools/src/run_PEER.R | no_license | zhengzhanye/mulinlab-pip | R | false | false | 3,416 | r | # Author: Francois Aguet
library(peer, quietly=TRUE) # https://github.com/PMBio/peer
library(argparser, quietly=TRUE)
WriteTable <- function(data, filename, index.name) {
datafile <- file(filename, open = "wt")
on.exit(close(datafile))
header <- c(index.name, colnames(data))
writeLines(paste0(header, collapse="\t"), con=datafile, sep="\n")
write.table(data, datafile, sep="\t", col.names=FALSE, quote=FALSE)
}
p <- arg_parser("Run PEER factor estimation")
p <- add_argument(p, "expr.file", help="")
p <- add_argument(p, "prefix", help="")
p <- add_argument(p, "n", help="Number of hidden confounders to estimate")
p <- add_argument(p, "--covariates", help="Observed covariates")
p <- add_argument(p, "--alphaprior_a", help="", default=0.001)
p <- add_argument(p, "--alphaprior_b", help="", default=0.01)
p <- add_argument(p, "--epsprior_a", help="", default=0.1)
p <- add_argument(p, "--epsprior_b", help="", default=10)
# p <- add_argument(p, "--max_iter", help="", default=1000)
p <- add_argument(p, "--max_iter", help="", default=100)
p <- add_argument(p, "--output_dir", short="-o", help="Output directory", default=".")
argv <- parse_args(p)
cat("PEER: loading expression data ... ")
if (grepl('.gz$', argv$expr.file)) {
nrows <- as.integer(system(paste0("zcat ", argv$expr.file, " | wc -l | cut -d' ' -f1 "), intern=TRUE, wait=TRUE))
} else {
nrows <- as.integer(system(paste0("wc -l ", argv$expr.file, " | cut -d' ' -f1 "), intern=TRUE, wait=TRUE))
}
if (grepl('.bed$', argv$expr.file) || grepl('.bed.gz$', argv$expr.file)) {
df <- read.table(argv$expr.file, sep="\t", nrows=nrows, header=TRUE, check.names=FALSE, comment.char="")
df <- df[, 7:ncol(df)]
} else {
df <- read.table(argv$expr.file, sep="\t", nrows=nrows, header=TRUE, check.names=FALSE, comment.char="", row.names=1)
}
M <- t(as.matrix(df))
cat("done.\n")
# run PEER
cat(paste0("PEER: estimating hidden confounders (", argv$n, ")\n"))
model <- PEER()
invisible(PEER_setNk(model, argv$n))
invisible(PEER_setPhenoMean(model, M))
invisible(PEER_setPriorAlpha(model, argv$alphaprior_a, argv$alphaprior_b))
invisible(PEER_setPriorEps(model, argv$epsprior_a, argv$epsprior_b))
invisible(PEER_setNmax_iterations(model, argv$max_iter))
if (!is.null(argv$covariates) && !is.na(argv$covariates)) {
covar.df <- read.table(argv$covariates, sep="\t", header=TRUE, row.names=1, as.is=TRUE)
covar.df <- sapply(covar.df, as.numeric)
cat(paste0(" * including ", dim(covar.df)[2], " covariates", "\n"))
invisible(PEER_setCovariates(model, as.matrix(covar.df))) # samples x covariates
}
time <- system.time(PEER_update(model))
X <- PEER_getX(model) # samples x PEER factors
A <- PEER_getAlpha(model) # PEER factors x 1
R <- t(PEER_getResiduals(model)) # genes x samples
# add relevant row/column names
c <- paste0("InferredCov",1:ncol(X))
rownames(X) <- rownames(M)
colnames(X) <- c
rownames(A) <- c
colnames(A) <- "Alpha"
A <- as.data.frame(A)
A$Relevance <- 1.0 / A$Alpha
rownames(R) <- colnames(M)
colnames(R) <- rownames(M)
# write results
cat("PEER: writing results ... ")
WriteTable(t(X), file.path(argv$output_dir, paste0(argv$prefix, ".PEER_covariates.txt")), "ID") # format(X, digits=6)
WriteTable(A, file.path(argv$output_dir, paste0(argv$prefix, ".PEER_alpha.txt")), "ID")
WriteTable(R, file.path(argv$output_dir, paste0(argv$prefix, ".PEER_residuals.txt")), "ID")
cat("done.\n") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lisy.R
\name{lisy}
\alias{lisy}
\title{lisy}
\usage{
lisy(seed = 1, nclues = 4, nspread = 5, incidental = "names",
antonym = "both", ninfer = 1, direct = "of", Ndist = 4,
dist = "mixed", distprob = 0.5, itemSet = "random", items = NULL,
scales = NULL)
}
\arguments{
\item{seed}{Generates the same question again from local computer.}
\item{nclues}{Generates the number of sentences to make up the item.}
\item{nspread}{Calculates the spread of possible incidentals in total.}
\item{incidental}{Tells the function whether the item features are 'names' or 'objects'.}
\item{antonym}{Determine whether to use both antonyms ('both') or only one type ("first" or "second").}
\item{ninfer}{Generate answers that requires a X amount of inference from the items. Up to 3 is the maximum.}
\item{direct}{Deciding on whether the clues are organised in an ordered("of" = ordered forward / "ob" = ordered backward) or unordered ('alt' = alternative) fashion. Note. 'alt' can only be used when ninfer is 3 or greater.}
\item{Ndist}{Returns the number of distractors per question.}
\item{dist}{Select the type of distractors. You have three options ('mixed', 'invalid','false'). If dist='false', then the number of false distractors must be less than the number of clues by 1.}
\item{distprob}{Calculates the number of comparison variation for the distractors.}
\item{itemSet}{This is the choice of itemset you want. If itemSet='random' then the generator will randomly select one ('People', 'Fruits', 'Superheroes'). Change itemset='own' if you are using your own item set.}
\item{items}{Input own item type. At least 10 items. Default items are used when items = NULL.}
\item{scales}{Input own antonyms. At least 2 antonyms (i.e."bigger","smaller"). Default antonyms are used when scales = NULL.}
}
\description{
This function generates linear syllogistic reasoning items. This is for research purposes.
}
\details{
There are several things to note. To use your own item set, please have at least 10 items within the item set. In order for the antonym comparison to work, please ensure that you have at least 2 antonyms. The function will stop if the criteria are not met. The generation of items is slower if you have a huge item set (e.g. in the millions!).
When nspread and nclue is = 3. This means that there are 3 sentences, and only 3 names. This makes it impossible to generate an invalid distractor. As such, only the false distractors will be created. Since there are only three clues, then at most 2 false distractors can be created.
When nspread and nclues are the same, all the names of the invalid distractors will be taken from the names that are used in the clues. As nspread value increases, the likelihood of having names not taken from the clues increases. Making the distractors fairly easy as there is a higher likelihood that the names taken from the matrix might not appear in the clues. Hence, keeping the value of nspread and nclue as close as possible is recommended.
This function only generates items that require up to 3 inferences. As the number of required inferences increases, the number of clues needed also increases. Inference is the implied comparison between sentences which allows the test taker to make an informed decision. When ninfer = 1 and the antonym is declared as either 'first' or 'second', then the correct answer will always be the opposite of the antonym used in the sentence. When ninfer = 2, the correct answer will be in the right direction.
Direct is the direction of the line of thought. If direct = "ob" it means that solving the items requires the test taker to work 'ordered backward'. If it is 'of', it means 'ordered forward', and finally if it is 'alt', then it means the clues are not in order. direct = 'alt' can only be used when ninfer = 3.
When distprob = 0.5, the distribution of the antonym for the distractors will be mixed. When distprob is either 1 or 0, then only one of the two antonym will be used. This is only used if one wishes to study distractor analysis.
}
\examples{
#Generate an item with default item set
lisy(seed=10,nclues=4,nspread=6,incidental='names',
antonym="first",ninfer = 3, direct='ob', Ndist=3,
dist="mixed",distprob=0.5,itemSet='random',
items= NULL,scales = NULL)
#Item set
superheroes <- c('Spider man','Super man','Batman','Wolverine',
'Catwoman','Thor','The Shadow','Silver Surfer', 'Flash','Wonder woman',
'Mr. Fantastic', 'Aqua man', "Hawkeye", 'Starfire', 'Venom', "General Zod")
#Antonym
compare <- c("taller","shorter", "older", "younger",
"smaller", "bigger","stronger", "weaker")
#Generate item with own dataset
lisy(seed=10,nclues=4,nspread=6,incidental='names',
antonym="first",ninfer = 3, direct='ob',
Ndist=3, dist="mixed",distprob=0.5,
itemSet='own',items= superheroes, scales = compare)
#loop through 30 items
nitems <- 30
params <- data.frame(seed=1:nitems,
nclues=ceiling((1:nitems)/20)+3,
nspread=ceiling((1:nitems)/15)+4)
qtable <- NULL
for (i in 1:nitems) {
runs <- lisy(seed=i,
nclues=params$nclues[i],
nspread=params$nspread[i],
incidental= 'names',antonym="first",ninfer = 2,
direct='of', Ndist=4,dist="mixed",distprob=.5,
itemSet='own', items= superheroes, scales = compare)
qtable[[i]] <- runs
}
qtable
}
\author{
Aiden Loe and Francis Smart
}
| /man/lisy.Rd | no_license | EconometricsBySimulation/AIG | R | false | true | 5,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lisy.R
\name{lisy}
\alias{lisy}
\title{lisy}
\usage{
lisy(seed = 1, nclues = 4, nspread = 5, incidental = "names",
antonym = "both", ninfer = 1, direct = "of", Ndist = 4,
dist = "mixed", distprob = 0.5, itemSet = "random", items = NULL,
scales = NULL)
}
\arguments{
\item{seed}{Generates the same question again from local computer.}
\item{nclues}{Generates the number of sentences to make up the item.}
\item{nspread}{Calculates the spread of possible incidentals in total.}
\item{incidental}{Tells the function whether the item features are 'names' or 'objects'.}
\item{antonym}{Determine whether to use both antonyms ('both') or only one type ("first" or "second").}
\item{ninfer}{Generate answers that requires a X amount of inference from the items. Up to 3 is the maximum.}
\item{direct}{Deciding on whether the clues are organised in an ordered("of" = ordered forward / "ob" = ordered backward) or unordered ('alt' = alternative) fashion. Note. 'alt' can only be used when ninfer is 3 or greater.}
\item{Ndist}{Returns the number of distractors per question.}
\item{dist}{Select the type of distractors. You have three options ('mixed', 'invalid','false'). If dist='false', then the number of false distractors must be less than the number of clues by 1.}
\item{distprob}{Calculates the number of comparison variation for the distractors.}
\item{itemSet}{This is the choice of itemset you want. If itemSet='random' then the generator will randomly select one ('People', 'Fruits', 'Superheroes'). Change itemset='own' if you are using your own item set.}
\item{items}{Input own item type. At least 10 items. Default items are used when items = NULL.}
\item{scales}{Input own antonyms. At least 2 antonyms (i.e."bigger","smaller"). Default antonyms are used when scales = NULL.}
}
\description{
This function generates linear syllogistic reasoning items. This is for research purposes.
}
\details{
There are several things to note. To use own item set, please have at least 10 items within the itemset. In order for antonyms comparison to work, please ensure that you have at least 2 antonyms The function will stop if the criteria is not met. The genearation of items are slower if you have a huge item set (e.g. In the millions!).
When nspread and nclue is = 3. This means that there are 3 sentences, and only 3 names. This makes it impossible to generate an invalid distractor. As such, only the false distractors will be created. Since there are only three clues, then at most 2 false distractors can be created.
When nspread and nclues are the same, all the names of the invalid distractors will be taken from the names that are used in the clues. As nspread value increases, the likelihood of having names not taken from the clues increases. Making the distractors fairly easy as there is a higher likelihood that the names taken from the matrix might not appear in the clues. Hence, keeping the value of nspread and nclue as close as possible is recommended.
This function only generates items that requires up to 3 inferences. As the required inferences increases, then number of clues needed also increases. Inference is the implied comparison between sentences which allows the test taker to make an inform decision. When ninfer = 1 and the antonym is declared as either 'first' or 'second', then the correct answer will always be the opposite of the antonym used in the sentence. When ninfer = 2, the correct answer will be in the right direction.
Direct is the direction of the line of thought. If direct = "ob" it means that solving the items requires the test taker to work 'ordered backward'. If it is 'of', it means 'ordered forward' and finally if it is 'alt', then it means the clues are not inorder. direct = 'alt' can only be used when ninfer = 3.
When distprob = 0.5, the distribution of the antonym for the distractors will be mixed. When distprob is either 1 or 0, then only one of the two antonym will be used. This is only used if one wishes to study distractor analysis.
}
\examples{
#Generate an item with default item set
lisy(seed=10,nclues=4,nspread=6,incidental='names',
antonym="first",ninfer = 3, direct='ob', Ndist=3,
dist="mixed",distprob=0.5,itemSet='random',
items= NULL,scales = NULL)
#Item set
superheroes <- c('Spider man','Super man','Batman','Wolverine',
'Catwoman','Thor','The Shadow','Silver Surfer', 'Flash','Wonder woman',
'Mr. Fantastic', 'Aqua man', "Hawkeye", 'Starfire', 'Venom', "General Zod")
#Antonym
compare <- c("taller","shorter", "older", "younger",
"smaller", "bigger","stronger", "weaker")
#Generate item with own dataset
lisy(seed=10,nclues=4,nspread=6,incidental='names',
antonym="first",ninfer = 3, direct='ob',
Ndist=3, dist="mixed",distprob=0.5,
itemSet='own',items= superheroes, scales = compare)
#loop through 30 items
nitems <- 30
params <- data.frame(seed=1:nitems,
nclues=ceiling((1:nitems)/20)+3,
nspread=ceiling((1:nitems)/15)+4)
qtable <- NULL
for (i in 1:nitems) {
runs <- lisy(seed=i,
nclues=params$nclues[i],
nspread=params$nspread[i],
incidental= 'names',antonym="first",ninfer = 2,
direct='of', Ndist=4,dist="mixed",distprob=.5,
itemSet='own', items= superheroes, scales = compare)
qtable[[i]] <- runs
}
qtable
}
\author{
Aiden Loe and Francis Smart
}
|
#' Plot a confusion matrix as a ggplot2 tile heat map
#'
#' Renders the contingency table of a `caret::confusionMatrix` result as
#' a white-to-steelblue heat map (fill is log(Freq) so large classes do
#' not wash out small ones), with raw counts overlaid as text and the
#' overall accuracy shown in the caption.
#'
#' Only `m$table` (the Reference x Prediction table) and `m$overall[1]`
#' (overall accuracy) are read from `m`.
#'
#' @param m A `caret::confusionMatrix` object (or any list with
#'   compatible `$table` and `$overall` components).
#' @param plot_title Optional character string used as the plot title.
#' @return A ggplot object.
ggplotConfusionMatrix <- function(m, plot_title = NULL) {
  # Namespaced pkg:: calls replace the original in-function library()
  # calls, which mutated the caller's search path as a side effect; the
  # original also attached caret and tidyr, which the body never uses.
  mycaption <- paste("Accuracy", scales::percent_format()(m$overall[1]))
  p <- ggplot2::ggplot(
    data = as.data.frame(m$table),
    ggplot2::aes(x = Reference, y = Prediction)
  ) +
    # log(Freq) compresses the dynamic range of the fill; cells with
    # Freq == 0 map to -Inf and render as NA fill (same as the original).
    ggplot2::geom_tile(ggplot2::aes(fill = log(Freq)), colour = "white") +
    ggplot2::scale_fill_gradient(low = "white", high = "steelblue") +
    ggplot2::geom_text(
      ggplot2::aes(x = Reference, y = Prediction, label = Freq)
    ) +
    ggplot2::theme_minimal() +
    ggplot2::theme(
      legend.position = "none",
      text = ggplot2::element_text(size = 20),
      axis.text = ggplot2::element_text(size = 18),
      plot.title = ggplot2::element_text(hjust = 0.5)
    ) +
    ggplot2::labs(caption = mycaption, title = plot_title)
  return(p)
}
| /sandbox/ggplotConfusionMatrix.R | permissive | dib-lab/2020-ibd | R | false | false | 869 | r | ggplotConfusionMatrix <- function(m, plot_title = NULL){
# NOTE(review): this is a byte-identical duplicate of the
# ggplotConfusionMatrix definition earlier in this dump, fused onto a
# dataset-metadata row; the code is kept verbatim.
#
# Renders a caret::confusionMatrix-style object `m` as a ggplot2 tile
# heat map: fill is log(Freq), raw counts are overlaid as text, and the
# overall accuracy (m$overall[1]) appears in the caption.
# Only m$table and m$overall[1] are read from `m`.
# Returns a ggplot object.
#
# Side effect: the library() calls below attach caret/ggplot2/scales/
# tidyr onto the caller's search path; caret and tidyr are not used by
# the body itself.
library(caret)
library(ggplot2)
library(scales)
library(tidyr)
# Dead code: an earlier caption also reported Kappa (m$overall[2]).
#mycaption <- paste("Accuracy", percent_format()(m$overall[1]),
# "Kappa", percent_format()(m$overall[2]))
mycaption <- paste("Accuracy", percent_format()(m$overall[1]))
p <-
# m$table is a Reference x Prediction contingency table; as.data.frame
# yields columns Reference, Prediction, Freq used in the aesthetics.
ggplot(data = as.data.frame(m$table) ,
aes(x = Reference, y = Prediction)) +
# log(Freq) compresses the fill range; Freq == 0 maps to -Inf (NA fill).
geom_tile(aes(fill = log(Freq)), colour = "white") +
scale_fill_gradient(low = "white", high = "steelblue") +
geom_text(aes(x = Reference, y = Prediction, label = Freq)) +
theme_minimal() +
theme(legend.position = "none",
text = element_text(size = 20),
axis.text = element_text(size = 18),
plot.title = element_text(hjust = 0.5)) +
labs(caption = mycaption, title = plot_title)
return(p)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.