content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53recoveryreadiness_operations.R
\name{route53recoveryreadiness_delete_recovery_group}
\alias{route53recoveryreadiness_delete_recovery_group}
\title{Deletes a recovery group}
\usage{
route53recoveryreadiness_delete_recovery_group(RecoveryGroupName)
}
\arguments{
\item{RecoveryGroupName}{[required] The name of a recovery group.}
}
\description{
Deletes a recovery group.
See \url{https://www.paws-r-sdk.com/docs/route53recoveryreadiness_delete_recovery_group/} for full documentation.
}
\keyword{internal}
|
/cran/paws.networking/man/route53recoveryreadiness_delete_recovery_group.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 591
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53recoveryreadiness_operations.R
\name{route53recoveryreadiness_delete_recovery_group}
\alias{route53recoveryreadiness_delete_recovery_group}
\title{Deletes a recovery group}
\usage{
route53recoveryreadiness_delete_recovery_group(RecoveryGroupName)
}
\arguments{
\item{RecoveryGroupName}{[required] The name of a recovery group.}
}
\description{
Deletes a recovery group.
See \url{https://www.paws-r-sdk.com/docs/route53recoveryreadiness_delete_recovery_group/} for full documentation.
}
\keyword{internal}
|
# Sample vectors of differing lengths used to exercise find_longer_vector().
first_vec <- c(1, 5, 4, 2, 3, 7, 6)
second_vec <- c(9, 2, 1, 8, 3, 4, 5, 6, 10, 7, 12, 11)
third_vec <- c(8, 3, 5, 1, 7, 1, 10)

# Report which of two vectors is longer: "First" if the first is longer,
# "Second" if the second is longer, "Equal Length" otherwise.
find_longer_vector <- function(vec_one, vec_two) {
  len_diff <- length(vec_one) - length(vec_two)
  if (len_diff > 0) {
    "First"
  } else if (len_diff < 0) {
    "Second"
  } else {
    "Equal Length"
  }
}

first_vs_second <- find_longer_vector(first_vec, second_vec)
first_vs_third <- find_longer_vector(first_vec, third_vec)
---------------------------------
# Test whether `divisor` is evenly divisible by `dividend`.
#
# NOTE: the argument names are reversed relative to standard usage — the
# function computes first-arg mod second-arg. Names are kept unchanged for
# backward compatibility with existing callers.
#
# The modulo operator replaces the original manual floor/subtract remainder
# computation; the results are identical (R's %% uses floored division,
# matching the original floor()-based remainder).
is_divisible <- function(divisor, dividend) {
  (divisor %% dividend) == 0
}

div_5731_by_11 <- is_divisible(5731, 11)
--------------------------------
# Subtract each additional argument from `start`, left to right.
# With no extra arguments, returns `start` unchanged.
subtract_all <- function(start, ...) {
  Reduce(`-`, list(...), init = start)
}

first_subtraction <- subtract_all(10, 1, 2, 3)
second_subtraction <- subtract_all(100, 71, 22)
----------------------------------
|
/functions_in_R.r
|
no_license
|
SubbuDS/Data_Science-and-Big-Data
|
R
| false
| false
| 1,089
|
r
|
# Sample vectors of differing lengths used to exercise find_longer_vector().
first_vec <- c(1, 5, 4, 2, 3, 7, 6)
second_vec <- c(9, 2, 1, 8, 3, 4, 5, 6, 10, 7, 12, 11)
third_vec <- c(8, 3, 5, 1, 7, 1, 10)

# Report which of two vectors is longer: "First" if the first is longer,
# "Second" if the second is longer, "Equal Length" otherwise.
find_longer_vector <- function(vec_one, vec_two) {
  len_diff <- length(vec_one) - length(vec_two)
  if (len_diff > 0) {
    "First"
  } else if (len_diff < 0) {
    "Second"
  } else {
    "Equal Length"
  }
}

first_vs_second <- find_longer_vector(first_vec, second_vec)
first_vs_third <- find_longer_vector(first_vec, third_vec)
---------------------------------
# Test whether `divisor` is evenly divisible by `dividend`.
#
# NOTE: the argument names are reversed relative to standard usage — the
# function computes first-arg mod second-arg. Names are kept unchanged for
# backward compatibility with existing callers.
#
# The modulo operator replaces the original manual floor/subtract remainder
# computation; the results are identical (R's %% uses floored division,
# matching the original floor()-based remainder).
is_divisible <- function(divisor, dividend) {
  (divisor %% dividend) == 0
}

div_5731_by_11 <- is_divisible(5731, 11)
--------------------------------
# Subtract each additional argument from `start`, left to right.
# With no extra arguments, returns `start` unchanged.
subtract_all <- function(start, ...) {
  Reduce(`-`, list(...), init = start)
}

first_subtraction <- subtract_all(10, 1, 2, 3)
second_subtraction <- subtract_all(100, 71, 22)
----------------------------------
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomForest.R
\name{build_model.randomForest}
\alias{build_model.randomForest}
\title{build_model.randomForest}
\usage{
\method{build_model}{randomForest}(object, which_tree = 1, ...)
}
\arguments{
\item{object}{an object of class randomForest}
\item{which_tree}{an integer indicating which single tree to build}
\item{...}{further arguments passed to or from other methods}
}
\value{
a \code{list} of lists representation of the tree that can be
inserted into a cell or pool
}
\description{
Builds an entire PFA list of lists based on a single randomForest model tree
}
\examples{
dat <- data.frame(X1 = runif(100),
X2 = rnorm(100))
dat$Y <- factor((rexp(100,5) + 5 * dat$X1 - 4 * dat$X2) > 0)
model <- randomForest::randomForest(Y ~ X1 + X2, data=dat, ntree=10)
my_tree <- build_model(model, 1)
}
|
/aurelius/man/build_model.randomForest.Rd
|
permissive
|
mafpimentel/hadrian
|
R
| false
| true
| 900
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/randomForest.R
\name{build_model.randomForest}
\alias{build_model.randomForest}
\title{build_model.randomForest}
\usage{
\method{build_model}{randomForest}(object, which_tree = 1, ...)
}
\arguments{
\item{object}{an object of class randomForest}
\item{which_tree}{an integer indicating which single tree to build}
\item{...}{further arguments passed to or from other methods}
}
\value{
a \code{list} of lists representation of the tree that can be
inserted into a cell or pool
}
\description{
Builds an entire PFA list of lists based on a single randomForest model tree
}
\examples{
dat <- data.frame(X1 = runif(100),
X2 = rnorm(100))
dat$Y <- factor((rexp(100,5) + 5 * dat$X1 - 4 * dat$X2) > 0)
model <- randomForest::randomForest(Y ~ X1 + X2, data=dat, ntree=10)
my_tree <- build_model(model, 1)
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing the matrix `x` and the cached
## inverse `inv`:
##   set(y)        -- replace the matrix and invalidate the cached inverse
##   get()         -- return the current matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
## BUG FIX: the original paste contained stray R console "+" continuation
## prompts and an extra closing brace, making it syntactically invalid R.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached, it is returned directly (with a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned. Extra arguments are forwarded to solve().
## BUG FIX: the original paste contained stray R console "+" continuation
## prompts and an extra closing brace, making it syntactically invalid R.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # solve() on a single matrix computes its inverse
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
Makako78/ProgrammingAssignment2
|
R
| false
| false
| 874
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of four closures sharing the matrix `x` and the cached
## inverse `inv`:
##   set(y)        -- replace the matrix and invalidate the cached inverse
##   get()         -- return the current matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL if not yet computed)
## BUG FIX: the original paste contained stray R console "+" continuation
## prompts and an extra closing brace, making it syntactically invalid R.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; reset whenever the matrix changes
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached, it is returned directly (with a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned. Extra arguments are forwarded to solve().
## BUG FIX: the original paste contained stray R console "+" continuation
## prompts and an extra closing brace, making it syntactically invalid R.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # solve() on a single matrix computes its inverse
  x$setinverse(inv)
  inv
}
|
# Test-harness entry point: run the full testthat suite shipped inside
# the installed data.table package.
library(testthat)
library(data.table)
test_package("data.table")
|
/tests/test-all.R
|
no_license
|
dselivanov/data.table
|
R
| false
| false
| 66
|
r
|
# Test-harness entry point: run the full testthat suite shipped inside
# the installed data.table package.
library(testthat)
library(data.table)
test_package("data.table")
|
# Extracted example for Seurat's AddSamples(): merge cells from one Seurat
# object into another via the expression matrix.
library(Seurat)
### Name: AddSamples
### Title: Add samples into existing Seurat object.
### Aliases: AddSamples
### ** Examples
# Split the bundled pbmc_small object into two disjoint 40-cell subsets
# selected by cell name.
pbmc1 <- SubsetData(object = pbmc_small, cells.use = pbmc_small@cell.names[1:40])
pbmc1
pbmc2 <- SubsetData(object = pbmc_small, cells.use = pbmc_small@cell.names[41:80])
# Pull the expression matrix out of the second subset ...
pbmc2_data <- pbmc2@data
dim(pbmc2_data)
# ... and add those cells back into the first object as new samples.
pbmc_added <- AddSamples(object = pbmc1, new.data = pbmc2_data)
pbmc_added
|
/data/genthat_extracted_code/Seurat/examples/AddSamples.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 424
|
r
|
# Extracted example for Seurat's AddSamples(): merge cells from one Seurat
# object into another via the expression matrix.
library(Seurat)
### Name: AddSamples
### Title: Add samples into existing Seurat object.
### Aliases: AddSamples
### ** Examples
# Split the bundled pbmc_small object into two disjoint 40-cell subsets
# selected by cell name.
pbmc1 <- SubsetData(object = pbmc_small, cells.use = pbmc_small@cell.names[1:40])
pbmc1
pbmc2 <- SubsetData(object = pbmc_small, cells.use = pbmc_small@cell.names[41:80])
# Pull the expression matrix out of the second subset ...
pbmc2_data <- pbmc2@data
dim(pbmc2_data)
# ... and add those cells back into the first object as new samples.
pbmc_added <- AddSamples(object = pbmc1, new.data = pbmc2_data)
pbmc_added
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslsmooth.help.R
\name{fslsmooth.help}
\alias{fslsmooth.help}
\title{fslsmooth Help}
\usage{
fslsmooth.help(...)
}
\arguments{
\item{...}{passed to \code{\link{fslmaths.help}}}
}
\value{
Prints help output and returns output as character vector
}
\description{
This function calls \code{fslmaths}'s help, as
\code{fslsmooth} is a wrapper for \code{fslmaths}
}
\examples{
if (have.fsl()){
fslsmooth.help()
}
}
|
/man/fslsmooth.help.Rd
|
no_license
|
kuonanhong/fslr
|
R
| false
| true
| 491
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fslsmooth.help.R
\name{fslsmooth.help}
\alias{fslsmooth.help}
\title{fslsmooth Help}
\usage{
fslsmooth.help(...)
}
\arguments{
\item{...}{passed to \code{\link{fslmaths.help}}}
}
\value{
Prints help output and returns output as character vector
}
\description{
This function calls \code{fslmaths}'s help, as
\code{fslsmooth} is a wrapper for \code{fslmaths}
}
\examples{
if (have.fsl()){
fslsmooth.help()
}
}
|
# Extracted example for CryptRndTest's random.walk.tests(): run the
# excursion, expansion and height randomness tests on RNG output.
library(CryptRndTest)
### Name: random.walk.tests
### Title: Random Walk Tests
### Aliases: random.walk.tests
### Keywords: Anderson-Darling Kolmogorov-Smirnov Chi-Square nonparametric
### goodness-of-fit test randomness test
### ** Examples
RNGkind(kind = "Super-Duper")
B=64 # Bit length is 64.
k=500 # Generate 500 integers.
# Draw k values uniformly from [0, 2^B - 1].
# NOTE(review): 2^64 - 1 exceeds exact double precision, so these
# "integers" are approximate — confirm this matches the package's intent.
dat=round(runif(k,0,(2^B-1)))
x=sfsmisc::digitsBase(dat, base= 2, B) #Convert to base 2
alpha = 0.05
# Apply the three random-walk tests at significance level alpha.
test=random.walk.tests(x, B, Excursion = TRUE, Expansion = TRUE, Height = TRUE, alpha)
print(test)
|
/data/genthat_extracted_code/CryptRndTest/examples/random.walk.tests.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 584
|
r
|
# Extracted example for CryptRndTest's random.walk.tests(): run the
# excursion, expansion and height randomness tests on RNG output.
library(CryptRndTest)
### Name: random.walk.tests
### Title: Random Walk Tests
### Aliases: random.walk.tests
### Keywords: Anderson-Darling Kolmogorov-Smirnov Chi-Square nonparametric
### goodness-of-fit test randomness test
### ** Examples
RNGkind(kind = "Super-Duper")
B=64 # Bit length is 64.
k=500 # Generate 500 integers.
# Draw k values uniformly from [0, 2^B - 1].
# NOTE(review): 2^64 - 1 exceeds exact double precision, so these
# "integers" are approximate — confirm this matches the package's intent.
dat=round(runif(k,0,(2^B-1)))
x=sfsmisc::digitsBase(dat, base= 2, B) #Convert to base 2
alpha = 0.05
# Apply the three random-walk tests at significance level alpha.
test=random.walk.tests(x, B, Excursion = TRUE, Expansion = TRUE, Height = TRUE, alpha)
print(test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getNodeCDF.R
\name{getNodeCDF}
\alias{getNodeCDF}
\title{Get CDF for Node}
\usage{
getNodeCDF(varName, datasources)
}
\arguments{
\item{varName}{a character, the name of study variable.}
\item{datasources}{a list of parameters to access file systems or databases.}
}
\description{
Computes the x value from given percentile.
}
\details{
Read each data file (node) and compute the local cumDistFunc for each data node.
}
\author{
Rui Camacho, Paula Raissa
}
|
/man/getNodeCDF.Rd
|
no_license
|
paularaissa/distStatsServer
|
R
| false
| true
| 536
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getNodeCDF.R
\name{getNodeCDF}
\alias{getNodeCDF}
\title{Get CDF for Node}
\usage{
getNodeCDF(varName, datasources)
}
\arguments{
\item{varName}{a character, the name of study variable.}
\item{datasources}{a list of parameters to access file systems or databases.}
}
\description{
Computes the x value from given percentile.
}
\details{
Read each data file (node) and compute the local cumDistFunc for each data node.
}
\author{
Rui Camacho, Paula Raissa
}
|
# Violin plot of network stabilisation times for three network families
# (T, A, E), each measured with 1..7 controllers.
#
# DEFECT FIXED: the original contained 21 copy-pasted scan() calls plus 21
# redundant `x_i <- c(t_i)` reassignments; this version reads the same 21
# files in one loop and produces the identical plot.
require(vioplot)
dev.new(width=850, height=650)

# Directory holding one whitespace-separated sample file per measurement.
base_dir <- "~/SDN/renaissance/Evaluation/ivanandantonPlots/stabTimeMultiC"

# File stems t1..t7, a1..a7, e1..e7; axis labels are their upper-case forms.
stems <- c(paste0("t", 1:7), paste0("a", 1:7), paste0("e", 1:7))
samples <- lapply(stems, function(s) scan(file.path(base_dir, paste0(s, ".txt"))))

op <- par(mar = c(6, 6, 4, 2) + 0.2)
# Empty canvas with fixed limits; the violins are added on top.
plot(0:1, 0:1, type = "n", xlim = c(0.5, 21.5), ylim = c(9, 130),
     axes = FALSE, ann = FALSE)
# Pass the 21 sample vectors as positional arguments, as the original did.
do.call(vioplot, c(samples, list(col = "gray", add = TRUE)))
title(font.lab = 1, cex.lab = 2, ylab = "Time (seconds)",
      xlab = "Network (Controllers)")
axis(side = 1, at = 1:21, labels = toupper(stems), cex.axis = 1.5, font = 1)
axis(2, at = seq(10, 130, by = 5), las = 2, cex.axis = 1.5)
par(op)
|
/Evaluation/rPlots/stabTimeMultiC/stab_time_multiple_controllers.R
|
no_license
|
rubiruchi/Renaissance-SDN
|
R
| false
| false
| 2,577
|
r
|
# Violin plot of network stabilisation times for three network families
# (T, A, E), each measured with 1..7 controllers.
#
# DEFECT FIXED: the original contained 21 copy-pasted scan() calls plus 21
# redundant `x_i <- c(t_i)` reassignments; this version reads the same 21
# files in one loop and produces the identical plot.
require(vioplot)
dev.new(width=850, height=650)

# Directory holding one whitespace-separated sample file per measurement.
base_dir <- "~/SDN/renaissance/Evaluation/ivanandantonPlots/stabTimeMultiC"

# File stems t1..t7, a1..a7, e1..e7; axis labels are their upper-case forms.
stems <- c(paste0("t", 1:7), paste0("a", 1:7), paste0("e", 1:7))
samples <- lapply(stems, function(s) scan(file.path(base_dir, paste0(s, ".txt"))))

op <- par(mar = c(6, 6, 4, 2) + 0.2)
# Empty canvas with fixed limits; the violins are added on top.
plot(0:1, 0:1, type = "n", xlim = c(0.5, 21.5), ylim = c(9, 130),
     axes = FALSE, ann = FALSE)
# Pass the 21 sample vectors as positional arguments, as the original did.
do.call(vioplot, c(samples, list(col = "gray", add = TRUE)))
title(font.lab = 1, cex.lab = 2, ylab = "Time (seconds)",
      xlab = "Network (Controllers)")
axis(side = 1, at = 1:21, labels = toupper(stems), cex.axis = 1.5, font = 1)
axis(2, at = seq(10, 130, by = 5), las = 2, cex.axis = 1.5)
par(op)
|
### Copyright (c) 2011, Yahoo! Inc. All rights reserved.
### Copyrights licensed under the New BSD License. See the accompanying LICENSE file for terms.
###
### Author: Liang Zhang
# Map the linear predictor eta to a probability via a two-piece logistic
# spline whose knot value is A = knotval; vectorized over eta. The two
# branches meet at eta = 0, where both evaluate to A.
get.splinep <- function(knotval, eta) {
  A <- knotval
  p <- rep(NA_real_, length(eta))
  below_zero <- eta < 0
  p[below_zero] <- 2 * A / (1 + exp(-2.0 * (1 - A) * eta[below_zero]))
  p[!below_zero] <- 2 * A - 1 + 2 * (1 - A) / (1 + exp(-2 * A * eta[!below_zero]))
  p
}
# Negative log-likelihood of the spline probability model at knot value
# `knotval`: positive cases contribute log(p), negatives log(1 - p).
splinefn <- function(knotval, etapos, etaneg) {
  pos_ll <- sum(log(get.splinep(knotval, etapos)))
  neg_ll <- sum(log(1 - get.splinep(knotval, etaneg)))
  -(pos_ll + neg_ll)
}
# Estimate the spline knot value by minimizing the negative log-likelihood
# over (0.001, 0.8). Y > 0 marks positive cases; eta = o + mu.
estalpha <- function(Y, mu, o) {
  is_pos <- Y > 0
  fit <- optimize(f = splinefn, lower = .001, upper = .8,
                  etapos = o[is_pos] + mu, etaneg = o[!is_pos] + mu)
  fit[[1]]
}
# Linear predictor of the latent-factor model for each (user, item) pair:
# x %*% b + alpha[user] + beta[item] + rowwise dot product of the selected
# rows of u and v. With use.C = TRUE the row sums are delegated to the
# project's sum_margin() routine.
predict.y.from.factors <- function(user, item, x, alpha, beta, u, v, b, use.C=FALSE){
  fixed_part <- x %*% b + alpha[user] + beta[item]
  uv <- u[user, , drop = FALSE] * v[item, , drop = FALSE]
  if (use.C) {
    fixed_part + sum_margin(uv, 1)
  } else {
    fixed_part + rowSums(uv)
  }
}
# Predict the response for the given (user, item) pairs. For a logistic
# model the linear predictor is mapped through the inverse-logit;
# otherwise it is returned as-is.
predict.from.factors <- function(user, item, x, alpha, beta, u, v, b, is.logistic, use.C=FALSE){
  eta <- predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C)
  if (!is.logistic) {
    return(eta)
  }
  1/(1+exp(-eta))
}
# Validate the inputs of the logistic latent-factor model routines.
# Checks types and dimensions of observations (user, item, y), feature
# matrices (x, w, z), factors (alpha, beta, u, v), regression parameters
# (b, g0, G, d0, D) and the prior variances. Stops with an error on the
# first inconsistency. Per-object checks are delegated to
# check.individual() (defined elsewhere in the project).
#   version = 1: scalar (or per-factor) variances.
#   version = 2: per-user/item variances, possibly full covariance arrays.
check.input.logistic <- function(
  user, item, y, x, w, z,
  alpha, beta, u, v,
  b, g0, G, d0, D,
  var_alpha, var_beta, var_u, var_v=1,
  version=1,
  check.NA=FALSE
){
  # --- basic type checks -------------------------------------------------
  if(!is.vector(b)) stop("b should be a vector");
  if(!is.vector(g0) && !is.matrix(g0)) stop("g0 should be a vector");
  if(!is.vector(d0) && !is.matrix(d0)) stop("d0 should be a vector");
  if(!is.matrix(G)) stop("G should be a matrix");
  if(!is.matrix(D)) stop("D should be a matrix");
  if(!is.vector(y)) stop("y should be a vector");
  if(!is.vector(user)) stop("user should be a vector");
  if(!is.vector(item)) stop("item should be a vector");
  # --- derived sizes -----------------------------------------------------
  nObs = length(y);
  nUsers = length(alpha);
  nItems = length(beta);
  nJointFeatures = length(b);
  nUserFeatures = length(g0);
  nItemFeatures = length(d0);
  nFactors = ncol(G);
  # --- per-object dimension checks ---------------------------------------
  check.individual("feature$x_obs", x, c("double", "dgCMatrix"), c(nObs, nJointFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$b"=b), check.NA=check.NA);
  check.individual("feature$x_src", w, c("double", "dgCMatrix"), c(nUsers, nUserFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$g0"=g0), check.NA=check.NA);
  check.individual("feature$x_dst", z, c("double", "dgCMatrix"), c(nItems, nItemFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$d0"=d0), check.NA=check.NA);
  check.individual(
    "factor$alpha", alpha, "double", list(c(nUsers, 1), nUsers), isNullOK=FALSE,
    stopIfAnyNull=list("obs$y"=y,"obs$src.id"=user,"feature$x_src"=w,"param$g0"=g0,"param$var_alpha"=var_alpha),
    check.NA=check.NA
  );
  check.individual(
    "factor$beta", beta, "double", list(c(nItems, 1), nItems), isNullOK=FALSE,
    stopIfAnyNull=list("obs$y"=y,"obs$dst.id"=item,"feature$x_dst"=z,"param$d0"=d0,"param$var_beta"=var_beta),
    check.NA=check.NA
  );
  # BUG FIX: the original read `out$nFactors` here (twice), but no object
  # `out` exists in this scope; the local `nFactors` computed above is
  # clearly what was intended.
  check.individual(
    "factor$u", u, "double", c(nUsers, nFactors), isNullOK=(nFactors==0),
    stopIfAnyNull=list("obs$y"=y,"obs$src.id"=user,"feature$x_src"=w,"param$G"=G,"param$var_u"=var_u),
    check.NA=check.NA
  );
  check.individual(
    "factor$v", v, "double", c(nItems, nFactors), isNullOK=(nFactors==0),
    stopIfAnyNull=list("obs$y"=y,"obs$dst.id"=item,"feature$x_dst"=z,"param$D"=D,"param$var_v"=var_v),
    check.NA=check.NA
  );
  # --- variance checks; layout depends on the model version --------------
  if(version == 1){
    if(!length(var_alpha) == 1) stop("var_alpha should have length 1");
    if(!length(var_beta) == 1) stop("var_beta should have length 1");
    if(!length(var_u) == 1 && !length(var_u)==nFactors) stop("var_u should have length 1 or nFactors");
    if(!length(var_v) == 1 && !length(var_v)==nFactors) stop("var_v should have length 1 or nFactors");
    if(var_alpha < 0) stop("var_alpha < 0");
    if(var_beta < 0) stop("var_beta < 0");
    if(any(var_u < 0)) stop("var_u < 0");
    if(any(var_v < 0)) stop("var_v < 0");
  }else if(version == 2){
    if(!(length(var_alpha) == 1 || length(var_alpha) == length(alpha))) stop("var_alpha should have length 1 or length(alpha)");
    if(!(length(var_beta) == 1 || length(var_beta) == length(beta))) stop("var_beta should have length 1 or length(beta)");
    if(!(length(var_u) == 1 || all(dim(var_u) == c(nUsers, nFactors, nFactors)))) stop("var_u should have length 1 or nUsers x nFactors x nFactors");
    if(!(length(var_v) == 1 || all(dim(var_v) == c(nItems, nFactors, nFactors)))) stop("var_v should have length 1 or nItems x nFactors x nFactors");
    if(any(var_alpha < 0)) stop("var_alpha < 0");
    if(any(var_beta < 0)) stop("var_beta < 0");
    if(length(var_u) == 1){
      if(var_u < 0) stop("var_u < 0");
    }else{
      # only the diagonal of each per-user covariance must be non-negative
      for(f in 1:nFactors){ if(any(var_u[,f,f] < 0)) stop("var_u < 0");}
    }
    if(length(var_v) == 1){
      if(var_v < 0) stop("var_v < 0");
    }else{
      for(f in 1:nFactors){ if(any(var_v[,f,f] < 0)) stop("var_v < 0");}
    }
  }else stop("Unknown version number: version = ",version);  # typo fix: was "Unkown"
  # --- cross-object consistency ------------------------------------------
  if(ncol(D) != nFactors) stop("ncol(D) != nFactors");
  if(nrow(G) != nUserFeatures) stop("nrow(G) != nUserFeatures");
  if(nrow(D) != nItemFeatures) stop("nrow(D) != nItemFeatures");
  if(nObs < nUsers || nObs < nItems) stop("nObs < nUsers || nObs < nItems");
  if(length(user) != nObs) stop("length(user) != nObs");
  if(length(item) != nObs) stop("length(item) != nObs");
}
# Joint log-likelihood of the logistic latent-factor model (legacy form).
# Observation term: standard Bernoulli-logit, sum(y*o) - sum(log(1+exp(o))),
# where o is the linear predictor. Prior terms: Gaussians on the factor
# matrices u, v (means w %*% G, z %*% D) and on the intercepts alpha, beta
# (means w %*% g0, z %*% d0), each contributing -(SSE/var + n*log(var))/2
# up to an additive constant.
logLikelihood.logistic.old <- function(
user, item, y, x, w, z,
alpha, beta, u, v,
b, g0, G, d0, D,
var_alpha, var_beta, var_u, var_v=1, debug=0, use.C=FALSE
){
# Optional input validation, skipped by default for speed.
if(debug >= 1) check.input.logistic(user, item, y, x, w, z, alpha, beta, u, v, b, g0, G, d0, D, var_alpha, var_beta, var_u, var_v);
nObs = length(y);
nUsers = length(alpha);
nItems = length(beta);
nFactors = ncol(u);
ans = 0;
# Linear predictor for every observation.
o = predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C);
# Bernoulli-logit observation log-likelihood.
ans = ans + sum(y*o) - sum(log(1+exp(o)));
# Gaussian priors on the factor matrices. var_u / var_v may be a single
# shared variance or one variance per factor column.
if (length(var_u)==1) {
err = u - w %*% G;
ans = ans - (sum(err^2) / var_u + nUsers * nFactors * log(var_u))/2;
err = v - z %*% D;
ans = ans - (sum(err^2) / var_v + nItems * nFactors * log(var_v))/2;
} else {
err = u - w %*% G;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_u[k] + nUsers * log(var_u[k]))/2;
}
err = v - z %*% D;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_v[k] + nItems * log(var_v[k]))/2;
}
}
# Gaussian priors on the per-user and per-item intercepts.
err = alpha - w %*% g0;
ans = ans - (sum(err^2) / var_alpha + nUsers * log(var_alpha))/2;
err = beta - z %*% d0;
ans = ans - (sum(err^2) / var_beta + nItems * log(var_beta))/2;
return(ans);
}
# Joint log-likelihood of the logistic latent-factor model using the
# two-piece logistic spline link with knot `ars_alpha` — the p formula
# below matches get.splinep(ars_alpha, o). Prior terms are the same
# Gaussians as in logLikelihood.logistic.old. When beta.int is TRUE, d0
# includes an intercept and beta's prior mean is cbind(1, z) %*% d0.
logLikelihood.logistic <- function(
user, item, y, x, w, z,
alpha, beta, u, v,
b, g0, G, d0, D,
var_alpha, var_beta, var_u, var_v=1, ars_alpha=0.5,
beta.int = F, debug=0, use.C=FALSE
){
# Optional input validation, skipped by default for speed.
if(debug >= 1) check.input.logistic(user, item, y, x, w, z, alpha, beta, u, v, b, g0, G, d0, D, var_alpha, var_beta, var_u, var_v);
nObs = length(y);
nUsers = length(alpha);
nItems = length(beta);
nFactors = ncol(u);
ans = 0;
# Linear predictor for every observation.
o = predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C);
# Spline link: negative-o branch first, then overwrite the o >= 0 entries
# (same two branches as get.splinep).
p = 2*ars_alpha/(1+exp(-2*(1-ars_alpha)*o));
p[o>=0] = 2*ars_alpha - 1 + 2*(1-ars_alpha)/(1+exp(-2*ars_alpha*o[o>=0]));
# Bernoulli log-likelihood under the spline probabilities.
ans = ans + sum(y*log(p)+(1-y)*log(1-p));
#ans = ans + sum(y*o) - sum(log(1+exp(o)));
# Gaussian priors on the factor matrices; var_u / var_v may be a single
# shared variance or one variance per factor column.
if (length(var_u)==1) {
err = u - w %*% G;
ans = ans - (sum(err^2) / var_u + nUsers * nFactors * log(var_u))/2;
err = v - z %*% D;
ans = ans - (sum(err^2) / var_v + nItems * nFactors * log(var_v))/2;
} else {
err = u - w %*% G;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_u[k] + nUsers * log(var_u[k]))/2;
}
err = v - z %*% D;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_v[k] + nItems * log(var_v[k]))/2;
}
}
# Gaussian priors on the intercepts.
# NOTE(review): comparisons use `F` rather than FALSE — `F` is reassignable.
err = alpha - w %*% g0;
ans = ans - (sum(err^2) / var_alpha + nUsers * log(var_alpha))/2;
if (beta.int==F) err = beta - z %*% d0 else err = beta - cbind(1,z) %*% d0;
ans = ans - (sum(err^2) / var_beta + nItems * log(var_beta))/2;
return(ans);
}
|
/src/RLFM-ars-logistic/R/util.R
|
permissive
|
clumbus963/Latent-Factor-Models
|
R
| false
| false
| 8,766
|
r
|
### Copyright (c) 2011, Yahoo! Inc. All rights reserved.
### Copyrights licensed under the New BSD License. See the accompanying LICENSE file for terms.
###
### Author: Liang Zhang
# Map the linear predictor eta to a probability via a two-piece logistic
# spline whose knot value is A = knotval; vectorized over eta. The two
# branches meet at eta = 0, where both evaluate to A.
get.splinep <- function(knotval, eta) {
  A <- knotval
  p <- rep(NA_real_, length(eta))
  below_zero <- eta < 0
  p[below_zero] <- 2 * A / (1 + exp(-2.0 * (1 - A) * eta[below_zero]))
  p[!below_zero] <- 2 * A - 1 + 2 * (1 - A) / (1 + exp(-2 * A * eta[!below_zero]))
  p
}
# Negative log-likelihood of the spline probability model at knot value
# `knotval`: positive cases contribute log(p), negatives log(1 - p).
splinefn <- function(knotval, etapos, etaneg) {
  pos_ll <- sum(log(get.splinep(knotval, etapos)))
  neg_ll <- sum(log(1 - get.splinep(knotval, etaneg)))
  -(pos_ll + neg_ll)
}
# Estimate the spline knot value by minimizing the negative log-likelihood
# over (0.001, 0.8). Y > 0 marks positive cases; eta = o + mu.
estalpha <- function(Y, mu, o) {
  is_pos <- Y > 0
  fit <- optimize(f = splinefn, lower = .001, upper = .8,
                  etapos = o[is_pos] + mu, etaneg = o[!is_pos] + mu)
  fit[[1]]
}
# Linear predictor of the latent-factor model for each (user, item) pair:
# x %*% b + alpha[user] + beta[item] + rowwise dot product of the selected
# rows of u and v. With use.C = TRUE the row sums are delegated to the
# project's sum_margin() routine.
predict.y.from.factors <- function(user, item, x, alpha, beta, u, v, b, use.C=FALSE){
  fixed_part <- x %*% b + alpha[user] + beta[item]
  uv <- u[user, , drop = FALSE] * v[item, , drop = FALSE]
  if (use.C) {
    fixed_part + sum_margin(uv, 1)
  } else {
    fixed_part + rowSums(uv)
  }
}
# Predict the response for the given (user, item) pairs. For a logistic
# model the linear predictor is mapped through the inverse-logit;
# otherwise it is returned as-is.
predict.from.factors <- function(user, item, x, alpha, beta, u, v, b, is.logistic, use.C=FALSE){
  eta <- predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C)
  if (!is.logistic) {
    return(eta)
  }
  1/(1+exp(-eta))
}
# Validate the inputs of the logistic latent-factor model routines.
# Checks types and dimensions of observations (user, item, y), feature
# matrices (x, w, z), factors (alpha, beta, u, v), regression parameters
# (b, g0, G, d0, D) and the prior variances. Stops with an error on the
# first inconsistency. Per-object checks are delegated to
# check.individual() (defined elsewhere in the project).
#   version = 1: scalar (or per-factor) variances.
#   version = 2: per-user/item variances, possibly full covariance arrays.
check.input.logistic <- function(
  user, item, y, x, w, z,
  alpha, beta, u, v,
  b, g0, G, d0, D,
  var_alpha, var_beta, var_u, var_v=1,
  version=1,
  check.NA=FALSE
){
  # --- basic type checks -------------------------------------------------
  if(!is.vector(b)) stop("b should be a vector");
  if(!is.vector(g0) && !is.matrix(g0)) stop("g0 should be a vector");
  if(!is.vector(d0) && !is.matrix(d0)) stop("d0 should be a vector");
  if(!is.matrix(G)) stop("G should be a matrix");
  if(!is.matrix(D)) stop("D should be a matrix");
  if(!is.vector(y)) stop("y should be a vector");
  if(!is.vector(user)) stop("user should be a vector");
  if(!is.vector(item)) stop("item should be a vector");
  # --- derived sizes -----------------------------------------------------
  nObs = length(y);
  nUsers = length(alpha);
  nItems = length(beta);
  nJointFeatures = length(b);
  nUserFeatures = length(g0);
  nItemFeatures = length(d0);
  nFactors = ncol(G);
  # --- per-object dimension checks ---------------------------------------
  check.individual("feature$x_obs", x, c("double", "dgCMatrix"), c(nObs, nJointFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$b"=b), check.NA=check.NA);
  check.individual("feature$x_src", w, c("double", "dgCMatrix"), c(nUsers, nUserFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$g0"=g0), check.NA=check.NA);
  check.individual("feature$x_dst", z, c("double", "dgCMatrix"), c(nItems, nItemFeatures), isNullOK=FALSE, stopIfAnyNull=list("param$d0"=d0), check.NA=check.NA);
  check.individual(
    "factor$alpha", alpha, "double", list(c(nUsers, 1), nUsers), isNullOK=FALSE,
    stopIfAnyNull=list("obs$y"=y,"obs$src.id"=user,"feature$x_src"=w,"param$g0"=g0,"param$var_alpha"=var_alpha),
    check.NA=check.NA
  );
  check.individual(
    "factor$beta", beta, "double", list(c(nItems, 1), nItems), isNullOK=FALSE,
    stopIfAnyNull=list("obs$y"=y,"obs$dst.id"=item,"feature$x_dst"=z,"param$d0"=d0,"param$var_beta"=var_beta),
    check.NA=check.NA
  );
  # BUG FIX: the original read `out$nFactors` here (twice), but no object
  # `out` exists in this scope; the local `nFactors` computed above is
  # clearly what was intended.
  check.individual(
    "factor$u", u, "double", c(nUsers, nFactors), isNullOK=(nFactors==0),
    stopIfAnyNull=list("obs$y"=y,"obs$src.id"=user,"feature$x_src"=w,"param$G"=G,"param$var_u"=var_u),
    check.NA=check.NA
  );
  check.individual(
    "factor$v", v, "double", c(nItems, nFactors), isNullOK=(nFactors==0),
    stopIfAnyNull=list("obs$y"=y,"obs$dst.id"=item,"feature$x_dst"=z,"param$D"=D,"param$var_v"=var_v),
    check.NA=check.NA
  );
  # --- variance checks; layout depends on the model version --------------
  if(version == 1){
    if(!length(var_alpha) == 1) stop("var_alpha should have length 1");
    if(!length(var_beta) == 1) stop("var_beta should have length 1");
    if(!length(var_u) == 1 && !length(var_u)==nFactors) stop("var_u should have length 1 or nFactors");
    if(!length(var_v) == 1 && !length(var_v)==nFactors) stop("var_v should have length 1 or nFactors");
    if(var_alpha < 0) stop("var_alpha < 0");
    if(var_beta < 0) stop("var_beta < 0");
    if(any(var_u < 0)) stop("var_u < 0");
    if(any(var_v < 0)) stop("var_v < 0");
  }else if(version == 2){
    if(!(length(var_alpha) == 1 || length(var_alpha) == length(alpha))) stop("var_alpha should have length 1 or length(alpha)");
    if(!(length(var_beta) == 1 || length(var_beta) == length(beta))) stop("var_beta should have length 1 or length(beta)");
    if(!(length(var_u) == 1 || all(dim(var_u) == c(nUsers, nFactors, nFactors)))) stop("var_u should have length 1 or nUsers x nFactors x nFactors");
    if(!(length(var_v) == 1 || all(dim(var_v) == c(nItems, nFactors, nFactors)))) stop("var_v should have length 1 or nItems x nFactors x nFactors");
    if(any(var_alpha < 0)) stop("var_alpha < 0");
    if(any(var_beta < 0)) stop("var_beta < 0");
    if(length(var_u) == 1){
      if(var_u < 0) stop("var_u < 0");
    }else{
      # only the diagonal of each per-user covariance must be non-negative
      for(f in 1:nFactors){ if(any(var_u[,f,f] < 0)) stop("var_u < 0");}
    }
    if(length(var_v) == 1){
      if(var_v < 0) stop("var_v < 0");
    }else{
      for(f in 1:nFactors){ if(any(var_v[,f,f] < 0)) stop("var_v < 0");}
    }
  }else stop("Unknown version number: version = ",version);  # typo fix: was "Unkown"
  # --- cross-object consistency ------------------------------------------
  if(ncol(D) != nFactors) stop("ncol(D) != nFactors");
  if(nrow(G) != nUserFeatures) stop("nrow(G) != nUserFeatures");
  if(nrow(D) != nItemFeatures) stop("nrow(D) != nItemFeatures");
  if(nObs < nUsers || nObs < nItems) stop("nObs < nUsers || nObs < nItems");
  if(length(user) != nObs) stop("length(user) != nObs");
  if(length(item) != nObs) stop("length(item) != nObs");
}
# Joint log-likelihood of the logistic latent-factor model (legacy form).
# Observation term: standard Bernoulli-logit, sum(y*o) - sum(log(1+exp(o))),
# where o is the linear predictor. Prior terms: Gaussians on the factor
# matrices u, v (means w %*% G, z %*% D) and on the intercepts alpha, beta
# (means w %*% g0, z %*% d0), each contributing -(SSE/var + n*log(var))/2
# up to an additive constant.
logLikelihood.logistic.old <- function(
user, item, y, x, w, z,
alpha, beta, u, v,
b, g0, G, d0, D,
var_alpha, var_beta, var_u, var_v=1, debug=0, use.C=FALSE
){
# Optional input validation, skipped by default for speed.
if(debug >= 1) check.input.logistic(user, item, y, x, w, z, alpha, beta, u, v, b, g0, G, d0, D, var_alpha, var_beta, var_u, var_v);
nObs = length(y);
nUsers = length(alpha);
nItems = length(beta);
nFactors = ncol(u);
ans = 0;
# Linear predictor for every observation.
o = predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C);
# Bernoulli-logit observation log-likelihood.
ans = ans + sum(y*o) - sum(log(1+exp(o)));
# Gaussian priors on the factor matrices. var_u / var_v may be a single
# shared variance or one variance per factor column.
if (length(var_u)==1) {
err = u - w %*% G;
ans = ans - (sum(err^2) / var_u + nUsers * nFactors * log(var_u))/2;
err = v - z %*% D;
ans = ans - (sum(err^2) / var_v + nItems * nFactors * log(var_v))/2;
} else {
err = u - w %*% G;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_u[k] + nUsers * log(var_u[k]))/2;
}
err = v - z %*% D;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_v[k] + nItems * log(var_v[k]))/2;
}
}
# Gaussian priors on the per-user and per-item intercepts.
err = alpha - w %*% g0;
ans = ans - (sum(err^2) / var_alpha + nUsers * log(var_alpha))/2;
err = beta - z %*% d0;
ans = ans - (sum(err^2) / var_beta + nItems * log(var_beta))/2;
return(ans);
}
# Joint log-likelihood of the logistic latent-factor model using the
# two-piece logistic spline link with knot `ars_alpha` — the p formula
# below matches get.splinep(ars_alpha, o). Prior terms are the same
# Gaussians as in logLikelihood.logistic.old. When beta.int is TRUE, d0
# includes an intercept and beta's prior mean is cbind(1, z) %*% d0.
logLikelihood.logistic <- function(
user, item, y, x, w, z,
alpha, beta, u, v,
b, g0, G, d0, D,
var_alpha, var_beta, var_u, var_v=1, ars_alpha=0.5,
beta.int = F, debug=0, use.C=FALSE
){
# Optional input validation, skipped by default for speed.
if(debug >= 1) check.input.logistic(user, item, y, x, w, z, alpha, beta, u, v, b, g0, G, d0, D, var_alpha, var_beta, var_u, var_v);
nObs = length(y);
nUsers = length(alpha);
nItems = length(beta);
nFactors = ncol(u);
ans = 0;
# Linear predictor for every observation.
o = predict.y.from.factors(user, item, x, alpha, beta, u, v, b, use.C);
# Spline link: negative-o branch first, then overwrite the o >= 0 entries
# (same two branches as get.splinep).
p = 2*ars_alpha/(1+exp(-2*(1-ars_alpha)*o));
p[o>=0] = 2*ars_alpha - 1 + 2*(1-ars_alpha)/(1+exp(-2*ars_alpha*o[o>=0]));
# Bernoulli log-likelihood under the spline probabilities.
ans = ans + sum(y*log(p)+(1-y)*log(1-p));
#ans = ans + sum(y*o) - sum(log(1+exp(o)));
# Gaussian priors on the factor matrices; var_u / var_v may be a single
# shared variance or one variance per factor column.
if (length(var_u)==1) {
err = u - w %*% G;
ans = ans - (sum(err^2) / var_u + nUsers * nFactors * log(var_u))/2;
err = v - z %*% D;
ans = ans - (sum(err^2) / var_v + nItems * nFactors * log(var_v))/2;
} else {
err = u - w %*% G;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_u[k] + nUsers * log(var_u[k]))/2;
}
err = v - z %*% D;
for (k in 1:nFactors) {
ans = ans - (sum(err[,k]^2)/var_v[k] + nItems * log(var_v[k]))/2;
}
}
# Gaussian priors on the intercepts.
# NOTE(review): comparisons use `F` rather than FALSE — `F` is reassignable.
err = alpha - w %*% g0;
ans = ans - (sum(err^2) / var_alpha + nUsers * log(var_alpha))/2;
if (beta.int==F) err = beta - z %*% d0 else err = beta - cbind(1,z) %*% d0;
ans = ans - (sum(err^2) / var_beta + nItems * log(var_beta))/2;
return(ans);
}
|
# Binding libraries.
# library() (unlike require()) stops with an error when a package is
# missing, so a broken environment fails fast at startup instead of
# producing confusing "could not find function" errors later on.
library("shiny")
library("RPostgreSQL")
library("sqldf")
library("shinyjs")
library("DT")
# Initializing PostgreSQL database
# Runs the two bootstrap scripts (schema creation first, then seed data)
# against the database configured for sqldf. Each script file is read as
# a whole and submitted as one statement batch.
initializeDatabase <- function() {
  script_paths <- c(
    "./sql_scripts/initialize_create_tables.sql",
    "./sql_scripts/initialize_insert_data.sql"
  )
  for (path in script_paths) {
    sqldf(paste(readLines(path), collapse = "\n"))
  }
}
# Loading data of the main results table from database
# Runs query_results.sql and wraps the result in a single-row-selectable
# DT widget with rounded volume columns and currency-formatted monetary
# columns. Returns a DT render function for use as a Shiny output.
loadMainResultTable <- function() {
  query <- paste(readLines("./sql_scripts/query_results.sql"),
                 collapse = "\n")
  results <- sqldf(query)
  widget <- datatable(
    data.frame(results),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Planning period ID",
      "Act. vol. of Slot Car X1",
      "Act. vol. of Slot Car Z2",
      "Mat. exp. of Slot Car X1",
      "Mat. exp. of Slot Car Z2",
      "Overhead exp. char. to Slot Car X1",
      "Overhead exp. char. to Slot Car Z2",
      "Overhead expense charged to products",
      "Committed expense",
      "Flexible expense",
      "Budgeted unused capacity",
      "Capacity utilization variance",
      "Flexible budget",
      "Spending variance"
    )
  )
  # Two decimal places for the volume columns.
  widget <- formatRound(widget, columns = "volumeproduct1", digits = 2)
  widget <- formatRound(widget, columns = "volumeproduct2", digits = 2)
  # Currency formatting for every monetary column.
  money_cols <- c(
    "materialexpenseproduct1",
    "materialexpenseproduct2",
    "expensechargedtoproduct1",
    "expensechargedtoproduct2",
    "expensechargedtoproducts",
    "committedexpense",
    "flexibleexpense",
    "unusedcapacity",
    "capacityutilizationvariance",
    "flexiblebudget",
    "spendingvariance"
  )
  for (col in money_cols) {
    widget <- formatCurrency(widget, columns = col)
  }
  DT::renderDT(widget)
}
# Get the data of selected row of the main results table
# Re-runs the main result query and returns the row at position
# selectedRow as a one-row data frame.
getDataOfSelectedRow <- function(selectedRow) {
  query <- paste(readLines("./sql_scripts/query_results.sql"),
                 collapse = "\n")
  results <- sqldf(query)
  results[selectedRow, ]
}
# Loading data of the cost pool table of selected period from database
# periodId: ID of the planning period whose cost pools are displayed.
# Returns a DT render function for use as a Shiny output.
loadCostPoolTable <- function(periodId) {
  pools <- sqldf(
    sprintf(
      "
      SELECT
      ActivityID,
      ResourceType,
      Variator,
      BudgetedCostPoolExpense,
      ActualCostPoolExpense
      FROM
      TB_Cost_Pool
      WHERE PeriodID = %s
      ORDER BY ActivityID;",
      periodId
    )
  )
  widget <- datatable(
    data.frame(pools),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Activity ID",
      "Resource type",
      "Variator",
      "Bud. cost pool expense",
      "Act. cost pool expense"
    )
  )
  widget <- formatRound(widget, columns = "variator", digits = 2)
  for (col in c("budgetedcostpoolexpense", "actualcostpoolexpense")) {
    widget <- formatCurrency(widget, columns = col)
  }
  DT::renderDT(widget)
}
# Loading data of the activity pool table of selected period from database
# periodId: ID of the planning period whose activity pools are displayed.
# Returns a DT render function for use as a Shiny output.
loadActivityPoolTable <- function(periodId) {
  pools <- sqldf(
    sprintf(
      "
      SELECT
      ActivityID,
      CommittedExpense,
      FlexibleExpense,
      CapacityDriverRate,
      BudgetedDriverRate,
      UnusedCapacity,
      CapacityUtilizationVariance,
      ExpenseChargedToProducts,
      SpendingVariance,
      FlexibleBudget
      FROM
      TB_Activity_Pool
      WHERE PeriodID = %s
      ORDER BY ActivityID;
      ",
      periodId
    )
  )
  widget <- datatable(
    data.frame(pools),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Activity ID",
      "Committed expense",
      "Flexible expense",
      "Cap. driver rate",
      "Bud. driver rate",
      "Bud. unused capacity",
      "Capacity utilization variance",
      "Expense charged to products",
      "Spending variance",
      "Flexible budget"
    )
  )
  # All monetary columns share the same currency formatting.
  money_cols <- c(
    "committedexpense",
    "flexibleexpense",
    "capacitydriverrate",
    "budgeteddriverrate",
    "expensechargedtoproducts",
    "unusedcapacity",
    "capacityutilizationvariance",
    "spendingvariance",
    "flexiblebudget"
  )
  for (col in money_cols) {
    widget <- formatCurrency(widget, columns = col)
  }
  DT::renderDT(widget)
}
# Loading data of chart of accounts table from database
# Lists all general ledger accounts; the boolean CostType flag is
# translated to the labels 'Overhead'/'Direct' directly in SQL.
# Returns a DT render function for use as a Shiny output.
loadChartOfAccountsTable <- function() {
  accounts <- sqldf(
    "
    SELECT
    AccountID,
    AccountType,
    BookingMatrixNumber,
    AccountName,
    ResourceType,
    CASE
    WHEN CostType = TRUE THEN 'Overhead'
    WHEN CostType = FALSE THEN 'Direct'
    ELSE NULL END
    FROM
    TB_General_Ledger_Account
    ORDER BY
    AccountID
    ASC;
    "
  )
  widget <- datatable(
    data.frame(accounts),
    selection = "single",
    options = list(scrollY = "600",
                   pageLength = 25),
    colnames = c(
      "G/L account ID",
      "Account type",
      "Booking matrix number",
      "Account name",
      "Resource type",
      "Cost type"
    )
  )
  DT::renderDT(widget)
}
# Loading data of bill of materials table from database
# Reads the BOM via query_bill_of_materials.sql and renders it with
# three-decimal quantities and currency-formatted unit costs.
# Returns a DT render function for use as a Shiny output.
loadBillOfMaterialTable <- function() {
  query <- paste(
    readLines("./sql_scripts/query_bill_of_materials.sql"),
    collapse = "\n"
  )
  bom <- sqldf(query)
  widget <- datatable(
    data.frame(bom),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25),
    colnames = c(
      "Finished good ID",
      "Item level",
      "Material ID",
      "Material name",
      "Material type",
      "Quantity",
      "Unit",
      "Unit cost"
    )
  )
  widget <- formatRound(widget, columns = "quantity", digits = 3)
  widget <- formatCurrency(widget, columns = "unitcost")
  DT::renderDT(widget)
}
# Load data of routing table from database
# Joins TB_Routing with TB_Activity to show, per finished good, each
# production step with its cost driver and standard coefficients.
# Returns a DT render function for use as a Shiny output.
loadRouting <- function() {
  routing <- sqldf(
    "
    SELECT
    FinishedGoodID,
    TB_Activity.ActivityID,
    ActivityName,
    Description,
    ActivityCostDriver,
    ActivityCostDriverQuantity,
    StdProdCoefPers,
    StdProdCoefEquip
    FROM
    TB_Routing
    LEFT JOIN
    TB_Activity
    ON
    TB_Routing.ActivityID = TB_Activity.ActivityID;
    "
  )
  widget <- datatable(
    data.frame(routing),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25),
    colnames = c(
      "Finished good ID",
      "Activity ID",
      "Activity name",
      "Description",
      "Activity cost driver",
      "Quantity",
      "Std. prod. coef. personnel",
      "Std. prod. coef. equipment"
    )
  )
  widget <- formatRound(widget, columns = "activitycostdriverquantity", digits = 2)
  widget <- formatRound(widget, columns = "stdprodcoefpers", digits = 3)
  widget <- formatRound(widget, columns = "stdprodcoefequip", digits = 3)
  DT::renderDT(widget)
}
# Loading data of an arbitrary table from database
# tableName: name of the table to display. SQL identifiers cannot be
# bound as query parameters, so the name is interpolated into the
# statement text; to prevent SQL injection through the table selector
# input it is first validated against a strict identifier pattern.
loadDatabaseTable <- function(tableName) {
  if (length(tableName) != 1 ||
      !grepl("^[A-Za-z_][A-Za-z0-9_]*$", tableName)) {
    stop("Invalid table name: ", tableName, call. = FALSE)
  }
  data <- sqldf(sprintf("SELECT * FROM %s;",
                        tableName))
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25)
  )
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Get selected column of a quantity structure from particular planning period and finished good
# column: column name of TB_Production_Volume; it is spliced into the
#         query unescaped, so callers must pass a trusted literal.
# Returns the value of that column for (periodId, finishedGoodId).
getColumnOfQuantityStructure <-
  function(column, periodId, finishedGoodId) {
    query <- sprintf(
      "
      SELECT %s
      FROM TB_Production_Volume
      WHERE PeriodID = %s AND FinishedGoodID = %s;
      ",
      column,
      periodId,
      finishedGoodId
    )
    result <- sqldf(query)
    result[, 1]
  }
# Get selected column of an expense structure from particular planning period and account
# column: column name of TB_Operating_Expense; it is spliced into the
#         query unescaped, so callers must pass a trusted literal.
# Returns the value of that column for (periodId, accountId).
getColumnOfExpenseStructure <-
  function(column, periodId, accountId) {
    query <- sprintf(
      "
      SELECT %s
      FROM TB_Operating_Expense
      WHERE PeriodID = %s AND AccountID = %s;
      ",
      column,
      periodId,
      accountId
    )
    result <- sqldf(query)
    result[, 1]
  }
# Insert a new quantity structure to a planning period
# Inserts capacity and budgeted volume for one finished good; an
# existing (PeriodID, FinishedGoodID) row is left untouched
# (ON CONFLICT DO NOTHING).
insertQuantityStructure <-
  function(periodId,
           finishedGoodId,
           capacityVolume,
           budgetedVolume) {
    statement <- sprintf(
      "
      INSERT INTO TB_Production_Volume(PeriodID, FinishedGoodID, CapacityVolume, BudgetedVolume)
      VALUES (%s, %s, %s, %s)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      ",
      periodId,
      finishedGoodId,
      capacityVolume,
      budgetedVolume
    )
    sqldf(statement)
  }
# Update a quantity structure of a particular planning period
# Writes the actual production volume for one finished good of a period.
updateQuantityStructure <-
  function(periodId, finishedGoodId, actualVolume) {
    statement <- sprintf(
      "
      UPDATE TB_Production_Volume
      SET ActualVolume = %s
      WHERE PeriodID = %s AND FinishedGoodID = %s;
      ",
      actualVolume,
      periodId,
      finishedGoodId
    )
    sqldf(statement)
  }
# Insert a new expense structure to a planning period
# Inserts budgeted expense and variator for one G/L account; an
# existing (PeriodID, AccountID) row is left untouched
# (ON CONFLICT DO NOTHING).
insertExpenseStructure <-
  function(periodId,
           accountId,
           budgetedExpense,
           variator) {
    statement <- sprintf(
      "
      INSERT INTO TB_Operating_Expense(PeriodID, AccountID, BudgetedExpense, Variator)
      VALUES (%s, %s, %s, %s)
      ON CONFLICT (PeriodID, AccountID) DO NOTHING;
      ",
      periodId,
      accountId,
      budgetedExpense,
      variator
    )
    sqldf(statement)
  }
# Update an expense structure of a particular planning period
# Writes the actual expense for one G/L account of a period.
updateExpenseStructure <-
  function(periodId, accountId, actualExpense) {
    statement <- sprintf(
      "
      UPDATE TB_Operating_Expense
      SET ActualExpense = %s
      WHERE PeriodID = %s AND AccountID = %s;
      ",
      actualExpense,
      periodId,
      accountId
    )
    sqldf(statement)
  }
# Load server application
# Shiny server function: configures the PostgreSQL connection used by
# sqldf, initializes the database, renders the static tables, and wires
# up all event handlers (period selection, period creation, parameter
# confirmation/reset, database inspection and reset).
server <- function(input, output, session) {
  # Establish connection to PostgreSQL using RPostgreSQL
  username <- "postgres"
  password <- ""
  ipaddress <- "localhost"
  portnumber <- 5432
  databasename <- "postgres"
  drv <- dbDriver("PostgreSQL")
  # NOTE(review): 'con' is never passed to any query below; all queries
  # go through sqldf, which connects via the options set next — confirm
  # whether this explicit handle is needed at all.
  con <- dbConnect(
    drv,
    user = username,
    password = password,
    host = ipaddress,
    port = portnumber,
    dbname = databasename
  )
  # sqldf reads these options to open its own connection on every call.
  options(
    sqldf.RPostgreSQL.user = username,
    sqldf.RPostgreSQL.password = password,
    sqldf.RPostgreSQL.dbname = databasename,
    sqldf.RPostgreSQL.host = ipaddress,
    sqldf.RPostgreSQL.port = portnumber
  )
  # Initializing the database (only required for the first run, because of the enumerations)
  initializeDatabase()
  # Loading content of main table
  output$table_main_result <- loadMainResultTable()
  # Initialize an empty table into cost pool table
  output$table_cost_pool <- DT::renderDT(datatable(NULL))
  # Initialize an empty table into activity pool table
  output$table_activity_pool <- DT::renderDT(datatable(NULL))
  # Loading content of chart of accounts
  output$table_chart_of_accounts <- loadChartOfAccountsTable()
  # Loading content of bill of materials
  output$table_bill_of_materials <- loadBillOfMaterialTable()
  # Loading content of routing
  # NOTE(review): output id "table_rounting" looks like a typo of
  # "table_routing" but must match the id used in the UI — do not rename
  # one side without the other.
  output$table_rounting <- loadRouting()
  # Displaying the TU Wien logo
  output$img_tuwien_logo <- renderUI({
    tags$img(src = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a1/TU_Wien-Logo.svg/200px-TU_Wien-Logo.svg.png")
  })
  # Displaying the "txt_about" - statement
  output$txt_about <- renderText({
    readLines(
      textConnection(
        "This R Shiny application, concerning flexible budgeting, is part of the prototypical implementation of a master thesis conducted at the Vienna University of Technology.
        The underlying concepts rest on the capacity-based ABC approach with committed and flexible resources introduced by Kaplan (1994). \u00A9 Christoph Fraller, 01425649",
        encoding = "UTF-8"
      ),
      encoding = "UTF-8"
    )
  })
  # Populate the period selector; with at most one period the choice
  # list collapses to the single entry 0.
  periods <- sqldf("SELECT PeriodID FROM TB_Planning_Period;")
  if (nrow(periods) <= 1) {
    periods <- 0
  }
  updateSelectInput(session,
                    "select_period",
                    choices = periods,
                    selected = 0)
  # Maps each finished good ID to the ids of its volume input widgets
  # (capacity, budgeted, actual).
  volumesInputFields <- data.frame(
    FinishedGood = c(120, 140),
    CapVol = c("cap_vol_input_x1", "cap_vol_input_z2"),
    BudVol = c("bud_vol_input_x1", "bud_vol_input_z2"),
    ActVol = c("act_vol_input_x1", "act_vol_input_z2")
  )
  # Maps each G/L account to the ids of its expense input widgets
  # (budgeted expense, variator, actual expense).
  expensesInputFields <- data.frame(
    Account = c(699, 700, 709, 720, 798),
    BudExp = c(
      "699_bud_input",
      "700_bud_input",
      "709_bud_input",
      "720_bud_input",
      "798_bud_input"
    ),
    Var = c(
      "699_var_input",
      "700_var_input",
      "709_var_input",
      "720_var_input",
      "798_var_input"
    ),
    ActExp = c(
      "699_act_input",
      "700_act_input",
      "709_act_input",
      "720_act_input",
      "798_act_input"
    )
  )
  # Selecting planning period event
  # Depending on the period's confirmation flags, the budgeted/actual
  # input widgets are either filled from the database and disabled, or
  # cleared and enabled.
  observeEvent(input$select_period, {
    periodId <- input$select_period
    budgetedParConf <-
      sqldf(
        sprintf(
          "SELECT BudgetedParametersConfirmed FROM TB_Planning_Period WHERE PeriodID = %s;",
          periodId
        )
      )
    actualParConf <-
      sqldf(
        sprintf(
          "SELECT ActualParametersConfirmed FROM TB_Planning_Period WHERE PeriodID = %s;",
          periodId
        )
      )
    if (budgetedParConf[, 1]) {
      # Budgeted parameters already confirmed: show stored values, read-only.
      sapply(volumesInputFields$CapVol, disable)
      sapply(volumesInputFields$BudVol, disable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(
          session,
          volumesInputFields$CapVol[i],
          value = getColumnOfQuantityStructure(
            "CapacityVolume",
            periodId,
            volumesInputFields$FinishedGood[i]
          )
        )
        updateTextInput(
          session,
          volumesInputFields$BudVol[i],
          value = getColumnOfQuantityStructure(
            "BudgetedVolume",
            periodId,
            volumesInputFields$FinishedGood[i]
          )
        )
      }
      sapply(expensesInputFields$BudExp, disable)
      sapply(expensesInputFields$Var, disable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(
          session,
          expensesInputFields$BudExp[i],
          value = getColumnOfExpenseStructure(
            "BudgetedExpense",
            periodId,
            expensesInputFields$Account[i]
          )
        )
        updateTextInput(
          session,
          expensesInputFields$Var[i],
          value = getColumnOfExpenseStructure("Variator",
                                              periodId,
                                              expensesInputFields$Account[i])
        )
      }
      disable("reset_bud_par_button")
      disable("confirm_bud_par_button")
    }
    else {
      # Budgeted parameters not yet confirmed: clear and enable inputs.
      sapply(volumesInputFields$CapVol, enable)
      sapply(volumesInputFields$BudVol, enable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(session, volumesInputFields$CapVol[i], value = "")
        updateTextInput(session, volumesInputFields$BudVol[i], value = "")
      }
      sapply(expensesInputFields$BudExp, enable)
      sapply(expensesInputFields$Var, enable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(session, expensesInputFields$BudExp[i], value = "")
        updateTextInput(session, expensesInputFields$Var[i], value = "")
      }
      enable("reset_bud_par_button")
      enable("confirm_bud_par_button")
    }
    # Actual parameters are editable only after the budgeted ones are
    # confirmed and before the actual ones are.
    if (!actualParConf[, 1] & budgetedParConf[, 1]) {
      sapply(volumesInputFields$ActVol, enable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(session, volumesInputFields$ActVol[i], value = "")
      }
      sapply(expensesInputFields$ActExp, enable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(session, expensesInputFields$ActExp[i], value = "")
      }
      enable("reset_act_par_button")
      enable("confirm_act_par_button")
    }
    else {
      if (actualParConf[, 1]) {
        # Actual parameters confirmed: show stored values, read-only.
        sapply(volumesInputFields$ActVol, disable)
        for (i in 1:nrow(volumesInputFields)) {
          updateTextInput(
            session,
            volumesInputFields$ActVol[i],
            value = getColumnOfQuantityStructure(
              "ActualVolume",
              periodId,
              volumesInputFields$FinishedGood[i]
            )
          )
        }
        sapply(expensesInputFields$ActExp, disable)
        for (i in 1:nrow(expensesInputFields)) {
          updateTextInput(
            session,
            expensesInputFields$ActExp[i],
            value = getColumnOfExpenseStructure(
              "ActualExpense",
              periodId,
              expensesInputFields$Account[i]
            )
          )
        }
        disable("reset_act_par_button")
        disable("confirm_act_par_button")
      }
      else {
        # Neither flag set yet: actual inputs stay empty and locked.
        sapply(volumesInputFields$ActVol, disable)
        for (i in 1:nrow(volumesInputFields)) {
          updateTextInput(session, volumesInputFields$ActVol[i], value = "")
        }
        sapply(expensesInputFields$ActExp, disable)
        for (i in 1:nrow(expensesInputFields)) {
          updateTextInput(session, expensesInputFields$ActExp[i], value = "")
        }
        disable("reset_act_par_button")
        disable("confirm_act_par_button")
      }
    }
  })
  # Add new period (user input) event
  # Creates the next PeriodID (MAX+1, or 0 for an empty table) with both
  # confirmation flags FALSE and links it to the previous period.
  observeEvent(input$new_period_button, {
    sqldf(
      "
      INSERT INTO TB_Planning_Period (PeriodID, BudgetedParametersConfirmed, ActualParametersConfirmed, PreviousPeriodID)
      SELECT CASE WHEN MAX(PeriodID) IS NOT NULL THEN MAX(PeriodID)+1 ELSE 0 END, FALSE, FALSE, MAX(PeriodID) FROM TB_Planning_Period
      ON CONFLICT (PeriodID) DO NOTHING;
      "
    )
    periodId <-
      sqldf("SELECT MAX(PeriodID) FROM TB_Planning_Period;")[1, ]
    sqldf(
      sprintf(
        "
      INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
      VALUES (%s, 120)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
      VALUES (%s, 140)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      ",
        periodId,
        periodId
      )
    )
    periods <- sqldf("
      SELECT PeriodID FROM TB_Planning_Period;
      ")
    updateSelectInput(
      session,
      "select_period",
      "Select planning period",
      choices = periods,
      selected = ifelse(nrow(periods) > 0, periods[nrow(periods), ], 0)
    )
  })
  # Observe input fields of volume and expense for naive calibration
  # NOTE(review): the is.null() alternatives enable the button while the
  # inputs are still NULL (before the UI renders) — confirm intended.
  observe({
    toggleState(
      "new_naiveperiod_button",
      (input$naiv_vol_input != "" |
         is.null(input$naiv_vol_input)) &
        (input$naiv_exp_input != "" | is.null(input$naiv_exp_input))
    )
  })
  # Add new period (naive calibration) event
  # Creates a fully-confirmed period whose budgeted values come from the
  # naive calibration scripts and whose actual values are derived from
  # the budgeted ones scaled down by the entered percentages.
  observeEvent(input$new_naiveperiod_button, {
    sqldf(
      "
      INSERT INTO TB_Planning_Period (PeriodID, BudgetedParametersConfirmed, ActualParametersConfirmed, PreviousPeriodID)
      SELECT CASE WHEN MAX(PeriodID) IS NOT NULL THEN MAX(PeriodID)+1 ELSE 0 END, TRUE, TRUE, MAX(PeriodID) FROM TB_Planning_Period
      ON CONFLICT (PeriodID) DO NOTHING;
      "
    )
    periodId <-
      sqldf("SELECT MAX(PeriodID) FROM TB_Planning_Period;")[1, ]
    sqldf(
      sprintf(
        "
      INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
      VALUES (%s, 120)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
      VALUES (%s, 140)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      ",
        periodId,
        periodId
      )
    )
    sqldf(paste(
      readLines("./sql_scripts/insert_naive_callibration_ex_ante.sql"),
      collapse = "\n"
    ))
    # Actuals = budgeted values reduced by the entered deviation in percent.
    sqldf(
      sprintf(
        "
      UPDATE TB_Production_Volume
      SET ActualVolume = ROUND(BudgetedVolume * %s)
      WHERE PeriodID = %s;
      UPDATE TB_Operating_Expense
      SET ActualExpense = BudgetedExpense * %s
      WHERE PeriodID = %s;
      ",
        1 - (as.numeric(input$naiv_vol_input) / 100),
        periodId,
        1 - (as.numeric(input$naiv_exp_input) / 100),
        periodId
      )
    )
    sqldf(paste(
      readLines("./sql_scripts/insert_naive_callibration_ex_post.sql"),
      collapse = "\n"
    ))
    periods <- sqldf("
      SELECT PeriodID FROM TB_Planning_Period;
      ")
    updateSelectInput(
      session,
      "select_period",
      "Select planning period",
      choices = periods,
      selected = ifelse(nrow(periods) > 0, periods[nrow(periods), ], 0)
    )
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Confirm budgeted parameters event
  # Persists the entered budgeted volumes/expenses, runs the ex-ante
  # expert estimation script and flags the period as budget-confirmed.
  observeEvent(input$confirm_bud_par_button, {
    periodId <- input$select_period
    for (i in 1:nrow(volumesInputFields)) {
      insertQuantityStructure(
        periodId,
        volumesInputFields$FinishedGood[i],
        as.numeric(input[[as.character(volumesInputFields$CapVol[i])]]),
        as.numeric(input[[as.character(volumesInputFields$BudVol[i])]])
      )
    }
    for (i in 1:nrow(expensesInputFields)) {
      insertExpenseStructure(
        periodId,
        expensesInputFields$Account[i],
        as.numeric(input[[as.character(expensesInputFields$BudExp[i])]]),
        as.numeric(input[[as.character(expensesInputFields$Var[i])]])
      )
    }
    sqldf(paste(readLines("./sql_scripts/insert_expert_estimation_ex_ante.sql"),
                collapse = "\n"))
    sqldf(
      sprintf(
        "
      UPDATE TB_Planning_Period
      SET BudgetedParametersConfirmed = TRUE
      WHERE PeriodID = %s;
      ",
        periodId
      )
    )
    # Bounce the selection to force the select_period observer to re-run
    # and refresh the input widgets for the now-confirmed period.
    updateSelectInput(session, "select_period", selected = 0)
    updateSelectInput(session, "select_period", selected = periodId)
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Confirm actual parameters event
  # Persists the entered actual volumes/expenses, runs the ex-post
  # expert estimation script and flags the period as actuals-confirmed.
  observeEvent(input$confirm_act_par_button, {
    periodId <- input$select_period
    for (i in 1:nrow(volumesInputFields)) {
      updateQuantityStructure(periodId,
                              volumesInputFields$FinishedGood[i],
                              as.numeric(input[[as.character(volumesInputFields$ActVol[i])]]))
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateExpenseStructure(periodId,
                             expensesInputFields$Account[i],
                             as.numeric(input[[as.character(expensesInputFields$ActExp[i])]]))
    }
    sqldf(paste(readLines("./sql_scripts/insert_expert_estimation_ex_post.sql"),
                collapse = "\n"))
    sqldf(
      sprintf(
        "
      UPDATE TB_Planning_Period
      SET ActualParametersConfirmed = TRUE
      WHERE PeriodID = %s;
      ",
        periodId
      )
    )
    # Bounce the selection to refresh the widgets (see comment above).
    updateSelectInput(session, "select_period", selected = 0)
    updateSelectInput(session, "select_period", selected = periodId)
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Reset budgeted parameters event: clear all budgeted input fields.
  observeEvent(input$reset_bud_par_button, {
    for (i in 1:nrow(volumesInputFields)) {
      updateTextInput(session, volumesInputFields$CapVol[i], value = "")
      updateTextInput(session, volumesInputFields$BudVol[i], value = "")
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateTextInput(session, expensesInputFields$BudExp[i], value = "")
      updateTextInput(session, expensesInputFields$Var[i], value = "")
    }
  })
  # Reset actual parameters event: clear all actual input fields.
  observeEvent(input$reset_act_par_button, {
    for (i in 1:nrow(volumesInputFields)) {
      updateTextInput(session, volumesInputFields$ActVol[i], value = "")
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateTextInput(session, expensesInputFields$ActExp[i], value = "")
    }
  })
  # Load database table event
  observeEvent(input$table_selector, {
    output$table_database <- loadDatabaseTable(input$table_selector)
  })
  # Observe rows select in tab inspection of outcomes event
  # The selected row's first column is the period ID driving the detail tables.
  observeEvent(input$table_main_result_rows_selected, {
    data <- getDataOfSelectedRow(input$table_main_result_rows_selected)
    periodId <- as.numeric(data[, 1])
    output$table_cost_pool <- loadCostPoolTable(periodId)
    output$table_activity_pool <- loadActivityPoolTable(periodId)
  })
  # Reset database event: drop/reload everything and re-render all tables.
  observeEvent(input$reset_db_button, {
    sqldf(paste(
      readLines("./sql_scripts/reset_database.sql"),
      collapse = "\n"
    ))
    initializeDatabase()
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
    output$table_chart_of_accounts <- loadChartOfAccountsTable()
    output$table_bill_of_materials <- loadBillOfMaterialTable()
    output$table_rounting <- loadRouting()
    output$table_database <- loadDatabaseTable(input$table_selector)
    updateSelectInput(session,
                      "select_period",
                      selected = 0,
                      choices = 0)
  })
  # Close PostgreSQL connection
  # NOTE(review): this runs at the end of server initialization, not at
  # session end — consider session$onSessionEnded if the handle must
  # live for the whole session.
  dbDisconnect(con)
}
|
/rshiny_app/server.R
|
no_license
|
CFraller/mt_prototypical_impl
|
R
| false
| false
| 27,263
|
r
|
# NOTE(review): everything from here down to updateExpenseStructure is a
# verbatim duplicate of the definitions earlier in this file; when the
# file is sourced top-to-bottom these re-definitions silently overwrite
# the identical earlier ones. Consider deduplicating.
# Binding libraries
require("shiny")
require("RPostgreSQL")
require("sqldf")
require("shinyjs")
require("DT")
# Initializing PostgreSQL database
# Runs the schema-creation script, then the seed-data script, via sqldf.
initializeDatabase <- function() {
  sqldf(paste(
    readLines("./sql_scripts/initialize_create_tables.sql"),
    collapse = "\n"
  ))
  sqldf(paste(
    readLines("./sql_scripts/initialize_insert_data.sql"),
    collapse = "\n"
  ))
}
# Loading data of the main results table from database
# Returns a DT render function for use as a Shiny output.
loadMainResultTable <- function() {
  data <-
    sqldf(paste(readLines("./sql_scripts/query_results.sql"),
                collapse = "\n"))
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Planning period ID",
      "Act. vol. of Slot Car X1",
      "Act. vol. of Slot Car Z2",
      "Mat. exp. of Slot Car X1",
      "Mat. exp. of Slot Car Z2",
      "Overhead exp. char. to Slot Car X1",
      "Overhead exp. char. to Slot Car Z2",
      "Overhead expense charged to products",
      "Committed expense",
      "Flexible expense",
      "Budgeted unused capacity",
      "Capacity utilization variance",
      "Flexible budget",
      "Spending variance"
    )
  ) %>%
    formatRound(columns = "volumeproduct1", digits = 2) %>%
    formatRound(columns = "volumeproduct2", digits = 2) %>%
    formatCurrency(columns = "materialexpenseproduct1") %>%
    formatCurrency(columns = "materialexpenseproduct2") %>%
    formatCurrency(columns = "expensechargedtoproduct1") %>%
    formatCurrency(columns = "expensechargedtoproduct2") %>%
    formatCurrency(columns = "expensechargedtoproducts") %>%
    formatCurrency(columns = "committedexpense") %>%
    formatCurrency(columns = "flexibleexpense") %>%
    formatCurrency(columns = "unusedcapacity") %>%
    formatCurrency(columns = "capacityutilizationvariance") %>%
    formatCurrency(columns = "flexiblebudget") %>%
    formatCurrency(columns = "spendingvariance")
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Get the data of selected row of the main results table
# Re-runs the main result query and returns row 'selectedRow'.
getDataOfSelectedRow <- function(selectedRow) {
  data <-
    sqldf(paste(readLines("./sql_scripts/query_results.sql"),
                collapse = "\n"))
  return(data[selectedRow, ])
}
# Loading data of the cost pool table of selected period from database
# periodId: ID of the planning period whose cost pools are displayed.
loadCostPoolTable <- function(periodId) {
  data <- sqldf(
    sprintf(
      "
      SELECT
      ActivityID,
      ResourceType,
      Variator,
      BudgetedCostPoolExpense,
      ActualCostPoolExpense
      FROM
      TB_Cost_Pool
      WHERE PeriodID = %s
      ORDER BY ActivityID;",
      periodId
    )
  )
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Activity ID",
      "Resource type",
      "Variator",
      "Bud. cost pool expense",
      "Act. cost pool expense"
    )
  ) %>%
    formatRound(columns = "variator", digits = 2) %>%
    formatCurrency(columns = "budgetedcostpoolexpense") %>%
    formatCurrency(columns = "actualcostpoolexpense")
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Loading data of the activity pool table of selected period from database
# periodId: ID of the planning period whose activity pools are displayed.
loadActivityPoolTable <- function(periodId) {
  data <- sqldf(
    sprintf(
      "
      SELECT
      ActivityID,
      CommittedExpense,
      FlexibleExpense,
      CapacityDriverRate,
      BudgetedDriverRate,
      UnusedCapacity,
      CapacityUtilizationVariance,
      ExpenseChargedToProducts,
      SpendingVariance,
      FlexibleBudget
      FROM
      TB_Activity_Pool
      WHERE PeriodID = %s
      ORDER BY ActivityID;
      ",
      periodId
    )
  )
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "200", pageLength = 10),
    colnames = c(
      "Activity ID",
      "Committed expense",
      "Flexible expense",
      "Cap. driver rate",
      "Bud. driver rate",
      "Bud. unused capacity",
      "Capacity utilization variance",
      "Expense charged to products",
      "Spending variance",
      "Flexible budget"
    )
  ) %>%
    formatCurrency(columns = "committedexpense") %>%
    formatCurrency(columns = "flexibleexpense") %>%
    formatCurrency(columns = "capacitydriverrate") %>%
    formatCurrency(columns = "budgeteddriverrate") %>%
    formatCurrency(columns = "expensechargedtoproducts") %>%
    formatCurrency(columns = "unusedcapacity") %>%
    formatCurrency(columns = "capacityutilizationvariance") %>%
    formatCurrency(columns = "spendingvariance") %>%
    formatCurrency(columns = "flexiblebudget")
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Loading data of chart of accounts table from database
# The boolean CostType flag is mapped to 'Overhead'/'Direct' in SQL.
loadChartOfAccountsTable <- function() {
  data <- sqldf(
    "
    SELECT
    AccountID,
    AccountType,
    BookingMatrixNumber,
    AccountName,
    ResourceType,
    CASE
    WHEN CostType = TRUE THEN 'Overhead'
    WHEN CostType = FALSE THEN 'Direct'
    ELSE NULL END
    FROM
    TB_General_Ledger_Account
    ORDER BY
    AccountID
    ASC;
    "
  )
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "600",
                   pageLength = 25),
    colnames = c(
      "G/L account ID",
      "Account type",
      "Booking matrix number",
      "Account name",
      "Resource type",
      "Cost type"
    )
  )
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Loading data of bill of materials table from database
loadBillOfMaterialTable <- function() {
  data <-
    sqldf(paste(
      readLines("./sql_scripts/query_bill_of_materials.sql"),
      collapse = "\n"
    ))
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25),
    colnames = c(
      "Finished good ID",
      "Item level",
      "Material ID",
      "Material name",
      "Material type",
      "Quantity",
      "Unit",
      "Unit cost"
    )
  ) %>% formatRound(columns = "quantity", digits = 3) %>% formatCurrency(columns = "unitcost")
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Load data of routing table from database
# Joins TB_Routing with TB_Activity for per-step driver data.
loadRouting <- function() {
  data <- sqldf(
    "
    SELECT
    FinishedGoodID,
    TB_Activity.ActivityID,
    ActivityName,
    Description,
    ActivityCostDriver,
    ActivityCostDriverQuantity,
    StdProdCoefPers,
    StdProdCoefEquip
    FROM
    TB_Routing
    LEFT JOIN
    TB_Activity
    ON
    TB_Routing.ActivityID = TB_Activity.ActivityID;
    "
  )
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25),
    colnames = c(
      "Finished good ID",
      "Activity ID",
      "Activity name",
      "Description",
      "Activity cost driver",
      "Quantity",
      "Std. prod. coef. personnel",
      "Std. prod. coef. equipment"
    )
  ) %>%
    formatRound(columns = "activitycostdriverquantity", digits = 2) %>%
    formatRound(columns = "stdprodcoefpers", digits = 3) %>%
    formatRound(columns = "stdprodcoefequip", digits = 3)
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Loading data of an arbitrary table from database
# NOTE(review): tableName is interpolated into the SQL text without
# validation — SQL injection risk if the selector input is not
# constrained to known table names in the UI.
loadDatabaseTable <- function(tableName) {
  data <- sqldf(sprintf("SELECT * FROM %s;",
                        tableName))
  dt <- datatable(
    data.frame(data),
    selection = "single",
    options = list(scrollY = "600", pageLength = 25)
  )
  tabl <- DT::renderDT(dt)
  return(tabl)
}
# Get selected column of a quantity structure from particular planning period and finished good
# NOTE(review): 'column' is spliced into the query unescaped; callers
# must pass trusted literals only.
getColumnOfQuantityStructure <-
  function(column, periodId, finishedGoodId) {
    value <- sqldf(
      sprintf(
        "
      SELECT %s
      FROM TB_Production_Volume
      WHERE PeriodID = %s AND FinishedGoodID = %s;
      ",
        column,
        periodId,
        finishedGoodId
      )
    )
    return(value[, 1])
  }
# Get selected column of a expense structure from particular planning period and account
# NOTE(review): 'column' is spliced into the query unescaped; callers
# must pass trusted literals only.
getColumnOfExpenseStructure <-
  function(column, periodId, accountId) {
    value <- sqldf(
      sprintf(
        "
      SELECT %s
      FROM TB_Operating_Expense
      WHERE PeriodID = %s AND AccountID = %s;
      ",
        column,
        periodId,
        accountId
      )
    )
    return(value[, 1])
  }
# Insert a new quantity structure to a planning period
# Existing (PeriodID, FinishedGoodID) rows are left untouched.
insertQuantityStructure <-
  function(periodId,
           finishedGoodId,
           capacityVolume,
           budgetedVolume) {
    sqldf(
      sprintf(
        "
      INSERT INTO TB_Production_Volume(PeriodID, FinishedGoodID, CapacityVolume, BudgetedVolume)
      VALUES (%s, %s, %s, %s)
      ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
      ",
        periodId,
        finishedGoodId,
        capacityVolume,
        budgetedVolume
      )
    )
  }
# Update a quantity structure of a particular planning period
updateQuantityStructure <-
  function(periodId, finishedGoodId, actualVolume) {
    sqldf(
      sprintf(
        "
      UPDATE TB_Production_Volume
      SET ActualVolume = %s
      WHERE PeriodID = %s AND FinishedGoodID = %s;
      ",
        actualVolume,
        periodId,
        finishedGoodId
      )
    )
  }
# Insert a new expense structure to a planning period
# Existing (PeriodID, AccountID) rows are left untouched.
insertExpenseStructure <-
  function(periodId,
           accountId,
           budgetedExpense,
           variator) {
    sqldf(
      sprintf(
        "
      INSERT INTO TB_Operating_Expense(PeriodID, AccountID, BudgetedExpense, Variator)
      VALUES (%s, %s, %s, %s)
      ON CONFLICT (PeriodID, AccountID) DO NOTHING;
      ",
        periodId,
        accountId,
        budgetedExpense,
        variator
      )
    )
  }
# Update an expense structure of a particular planning period
updateExpenseStructure <-
  function(periodId, accountId, actualExpense) {
    sqldf(
      sprintf(
        "
      UPDATE TB_Operating_Expense
      SET ActualExpense = %s
      WHERE PeriodID = %s AND AccountID = %s;
      ",
        actualExpense,
        periodId,
        accountId
      )
    )
  }
# Load server application.
# Shiny server function: opens the PostgreSQL connection, initialises the
# database schema, fills the static output tables and registers the event
# handlers (observers) that drive the flexible-budgeting workflow.
server <- function(input, output, session) {
  # Establish connection to PostgreSQL using RPostgreSQL
  username <- "postgres"
  password <- ""
  ipaddress <- "localhost"
  portnumber <- 5432
  databasename <- "postgres"
  drv <- dbDriver("PostgreSQL")
  con <- dbConnect(
    drv,
    user = username,
    password = password,
    host = ipaddress,
    port = portnumber,
    dbname = databasename
  )
  # Route every sqldf() call below to the same PostgreSQL database;
  # sqldf opens its own connections based on these options.
  options(
    sqldf.RPostgreSQL.user = username,
    sqldf.RPostgreSQL.password = password,
    sqldf.RPostgreSQL.dbname = databasename,
    sqldf.RPostgreSQL.host = ipaddress,
    sqldf.RPostgreSQL.port = portnumber
  )
  # Initializing the database (only required for the first run, because of the enumerations)
  initializeDatabase()
  # Loading content of main table
  output$table_main_result <- loadMainResultTable()
  # Initialize an empty table into cost pool table
  output$table_cost_pool <- DT::renderDT(datatable(NULL))
  # Initialize an empty table into activity pool table
  output$table_activity_pool <- DT::renderDT(datatable(NULL))
  # Loading content of chart of accounts
  output$table_chart_of_accounts <- loadChartOfAccountsTable()
  # Loading content of bill of materials
  output$table_bill_of_materials <- loadBillOfMaterialTable()
  # Loading content of routing
  output$table_rounting <- loadRouting()
  # Displaying the TU Wien logo
  output$img_tuwien_logo <- renderUI({
    tags$img(src = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a1/TU_Wien-Logo.svg/200px-TU_Wien-Logo.svg.png")
  })
  # Displaying the "txt_about" - statement
  output$txt_about <- renderText({
    readLines(
      textConnection(
        "This R Shiny application, concerning flexible budgeting, is part of the prototypical implementation of a master thesis conducted at the Vienna University of Technology.
    The underlying concepts rest on the capacity-based ABC approach with committed and flexible resources introduced by Kaplan (1994). \u00A9 Christoph Fraller, 01425649",
        encoding = "UTF-8"
      ),
      encoding = "UTF-8"
    )
  })
  # Populate the period selector with all known planning periods.
  # NOTE(review): with zero or one period the choices collapse to a plain 0 —
  # presumably a placeholder value for "no period selectable"; confirm.
  periods <- sqldf("SELECT PeriodID FROM TB_Planning_Period;")
  if (nrow(periods) <= 1) {
    periods <- 0
  }
  updateSelectInput(session,
                    "select_period",
                    choices = periods,
                    selected = 0)
  # Maps each finished good (IDs 120 and 140) to the ids of the text inputs
  # holding its capacity, budgeted and actual production volumes.
  volumesInputFields <- data.frame(
    FinishedGood = c(120, 140),
    CapVol = c("cap_vol_input_x1", "cap_vol_input_z2"),
    BudVol = c("bud_vol_input_x1", "bud_vol_input_z2"),
    ActVol = c("act_vol_input_x1", "act_vol_input_z2")
  )
  # Maps each expense account to the ids of the text inputs holding its
  # budgeted expense, variator and actual expense.
  expensesInputFields <- data.frame(
    Account = c(699, 700, 709, 720, 798),
    BudExp = c(
      "699_bud_input",
      "700_bud_input",
      "709_bud_input",
      "720_bud_input",
      "798_bud_input"
    ),
    Var = c(
      "699_var_input",
      "700_var_input",
      "709_var_input",
      "720_var_input",
      "798_var_input"
    ),
    ActExp = c(
      "699_act_input",
      "700_act_input",
      "709_act_input",
      "720_act_input",
      "798_act_input"
    )
  )
  # Selecting planning period event: load the confirmation state of the chosen
  # period and enable/disable/fill the parameter inputs accordingly.
  observeEvent(input$select_period, {
    periodId <- input$select_period
    budgetedParConf <-
      sqldf(
        sprintf(
          "SELECT BudgetedParametersConfirmed FROM TB_Planning_Period WHERE PeriodID = %s;",
          periodId
        )
      )
    actualParConf <-
      sqldf(
        sprintf(
          "SELECT ActualParametersConfirmed FROM TB_Planning_Period WHERE PeriodID = %s;",
          periodId
        )
      )
    # Budgeted parameters already confirmed: show the stored values read-only.
    if (budgetedParConf[, 1]) {
      sapply(volumesInputFields$CapVol, disable)
      sapply(volumesInputFields$BudVol, disable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(
          session,
          volumesInputFields$CapVol[i],
          value = getColumnOfQuantityStructure(
            "CapacityVolume",
            periodId,
            volumesInputFields$FinishedGood[i]
          )
        )
        updateTextInput(
          session,
          volumesInputFields$BudVol[i],
          value = getColumnOfQuantityStructure(
            "BudgetedVolume",
            periodId,
            volumesInputFields$FinishedGood[i]
          )
        )
      }
      sapply(expensesInputFields$BudExp, disable)
      sapply(expensesInputFields$Var, disable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(
          session,
          expensesInputFields$BudExp[i],
          value = getColumnOfExpenseStructure(
            "BudgetedExpense",
            periodId,
            expensesInputFields$Account[i]
          )
        )
        updateTextInput(
          session,
          expensesInputFields$Var[i],
          value = getColumnOfExpenseStructure("Variator",
                                              periodId,
                                              expensesInputFields$Account[i])
        )
      }
      disable("reset_bud_par_button")
      disable("confirm_bud_par_button")
    }
    else {
      # Budgeted parameters still open: clear and enable the inputs.
      sapply(volumesInputFields$CapVol, enable)
      sapply(volumesInputFields$BudVol, enable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(session, volumesInputFields$CapVol[i], value = "")
        updateTextInput(session, volumesInputFields$BudVol[i], value = "")
      }
      sapply(expensesInputFields$BudExp, enable)
      sapply(expensesInputFields$Var, enable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(session, expensesInputFields$BudExp[i], value = "")
        updateTextInput(session, expensesInputFields$Var[i], value = "")
      }
      enable("reset_bud_par_button")
      enable("confirm_bud_par_button")
    }
    # Actual parameters can only be entered after the budgeted ones are fixed.
    if (!actualParConf[, 1] & budgetedParConf[, 1]) {
      sapply(volumesInputFields$ActVol, enable)
      for (i in 1:nrow(volumesInputFields)) {
        updateTextInput(session, volumesInputFields$ActVol[i], value = "")
      }
      sapply(expensesInputFields$ActExp, enable)
      for (i in 1:nrow(expensesInputFields)) {
        updateTextInput(session, expensesInputFields$ActExp[i], value = "")
      }
      enable("reset_act_par_button")
      enable("confirm_act_par_button")
    }
    else {
      if (actualParConf[, 1]) {
        # Actual parameters confirmed: show stored values read-only.
        sapply(volumesInputFields$ActVol, disable)
        for (i in 1:nrow(volumesInputFields)) {
          updateTextInput(
            session,
            volumesInputFields$ActVol[i],
            value = getColumnOfQuantityStructure(
              "ActualVolume",
              periodId,
              volumesInputFields$FinishedGood[i]
            )
          )
        }
        sapply(expensesInputFields$ActExp, disable)
        for (i in 1:nrow(expensesInputFields)) {
          updateTextInput(
            session,
            expensesInputFields$ActExp[i],
            value = getColumnOfExpenseStructure(
              "ActualExpense",
              periodId,
              expensesInputFields$Account[i]
            )
          )
        }
        disable("reset_act_par_button")
        disable("confirm_act_par_button")
      }
      else {
        # Budgeted parameters not confirmed yet: keep the actual inputs locked.
        sapply(volumesInputFields$ActVol, disable)
        for (i in 1:nrow(volumesInputFields)) {
          updateTextInput(session, volumesInputFields$ActVol[i], value = "")
        }
        sapply(expensesInputFields$ActExp, disable)
        for (i in 1:nrow(expensesInputFields)) {
          updateTextInput(session, expensesInputFields$ActExp[i], value = "")
        }
        disable("reset_act_par_button")
        disable("confirm_act_par_button")
      }
    }
  })
  # Add new period (user input) event: create an empty planning period
  # (parameters unconfirmed) chained to the previous one.
  observeEvent(input$new_period_button, {
    sqldf(
      "
      INSERT INTO TB_Planning_Period (PeriodID, BudgetedParametersConfirmed, ActualParametersConfirmed, PreviousPeriodID)
      SELECT CASE WHEN MAX(PeriodID) IS NOT NULL THEN MAX(PeriodID)+1 ELSE 0 END, FALSE, FALSE, MAX(PeriodID) FROM TB_Planning_Period
      ON CONFLICT (PeriodID) DO NOTHING;
      "
    )
    periodId <-
      sqldf("SELECT MAX(PeriodID) FROM TB_Planning_Period;")[1, ]
    sqldf(
      sprintf(
        "
        INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
        VALUES (%s, 120)
        ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
        INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
        VALUES (%s, 140)
        ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
        ",
        periodId,
        periodId
      )
    )
    periods <- sqldf("
      SELECT PeriodID FROM TB_Planning_Period;
      ")
    # Select the newly created (last) period in the selector.
    updateSelectInput(
      session,
      "select_period",
      "Select planning period",
      choices = periods,
      selected = ifelse(nrow(periods) > 0, periods[nrow(periods), ], 0)
    )
  })
  # Observe input fields of volume and expense for naive calibration:
  # the button stays enabled only while both deviation inputs are usable.
  observe({
    toggleState(
      "new_naiveperiod_button",
      (input$naiv_vol_input != "" |
         is.null(input$naiv_vol_input)) &
        (input$naiv_exp_input != "" | is.null(input$naiv_exp_input))
    )
  })
  # Add new period (naive calibration) event: create a fully confirmed
  # period whose actual figures are derived from the budgeted ones by the
  # entered percentage deviations.
  observeEvent(input$new_naiveperiod_button, {
    sqldf(
      "
      INSERT INTO TB_Planning_Period (PeriodID, BudgetedParametersConfirmed, ActualParametersConfirmed, PreviousPeriodID)
      SELECT CASE WHEN MAX(PeriodID) IS NOT NULL THEN MAX(PeriodID)+1 ELSE 0 END, TRUE, TRUE, MAX(PeriodID) FROM TB_Planning_Period
      ON CONFLICT (PeriodID) DO NOTHING;
      "
    )
    periodId <-
      sqldf("SELECT MAX(PeriodID) FROM TB_Planning_Period;")[1, ]
    sqldf(
      sprintf(
        "
        INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
        VALUES (%s, 120)
        ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
        INSERT INTO TB_Cost_Object_Structure(PeriodID, FinishedGoodID)
        VALUES (%s, 140)
        ON CONFLICT (PeriodID, FinishedGoodID) DO NOTHING;
        ",
        periodId,
        periodId
      )
    )
    # Ex-ante calculation script for the naively calibrated period.
    sqldf(paste(
      readLines("./sql_scripts/insert_naive_callibration_ex_ante.sql"),
      collapse = "\n"
    ))
    # Derive actual volumes/expenses by applying the entered percentage
    # deviations to the budgeted figures.
    sqldf(
      sprintf(
        "
        UPDATE TB_Production_Volume
        SET ActualVolume = ROUND(BudgetedVolume * %s)
        WHERE PeriodID = %s;
        UPDATE TB_Operating_Expense
        SET ActualExpense = BudgetedExpense * %s
        WHERE PeriodID = %s;
        ",
        1 - (as.numeric(input$naiv_vol_input) / 100),
        periodId,
        1 - (as.numeric(input$naiv_exp_input) / 100),
        periodId
      )
    )
    # Ex-post calculation script for the naively calibrated period.
    sqldf(paste(
      readLines("./sql_scripts/insert_naive_callibration_ex_post.sql"),
      collapse = "\n"
    ))
    periods <- sqldf("
      SELECT PeriodID FROM TB_Planning_Period;
      ")
    updateSelectInput(
      session,
      "select_period",
      "Select planning period",
      choices = periods,
      selected = ifelse(nrow(periods) > 0, periods[nrow(periods), ], 0)
    )
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Confirm budgeted parameters event: persist the entered volumes/expenses,
  # run the ex-ante calculation and mark the period as confirmed.
  observeEvent(input$confirm_bud_par_button, {
    periodId <- input$select_period
    for (i in 1:nrow(volumesInputFields)) {
      insertQuantityStructure(
        periodId,
        volumesInputFields$FinishedGood[i],
        as.numeric(input[[as.character(volumesInputFields$CapVol[i])]]),
        as.numeric(input[[as.character(volumesInputFields$BudVol[i])]])
      )
    }
    for (i in 1:nrow(expensesInputFields)) {
      insertExpenseStructure(
        periodId,
        expensesInputFields$Account[i],
        as.numeric(input[[as.character(expensesInputFields$BudExp[i])]]),
        as.numeric(input[[as.character(expensesInputFields$Var[i])]])
      )
    }
    sqldf(paste(readLines("./sql_scripts/insert_expert_estimation_ex_ante.sql"),
                collapse = "\n"))
    sqldf(
      sprintf(
        "
        UPDATE TB_Planning_Period
        SET BudgetedParametersConfirmed = TRUE
        WHERE PeriodID = %s;
        ",
        periodId
      )
    )
    # Toggle the selection away and back to force the select_period
    # observer to re-run for the (now confirmed) period.
    updateSelectInput(session, "select_period", selected = 0)
    updateSelectInput(session, "select_period", selected = periodId)
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Confirm actual parameters event: persist the entered actual figures,
  # run the ex-post calculation and mark the period as confirmed.
  observeEvent(input$confirm_act_par_button, {
    periodId <- input$select_period
    for (i in 1:nrow(volumesInputFields)) {
      updateQuantityStructure(periodId,
                              volumesInputFields$FinishedGood[i],
                              as.numeric(input[[as.character(volumesInputFields$ActVol[i])]]))
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateExpenseStructure(periodId,
                             expensesInputFields$Account[i],
                             as.numeric(input[[as.character(expensesInputFields$ActExp[i])]]))
    }
    sqldf(paste(readLines("./sql_scripts/insert_expert_estimation_ex_post.sql"),
                collapse = "\n"))
    sqldf(
      sprintf(
        "
        UPDATE TB_Planning_Period
        SET ActualParametersConfirmed = TRUE
        WHERE PeriodID = %s;
        ",
        periodId
      )
    )
    # Toggle the selection away and back to force the select_period
    # observer to re-run for the (now confirmed) period.
    updateSelectInput(session, "select_period", selected = 0)
    updateSelectInput(session, "select_period", selected = periodId)
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
  })
  # Reset budgeted parameters event: clear all budgeted input fields.
  observeEvent(input$reset_bud_par_button, {
    for (i in 1:nrow(volumesInputFields)) {
      updateTextInput(session, volumesInputFields$CapVol[i], value = "")
      updateTextInput(session, volumesInputFields$BudVol[i], value = "")
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateTextInput(session, expensesInputFields$BudExp[i], value = "")
      updateTextInput(session, expensesInputFields$Var[i], value = "")
    }
  })
  # Reset actual parameters event: clear all actual input fields.
  observeEvent(input$reset_act_par_button, {
    for (i in 1:nrow(volumesInputFields)) {
      updateTextInput(session, volumesInputFields$ActVol[i], value = "")
    }
    for (i in 1:nrow(expensesInputFields)) {
      updateTextInput(session, expensesInputFields$ActExp[i], value = "")
    }
  })
  # Load database table event: show the raw table chosen in the selector.
  observeEvent(input$table_selector, {
    output$table_database <- loadDatabaseTable(input$table_selector)
  })
  # Observe rows select in tab inspection of outcomes event: drill down
  # into the cost/activity pools of the period behind the selected row.
  observeEvent(input$table_main_result_rows_selected, {
    data <- getDataOfSelectedRow(input$table_main_result_rows_selected)
    periodId <- as.numeric(data[, 1])
    output$table_cost_pool <- loadCostPoolTable(periodId)
    output$table_activity_pool <- loadActivityPoolTable(periodId)
  })
  # Reset database event: drop/recreate the schema and refresh every output.
  observeEvent(input$reset_db_button, {
    sqldf(paste(
      readLines("./sql_scripts/reset_database.sql"),
      collapse = "\n"
    ))
    initializeDatabase()
    output$table_main_result <- loadMainResultTable()
    output$table_cost_pool <- DT::renderDT(datatable(NULL))
    output$table_activity_pool <- DT::renderDT(datatable(NULL))
    output$table_chart_of_accounts <- loadChartOfAccountsTable()
    output$table_bill_of_materials <- loadBillOfMaterialTable()
    output$table_rounting <- loadRouting()
    output$table_database <- loadDatabaseTable(input$table_selector)
    updateSelectInput(session,
                      "select_period",
                      selected = 0,
                      choices = 0)
  })
  # Close PostgreSQL connection.
  # NOTE(review): this executes when the server function finishes session
  # setup, i.e. before the observers above ever fire. The observers
  # presumably keep working because sqldf opens its own connections from
  # the options set earlier — confirm that `con` itself is otherwise unused.
  dbDisconnect(con)
}
|
#' @name loadWorkbook
#' @title Load an existing .xlsx file
#' @author Alexander Walker
#' @param file A path to an existing .xlsx or .xlsm file
#' @param xlsxFile alias for file
#' @description loadWorkbook returns a workbook object conserving styles and
#' formatting of the original .xlsx file.
#' @return Workbook object.
#' @export
#' @seealso \code{\link{removeWorksheet}}
#' @examples
#' ## load existing workbook from package folder
#' wb <- loadWorkbook(file = system.file("loadExample.xlsx", package= "openxlsx"))
#' names(wb) #list worksheets
#' wb ## view object
#' ## Add a worksheet
#' addWorksheet(wb, "A new worksheet")
#'
#' ## Save workbook
#' saveWorkbook(wb, "loadExample.xlsx", overwrite = TRUE)
loadWorkbook <- function(file, xlsxFile = NULL){
if(!is.null(xlsxFile))
file <- xlsxFile
if(!file.exists(file))
stop("File does not exist.")
wb <- createWorkbook()
## create temp dir
xmlDir <- file.path(tempdir(), paste0(tempfile(tmpdir = ""), "_openxlsx_loadWorkbook"))
## Unzip files to temp directory
xmlFiles <- unzip(file, exdir = xmlDir)
.relsXML <- xmlFiles[grepl("_rels/.rels$", xmlFiles, perl = TRUE)]
drawingsXML <- xmlFiles[grepl("drawings/drawing[0-9]+.xml$", xmlFiles, perl = TRUE)]
worksheetsXML <- xmlFiles[grepl("/worksheets/sheet[0-9]", xmlFiles, perl = TRUE)]
appXML <- xmlFiles[grepl("app.xml$", xmlFiles, perl = TRUE)]
coreXML <- xmlFiles[grepl("core.xml$", xmlFiles, perl = TRUE)]
workbookXML <- xmlFiles[grepl("workbook.xml$", xmlFiles, perl = TRUE)]
stylesXML <- xmlFiles[grepl("styles.xml$", xmlFiles, perl = TRUE)]
sharedStringsXML <- xmlFiles[grepl("sharedStrings.xml$", xmlFiles, perl = TRUE)]
themeXML <- xmlFiles[grepl("theme[0-9]+.xml$", xmlFiles, perl = TRUE)]
drawingRelsXML <- xmlFiles[grepl("drawing[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
sheetRelsXML <- xmlFiles[grepl("sheet[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
media <- xmlFiles[grepl("image[0-9]+.[a-z]+$", xmlFiles, perl = TRUE)]
charts <- xmlFiles[grepl("chart[0-9]+.[a-z]+$", xmlFiles, perl = TRUE)]
tablesXML <- xmlFiles[grepl("tables/table[0-9]+.xml$", xmlFiles, perl = TRUE)]
tableRelsXML <- xmlFiles[grepl("table[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
queryTablesXML <- xmlFiles[grepl("queryTable[0-9]+.xml$", xmlFiles, perl = TRUE)]
connectionsXML <- xmlFiles[grepl("connections.xml$", xmlFiles, perl = TRUE)]
extLinksXML <- xmlFiles[grepl("externalLink[0-9]+.xml$", xmlFiles, perl = TRUE)]
extLinksRelsXML <- xmlFiles[grepl("externalLink[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
# pivot tables
pivotTableXML <- xmlFiles[grepl("pivotTable[0-9]+.xml$", xmlFiles, perl = TRUE)]
pivotTableRelsXML <- xmlFiles[grepl("pivotTable[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
pivotDefXML <- xmlFiles[grepl("pivotCacheDefinition[0-9]+.xml$", xmlFiles, perl = TRUE)]
pivotDefRelsXML <- xmlFiles[grepl("pivotCacheDefinition[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
pivotRecordsXML <- xmlFiles[grepl("pivotCacheRecords[0-9]+.xml$", xmlFiles, perl = TRUE)]
## slicers
slicerXML <- xmlFiles[grepl("slicer[0-9]+.xml$", xmlFiles, perl = TRUE)]
slicerCachesXML <- xmlFiles[grepl("slicerCache[0-9]+.xml$", xmlFiles, perl = TRUE)]
## VBA Macro
vbaProject <- xmlFiles[grepl("vbaProject\\.bin$", xmlFiles, perl = TRUE)]
## remove all except media and charts
on.exit(expr = unlink(xmlFiles[!grepl("charts|media", xmlFiles, ignore.case = TRUE)], recursive = TRUE, force = TRUE), add = TRUE)
nSheets <- length(worksheetsXML)
## xl\
## xl\workbook
if(length(workbookXML) > 0){
workbook <- readLines(workbookXML, warn=FALSE, encoding="UTF-8")
workbook <- removeHeadTag(workbook)
sheets <- unlist(regmatches(workbook, gregexpr("<sheet .*/sheets>", workbook, perl = TRUE)))
## sheetId is meaningless
## sheet rId links to the worksheets/sheet(rId).xml file
sheetrId <- as.integer(unlist(regmatches(sheets, gregexpr('(?<=r:id="rId)[0-9]+', sheets, perl = TRUE))))
sheetId <- unlist(regmatches(sheets, gregexpr('(?<=sheetId=")[0-9]+', sheets, perl = TRUE)))
sheetNames <- unlist(regmatches(sheets, gregexpr('(?<=name=")[^"]+', sheets, perl = TRUE)))
sheetNames <- replaceXMLEntities(sheetNames)
## add worksheets to wb
invisible(lapply(sheetNames, function(sheetName) wb$addWorksheet(sheetName)))
## replace sheetId
for(i in 1:nSheets)
wb$workbook$sheets[[i]] <- gsub(sprintf(' sheetId="%s"', i), sprintf(' sheetId="%s"', sheetId[i]), wb$workbook$sheets[[i]])
## additional workbook attributes
calcPr <- .Call("openxlsx_getChildlessNode", workbook, "<calcPr ", PACKAGE = "openxlsx")
if(length(calcPr) > 0)
wb$workbook$calcPr <- calcPr
workbookPr <- .Call("openxlsx_getChildlessNode", workbook, "<workbookPr ", PACKAGE = "openxlsx")
if(length(calcPr) > 0)
wb$workbook$workbookPr <- workbookPr
## defined Names
dNames <- .Call("openxlsx_getNodes", workbook, "<definedNames>", PACKAGE = "openxlsx")
if(length(dNames) > 0){
dNames <- gsub("^<definedNames>|</definedNames>$", "", dNames)
wb$workbook$definedNames <- paste0(.Call("openxlsx_getNodes", dNames, "<definedName", PACKAGE = "openxlsx"), ">")
}
}
## xl\sharedStrings
if(length(sharedStringsXML) > 0){
sharedStrings <- readLines(sharedStringsXML, warn = FALSE, encoding = "UTF-8")
sharedStrings <- paste(sharedStrings, collapse = "\n")
sharedStrings <- removeHeadTag(sharedStrings)
uniqueCount <- as.integer(regmatches(sharedStrings, regexpr('(?<=uniqueCount=")[0-9]+', sharedStrings, perl = TRUE)))
## read in and get <si> nodes
vals <- .Call("openxlsx_getNodes", sharedStrings, "<si>", PACKAGE = "openxlsx")
Encoding(vals) <- "UTF-8"
attr(vals, "uniqueCount") <- uniqueCount
wb$sharedStrings <- vals
}
## xl\pivotTables & xl\pivotCache
if(length(pivotTableXML) > 0){
# pivotTable cacheId links to workbook.xml which links to workbook.xml.rels via rId
# we don't modify the cacheId, only the rId
nPivotTables <- length(pivotTableXML)
rIds <- 20000L + 1:nPivotTables
pivotTableXML <- pivotTableXML[order(nchar(pivotTableXML), pivotTableXML)]
pivotTableRelsXML <- pivotTableRelsXML[order(nchar(pivotTableRelsXML), pivotTableRelsXML)]
pivotDefXML <- pivotDefXML[order(nchar(pivotDefXML), pivotDefXML)]
pivotDefRelsXML <- pivotDefRelsXML[order(nchar(pivotDefRelsXML), pivotDefRelsXML)]
pivotRecordsXML <- pivotRecordsXML[order(nchar(pivotRecordsXML), pivotRecordsXML)]
wb$pivotTables <- character(nPivotTables)
wb$pivotTables.xml.rels <- character(nPivotTables)
wb$pivotDefinitions <- character(nPivotTables)
wb$pivotDefinitionsRels <- character(nPivotTables)
wb$pivotRecords <- character(nPivotTables)
wb$pivotTables[1:length(pivotTableXML)] <-
unlist(lapply(pivotTableXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotTables.xml.rels[1:length(pivotTableRelsXML)] <-
unlist(lapply(pivotTableRelsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotDefinitions[1:length(pivotDefXML)] <-
unlist(lapply(pivotDefXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotDefinitionsRels[1:length(pivotDefRelsXML)] <-
unlist(lapply(pivotDefRelsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotRecords[1:length(pivotRecordsXML)] <-
unlist(lapply(pivotRecordsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
## update content_types
wb$Content_Types <- c(wb$Content_Types, unlist(lapply(1:nPivotTables, contentTypePivotXML)))
## workbook rels
wb$workbook.xml.rels <- c(wb$workbook.xml.rels,
sprintf('<Relationship Id="rId%s" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/pivotCacheDefinition" Target="pivotCache/pivotCacheDefinition%s.xml"/>', rIds, 1:nPivotTables)
)
caches <- .Call("openxlsx_getChildlessNode", workbook, "<pivotCache ", PACKAGE = "openxlsx")
for(i in 1:length(caches))
caches[i] <- gsub('"rId[0-9]+"', sprintf('"rId%s"', rIds[i]), caches[i])
wb$workbook$pivotCaches <- paste0('<pivotCaches>', paste(caches, collapse = ""), '</pivotCaches>')
}
## xl\vbaProject
if(length(vbaProject) > 0){
wb$vbaProject <- vbaProject
wb$Content_Types[grepl('<Override PartName="/xl/workbook.xml" ', wb$Content_Types)] <- '<Override PartName="/xl/workbook.xml" ContentType="application/vnd.ms-excel.sheet.macroEnabled.main+xml"/>'
wb$Content_Types <- c(wb$Content_Types, '<Override PartName="/xl/vbaProject.bin" ContentType="application/vnd.ms-office.vbaProject"/>')
}
## xl\styles
if(length(stylesXML) > 0){
## Build style objects from the styles XML
styles <- readLines(stylesXML, warn = FALSE, encoding = "UTF-8")
styles <- removeHeadTag(styles)
## Indexed colours
vals <- .Call("openxlsx_getNodes", styles, "<indexedColors>", PACKAGE = "openxlsx")
if(length(vals) > 0)
wb$styles$indexedColors <- paste0("<colors>", vals, "</colors>")
## dxf (don't need these, I don't think)
dxf <- .Call("openxlsx_getNodes", styles, "<dxfs", PACKAGE = "openxlsx")
if(length(dxf) > 0){
dxf <- .Call("openxlsx_getNodes", dxf[[1]], "<dxf>", PACKAGE = "openxlsx")
if(length(dxf) > 0)
wb$styles$dxfs <- dxf
}
tableStyles <- .Call("openxlsx_getNodes", styles, "<tableStyles", PACKAGE = "openxlsx")
if(length(tableStyles) > 0)
wb$styles$tableStyles <- paste0(tableStyles, ">")
extLst <- .Call("openxlsx_getNodes", styles, "<extLst>", PACKAGE = "openxlsx")
if(length(extLst) > 0)
wb$styles$extLst <- extLst
## Number formats
numFmts <- .Call("openxlsx_getChildlessNode", styles, "<numFmt ", PACKAGE = "openxlsx")
numFmtFlag <- FALSE
if(length(numFmts) > 0){
numFmtsIds <- sapply(numFmts, function(x) .Call("openxlsx_getAttr", x, 'numFmtId="', PACKAGE = "openxlsx"), USE.NAMES = FALSE)
formatCodes <- sapply(numFmts, function(x) .Call("openxlsx_getAttr", x, 'formatCode="', PACKAGE = "openxlsx"), USE.NAMES = FALSE)
numFmts <-lapply(1:length(numFmts), function(i) list("numFmtId"= numFmtsIds[[i]], "formatCode"=formatCodes[[i]]))
numFmtFlag <- TRUE
}
## fonts will maintain, sz, color, name, family scheme
fonts <- .Call("openxlsx_getNodes", styles, "<font>", PACKAGE = "openxlsx")
wb$styles$fonts[[1]] <- fonts[[1]]
fonts <- buildFontList(fonts)
fills <- .Call("openxlsx_getNodes", styles, "<fill>", PACKAGE = "openxlsx")
fills <- buildFillList(fills)
borders <- .Call("openxlsx_getNodes", styles, "<border>", PACKAGE = "openxlsx")
borders <- sapply(borders, buildBorder, USE.NAMES = FALSE)
cellXfs <- .Call("openxlsx_getNodes", styles, "<cellXfs", PACKAGE = "openxlsx")
xf <- .Call("openxlsx_getChildlessNode", cellXfs, "<xf ", PACKAGE = "openxlsx")
xfAttrs <- regmatches(xf, gregexpr('[a-zA-Z]+=".*?"', xf))
xfNames <- lapply(xfAttrs, function(xfAttrs) regmatches(xfAttrs, regexpr('[a-zA-Z]+(?=\\=".*?")', xfAttrs, perl = TRUE)))
xfVals <- lapply(xfAttrs, function(xfAttrs) regmatches(xfAttrs, regexpr('(?<=").*?(?=")', xfAttrs, perl = TRUE)))
for(i in 1:length(xf))
names(xfVals[[i]]) <- xfNames[[i]]
styleObjects <- list()
flag <- FALSE
for(s in xfVals){
style <- createStyle()
if(any(s != "0")){
if(s[["fontId"]] != "0"){
thisFont <- fonts[[(as.integer(s[["fontId"]])+1)]]
if("sz" %in% names(thisFont))
style$fontSize <- thisFont$sz
if("name" %in% names(thisFont))
style$fontName <- thisFont$name
if("family" %in% names(thisFont))
style$fontFamily <- thisFont$family
if("color" %in% names(thisFont))
style$fontColour <- thisFont$color
if("scheme" %in% names(thisFont))
style$fontScheme <- thisFont$scheme
flags <- c("bold", "italic", "underline") %in% names(thisFont)
if(any(flags)){
style$fontDecoration <- NULL
if(flags[[1]])
style$fontDecoration <- append(style$fontDecoration, "BOLD")
if(flags[[2]])
style$fontDecoration <- append(style$fontDecoration, "ITALIC")
if(flags[[3]])
style$fontDecoration <- append(style$fontDecoration, "UNDERLINE")
}
}
if(s[["numFmtId"]] != "0"){
if(as.integer(s[["numFmtId"]]) < 164){
style$numFmt <- list(numFmtId = s[["numFmtId"]])
}else if(numFmtFlag){
style$numFmt <- numFmts[[which(s[["numFmtId"]] == numFmtsIds)[1]]]
}
}
## Border
if(s[["borderId"]] != "0"){# & "applyBorder" %in% names(s)){
thisBorder <- borders[[as.integer(s[["borderId"]]) + 1L]]
if("borderLeft" %in% names(thisBorder)){
style$borderLeft <- thisBorder$borderLeft
style$borderLeftColour <- thisBorder$borderLeftColour
}
if("borderRight" %in% names(thisBorder)){
style$borderRight <- thisBorder$borderRight
style$borderRightColour <- thisBorder$borderRightColour
}
if("borderTop" %in% names(thisBorder)){
style$borderTop <- thisBorder$borderTop
style$borderTopColour <- thisBorder$borderTopColour
}
if("borderBottom" %in% names(thisBorder)){
style$borderBottom <- thisBorder$borderBottom
style$borderBottomColour <- thisBorder$borderBottomColour
}
}
## alignment
applyAlignment <- "applyAlignment" %in% names(s)
if("horizontal" %in% names(s))# & applyAlignment)
style$halign <- s[["horizontal"]]
if("vertical" %in% names(s))
style$valign <- s[["vertical"]]
if("textRotation" %in% names(s))
style$textRotation <- s[["textRotation"]]
## wrap text
if("wrapText" %in% names(s)){
if(s[["wrapText"]] %in% c("1", "true"))
style$wrapText <- TRUE
}
if(s[["fillId"]] != "0"){# && "applyFill" %in% names(s)){
fillId <- as.integer(s[["fillId"]]) + 1L
if("fgColor" %in% names(fills[[fillId]])){
tmpFg <- fills[[fillId]]$fgColor
tmpBg <- fills[[fillId]]$bgColor
if(!is.null(tmpFg))
style$fill$fillFg <- tmpFg
if(!is.null(tmpFg))
style$fill$fillBg <- tmpBg
}else{
style$fill <- fills[[fillId]]
}
}
} ## end if !all(s == "0)
## we need to skip the first one as this is used as the base style
if(flag)
styleObjects <- append(styleObjects , list(style))
flag <- TRUE
} ## end of for loop through styles s in ...
} ## end of length(stylesXML) > 0
## xl\media
if(length(media) > 0){
mediaNames <- regmatches(media, regexpr("image[0-9]\\.[a-z]+$", media))
fileTypes <- unique(gsub("image[0-9]\\.", "", mediaNames))
contentNodes <- sprintf('<Default Extension="%s" ContentType="image/%s"/>', fileTypes, fileTypes)
contentNodes[fileTypes == "emf"] <- '<Default Extension="emf" ContentType="image/x-emf"/>'
wb$Content_Types <- c(contentNodes, wb$Content_Types)
names(media) <- mediaNames
wb$media <- media
}
## xl\chart
if(length(charts) > 0){
chartNames <- regmatches(charts, regexpr("chart[0-9]\\.[a-z]+$", charts))
names(charts) <- chartNames
wb$charts <- charts
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/charts/chart%s.xml" ContentType="application/vnd.openxmlformats-officedocument.drawingml.chart+xml"/>', 1:length(charts)))
}
## xl\theme
if(length(themeXML) > 0)
wb$theme <- removeHeadTag(paste(unlist(lapply(sort(themeXML)[[1]], function(x) readLines(x, warn = FALSE, encoding = "UTF-8"))), collapse = ""))
## externalLinks
if(length(extLinksXML) > 0){
wb$externalLinks <- lapply(sort(extLinksXML), function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
wb$Content_Types <-c(wb$Content_Types,
sprintf('<Override PartName="/xl/externalLinks/externalLink%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml"/>', 1:length(extLinksXML)))
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, sprintf('<Relationship Id="rId4" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/externalLink" Target="externalLinks/externalLink1.xml"/>',
1:length(extLinksXML)))
}
## externalLinksRels
if(length(extLinksRelsXML) > 0)
wb$externalLinksRels <- lapply(sort(extLinksRelsXML), function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
##*----------------------------------------------------------------------------------------------*##
### BEGIN READING IN WORKSHEET DATA
##*----------------------------------------------------------------------------------------------*##
## xl\worksheets
worksheetsXML <- file.path(dirname(worksheetsXML), sprintf("sheet%s.xml", sheetrId))
wb <- .Call("openxlsx_loadworksheets", wb, styleObjects, worksheetsXML)
## Fix styleobject encoding
if(length(wb$styleObjects) > 0){
style_names <- sapply(wb$styleObjects, "[[", "sheet")
Encoding(style_names) <- "UTF-8"
wb$styleObjects <- lapply(1:length(style_names), function(i) {wb$styleObjects[[i]]$sheet = style_names[[i]]; wb$styleObjects[[i]]})
}
##*----------------------------------------------------------------------------------------------*##
### READING IN WORKSHEET DATA COMPLETE
##*----------------------------------------------------------------------------------------------*##
## Next sheetRels to see which drawings_rels belongs to which sheet
if(length(sheetRelsXML) > 0){
## sheet.xml have been reordered to be in the order of sheetrId
## not every sheet has a worksheet rels
allRels <- file.path(dirname(sheetRelsXML), sprintf("sheet%s.xml.rels", sheetrId))
haveRels <- allRels %in% sheetRelsXML
xml <- lapply(1:length(allRels), function(i) {
if(haveRels[i])
return(readLines(allRels[[i]], warn = FALSE))
return("<Relationship >")
})
xml <- unlist(lapply(xml, removeHeadTag))
xml <- gsub("<Relationships .*?>", "", xml)
xml <- gsub("</Relationships>", "", xml)
xml <- lapply(xml, function(x) .Call("openxlsx_getChildlessNode", x, "<Relationship ", PACKAGE="openxlsx"))
if(length(slicerXML) > 0){
slicerXML <- slicerXML[order(nchar(slicerXML), slicerXML)]
slicersFiles <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=slicer)[0-9]+(?=\\.xml)", x, perl = TRUE))))
inds <- sapply(slicersFiles, length) > 0
## worksheet_rels Id for slicer will be rId0
k <- 1L
wb$slicers <- rep("", nSheets)
for(i in 1:nSheets){
## read in slicer[j].XML sheets into sheet[i]
if(inds[i]){
wb$slicers[[i]] <- removeHeadTag(.Call("openxlsx_cppReadFile", slicerXML[k], PACKAGE = "openxlsx"))
k <- k + 1L
wb$worksheets_rels[[i]] <- unlist(c(wb$worksheets_rels[[i]],
sprintf('<Relationship Id="rId0" Type="http://schemas.microsoft.com/office/2007/relationships/slicer" Target="../slicers/slicer%s.xml"/>', i)))
wb$Content_Types <- c(wb$Content_Types,
sprintf('<Override PartName="/xl/slicers/slicer%s.xml" ContentType="application/vnd.ms-excel.slicer+xml"/>', i))
## Append slicer to worksheet extLst
wb$worksheets[[i]]$extLst <- c(wb$worksheets[[i]]$extLst, genBaseSlicerXML())
}
}
}
if(length(slicerCachesXML) > 0){
## ---- slicerCaches
inds <- 1:length(slicerCachesXML)
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/slicerCaches/slicerCache%s.xml" ContentType="application/vnd.ms-excel.slicerCache+xml"/>', inds))
wb$slicerCaches <- sapply(slicerCachesXML[order(nchar(slicerCachesXML), slicerCachesXML)], function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, sprintf('<Relationship Id="rId%s" Type="http://schemas.microsoft.com/office/2007/relationships/slicerCache" Target="slicerCaches/slicerCache%s.xml"/>', 1E5 + inds, inds))
wb$workbook$extLst <- c(wb$workbook$extLst, genSlicerCachesExtLst(1E5 + inds))
}
## tables
if(length(tablesXML) > 0){
tables <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=table)[0-9]+(?=\\.xml)", x, perl = TRUE))))
tableSheets <- unlist(lapply(1:length(sheetrId), function(i) rep(i, length(tables[[i]]))))
if(length(unlist(tables)) > 0){
## get the tables that belong to each worksheet and create a worksheets_rels for each
tCount <- 2L ## table r:Ids start at 3
for(i in 1:length(tables)){
if(length(tables[[i]]) > 0){
k <- 1:length(tables[[i]]) + tCount
wb$worksheets_rels[[i]] <- unlist(c(wb$worksheets_rels[[i]],
sprintf('<Relationship Id="rId%s" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/table" Target="../tables/table%s.xml"/>', k, k)))
wb$worksheets[[i]]$tableParts <- sprintf("<tablePart r:id=\"rId%s\"/>", k)
tCount <- tCount + length(k)
}
}
## sort the tables into the order they appear in the xml and tables variables
names(tablesXML) <- basename(tablesXML)
tablesXML <- tablesXML[sprintf("table%s.xml", unlist(tables))]
## tables are now in correct order so we can read them in as they are
wb$tables <- sapply(tablesXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
## pull out refs and attach names
refs <- regmatches(wb$tables, regexpr('(?<=ref=")[0-9A-Z:]+', wb$tables, perl = TRUE))
names(wb$tables) <- refs
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/tables/table%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml"/>', 1:length(wb$tables)+2))
## relabel ids
for(i in 1:length(wb$tables)){
newId <- sprintf(' id="%s" ', i+2)
wb$tables[[i]] <- sub(' id="[0-9]+" ' , newId, wb$tables[[i]])
}
displayNames <- unlist(regmatches(wb$tables, regexpr('(?<=displayName=").*?[^"]+', wb$tables, perl = TRUE)))
if(length(displayNames) != length(tablesXML))
displayNames <- paste0("Table", 1:length(tablesXML))
attr(wb$tables, "sheet") <- tableSheets
attr(wb$tables, "tableName") <- displayNames
}
} ## if(length(tablesXML) > 0)
## hyperlinks
hlinks <- lapply(xml, function(x) x[grepl("hyperlink", x) & grepl("External", x)])
hlinksInds <- which(sapply(hlinks, length) > 0)
if(length(hlinksInds) > 0){
hlinks <- hlinks[hlinksInds]
for(i in 1:length(hlinksInds)){
targets <- unlist(lapply(hlinks[[i]], function(x) regmatches(x, gregexpr('(?<=Target=").*?"', x, perl = TRUE))[[1]]))
targets <- gsub('"$', "", targets)
names(wb$hyperlinks[[hlinksInds[i]]]) <- targets
}
}
## xml is in the order of the sheets, drawIngs is toes to sheet position of hasDrawing
## Not every sheet has a drawing.xml
## drawings
drawXMLrelationship <- lapply(xml, function(x) x[grepl("drawings/drawing", x)])
hasDrawing <- sapply(drawXMLrelationship, length) > 0 ## which sheets have a drawing
if(length(drawingRelsXML) > 0){
drawingRelsXML <- drawingRelsXML
dRels <- lapply(drawingRelsXML, readLines, warn = FALSE)
dRels <- unlist(lapply(dRels, removeHeadTag))
dRels <- gsub("<Relationships .*?>", "", dRels)
dRels <- gsub("</Relationships>", "", dRels)
}
if(length(drawingsXML) > 0){
dXML <- lapply(drawingsXML, readLines, warn = FALSE)
dXML <- unlist(lapply(dXML, removeHeadTag))
dXML <- gsub("<xdr:wsDr .*?>", "", dXML)
dXML <- gsub("</xdr:wsDr>", "", dXML)
## split at one/two cell Anchor
dXML <- regmatches(dXML, gregexpr("<xdr:...CellAnchor.*?</xdr:...CellAnchor>", dXML))
}
## loop over all worksheets and assign drawing to sheet
for(i in 1:length(xml)){
if(hasDrawing[i]){
target <- unlist(lapply(drawXMLrelationship[[i]], function(x) regmatches(x, gregexpr('(?<=Target=").*?"', x, perl = TRUE))[[1]]))
target <- basename(gsub('"$', "", target))
## sheet_i has which(hasDrawing)[[i]]
relsInd <- grepl(target, drawingRelsXML)
if(any(relsInd))
wb$drawings_rels[i] <- dRels[relsInd]
drawingInd <- grepl(target, drawingsXML)
if(any(drawingInd))
wb$drawings[i] <- dXML[drawingInd]
}
}
## pivot tables
if(length(pivotTableXML) > 0){
pivotTableJ <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=pivotTable)[0-9]+(?=\\.xml)", x, perl = TRUE))))
sheetWithPivot <- which(sapply(pivotTableJ, length) > 0)
pivotRels <- lapply(xml, function(x) {y <- x[grepl("pivotTable", x)]; y[order(nchar(y), y)]})
hasPivot <- sapply(pivotRels, length) > 0
## Modify rIds
for(i in 1:length(pivotRels)){
if(hasPivot[i]){
for(j in 1:length(pivotRels[[i]]))
pivotRels[[i]][j] <- gsub('"rId[0-9]+"', sprintf('"rId%s"', 20000L + j), pivotRels[[i]][j])
wb$worksheets_rels[[i]] <- c(wb$worksheets_rels[[i]] , pivotRels[[i]])
}
}
## remove any workbook_res references to pivot tables that are not being used in worksheet_rels
inds <- 1:length(wb$pivotTables.xml.rels)
fileNo <- as.integer(unlist(regmatches(unlist(wb$worksheets_rels), gregexpr('(?<=pivotTable)[0-9]+(?=\\.xml)', unlist(wb$worksheets_rels), perl = TRUE))))
inds <- inds[!inds %in% fileNo]
if(length(inds) > 0){
toRemove <- paste(sprintf("(pivotCacheDefinition%s\\.xml)", inds), collapse = "|")
fileNo <- which(grepl(toRemove, wb$pivotTables.xml.rels))
toRemove <- paste(sprintf("(pivotCacheDefinition%s\\.xml)", fileNo), collapse = "|")
## remove reference to file from workbook.xml.res
wb$workbook.xml.rels <- wb$workbook.xml.rels[!grepl(toRemove, wb$workbook.xml.rels)]
}
}
} ## end of worksheetRels
## queryTables
if(length(queryTablesXML) > 0){
ids <- as.numeric(regmatches(queryTablesXML, regexpr("[0-9]+(?=\\.xml)", queryTablesXML, perl = TRUE)))
wb$queryTables <- unlist(lapply(queryTablesXML[order(ids)], function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$Content_Types <- c(wb$Content_Types,
sprintf('<Override PartName="/xl/queryTables/queryTable%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml"/>', 1:length(queryTablesXML)))
}
## connections
if(length(connectionsXML) > 0){
wb$connections <- removeHeadTag(.Call("openxlsx_cppReadFile", connectionsXML, PACKAGE = "openxlsx"))
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, '<Relationship Id="rId3" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/connections" Target="connections.xml"/>')
wb$Content_Types <- c(wb$Content_Types, '<Override PartName="/xl/connections.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml"/>')
}
## table rels
if(length(tableRelsXML) > 0){
## table_i_might have tableRels_i but I am re-ordering the tables to be in order of worksheets
## I make every table have a table_rels so i need to fill in the gaps if any table_rels are missing
tmp <- paste0(basename(tablesXML), ".rels")
hasRels <- tmp %in% basename(tableRelsXML)
## order tableRelsXML
tableRelsXML <- tableRelsXML[match(tmp[hasRels], basename(tableRelsXML))]
##
wb$tables.xml.rels <- character(length=length(tablesXML))
## which sheet does it belong to
xml <- sapply(tableRelsXML, function(x) .Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"), USE.NAMES = FALSE)
xml <- sapply(xml, removeHeadTag, USE.NAMES = FALSE)
wb$tables.xml.rels[hasRels] <- xml
}else if(length(tablesXML) > 0){
wb$tables.xml.rels <- rep("", length(tablesXML))
}
return(wb)
}
|
/R/loadWorkbook.R
|
no_license
|
tgwhite/openxlsx
|
R
| false
| false
| 30,358
|
r
|
#' @name loadWorkbook
#' @title Load an existing .xlsx file
#' @author Alexander Walker
#' @param file A path to an existing .xlsx or .xlsm file
#' @param xlsxFile alias for file
#' @description loadWorkbook returns a workbook object conserving styles and
#' formatting of the original .xlsx file.
#' @return Workbook object.
#' @export
#' @seealso \code{\link{removeWorksheet}}
#' @examples
#' ## load existing workbook from package folder
#' wb <- loadWorkbook(file = system.file("loadExample.xlsx", package= "openxlsx"))
#' names(wb) #list worksheets
#' wb ## view object
#' ## Add a worksheet
#' addWorksheet(wb, "A new worksheet")
#'
#' ## Save workbook
#' saveWorkbook(wb, "loadExample.xlsx", overwrite = TRUE)
loadWorkbook <- function(file, xlsxFile = NULL){
## Rebuilds a Workbook object from an existing .xlsx/.xlsm package:
## the file is unzipped, each xl/ part is located by a filename regex,
## and its XML is copied (with rId / content-type adjustments) into the
## matching slot of a fresh Workbook created via createWorkbook().
if(!is.null(xlsxFile))
file <- xlsxFile
if(!file.exists(file))
stop("File does not exist.")
wb <- createWorkbook()
## create temp dir
xmlDir <- file.path(tempdir(), paste0(tempfile(tmpdir = ""), "_openxlsx_loadWorkbook"))
## Unzip files to temp directory
xmlFiles <- unzip(file, exdir = xmlDir)
## Partition the unzipped file list into the individual OOXML parts
## (worksheets, styles, shared strings, pivot parts, slicers, ...).
.relsXML <- xmlFiles[grepl("_rels/.rels$", xmlFiles, perl = TRUE)]
drawingsXML <- xmlFiles[grepl("drawings/drawing[0-9]+.xml$", xmlFiles, perl = TRUE)]
worksheetsXML <- xmlFiles[grepl("/worksheets/sheet[0-9]", xmlFiles, perl = TRUE)]
appXML <- xmlFiles[grepl("app.xml$", xmlFiles, perl = TRUE)]
coreXML <- xmlFiles[grepl("core.xml$", xmlFiles, perl = TRUE)]
workbookXML <- xmlFiles[grepl("workbook.xml$", xmlFiles, perl = TRUE)]
stylesXML <- xmlFiles[grepl("styles.xml$", xmlFiles, perl = TRUE)]
sharedStringsXML <- xmlFiles[grepl("sharedStrings.xml$", xmlFiles, perl = TRUE)]
themeXML <- xmlFiles[grepl("theme[0-9]+.xml$", xmlFiles, perl = TRUE)]
drawingRelsXML <- xmlFiles[grepl("drawing[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
sheetRelsXML <- xmlFiles[grepl("sheet[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
media <- xmlFiles[grepl("image[0-9]+.[a-z]+$", xmlFiles, perl = TRUE)]
charts <- xmlFiles[grepl("chart[0-9]+.[a-z]+$", xmlFiles, perl = TRUE)]
tablesXML <- xmlFiles[grepl("tables/table[0-9]+.xml$", xmlFiles, perl = TRUE)]
tableRelsXML <- xmlFiles[grepl("table[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
queryTablesXML <- xmlFiles[grepl("queryTable[0-9]+.xml$", xmlFiles, perl = TRUE)]
connectionsXML <- xmlFiles[grepl("connections.xml$", xmlFiles, perl = TRUE)]
extLinksXML <- xmlFiles[grepl("externalLink[0-9]+.xml$", xmlFiles, perl = TRUE)]
extLinksRelsXML <- xmlFiles[grepl("externalLink[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
# pivot tables
pivotTableXML <- xmlFiles[grepl("pivotTable[0-9]+.xml$", xmlFiles, perl = TRUE)]
pivotTableRelsXML <- xmlFiles[grepl("pivotTable[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
pivotDefXML <- xmlFiles[grepl("pivotCacheDefinition[0-9]+.xml$", xmlFiles, perl = TRUE)]
pivotDefRelsXML <- xmlFiles[grepl("pivotCacheDefinition[0-9]+.xml.rels$", xmlFiles, perl = TRUE)]
pivotRecordsXML <- xmlFiles[grepl("pivotCacheRecords[0-9]+.xml$", xmlFiles, perl = TRUE)]
## slicers
slicerXML <- xmlFiles[grepl("slicer[0-9]+.xml$", xmlFiles, perl = TRUE)]
slicerCachesXML <- xmlFiles[grepl("slicerCache[0-9]+.xml$", xmlFiles, perl = TRUE)]
## VBA Macro
vbaProject <- xmlFiles[grepl("vbaProject\\.bin$", xmlFiles, perl = TRUE)]
## remove all except media and charts
on.exit(expr = unlink(xmlFiles[!grepl("charts|media", xmlFiles, ignore.case = TRUE)], recursive = TRUE, force = TRUE), add = TRUE)
nSheets <- length(worksheetsXML)
## xl\
## xl\workbook
## Parse sheet names / ids out of workbook.xml and create one empty
## worksheet per sheet so later sections can fill them in.
if(length(workbookXML) > 0){
workbook <- readLines(workbookXML, warn=FALSE, encoding="UTF-8")
workbook <- removeHeadTag(workbook)
sheets <- unlist(regmatches(workbook, gregexpr("<sheet .*/sheets>", workbook, perl = TRUE)))
## sheetId is meaningless
## sheet rId links to the worksheets/sheet(rId).xml file
sheetrId <- as.integer(unlist(regmatches(sheets, gregexpr('(?<=r:id="rId)[0-9]+', sheets, perl = TRUE))))
sheetId <- unlist(regmatches(sheets, gregexpr('(?<=sheetId=")[0-9]+', sheets, perl = TRUE)))
sheetNames <- unlist(regmatches(sheets, gregexpr('(?<=name=")[^"]+', sheets, perl = TRUE)))
sheetNames <- replaceXMLEntities(sheetNames)
## add worksheets to wb
invisible(lapply(sheetNames, function(sheetName) wb$addWorksheet(sheetName)))
## replace sheetId
for(i in 1:nSheets)
wb$workbook$sheets[[i]] <- gsub(sprintf(' sheetId="%s"', i), sprintf(' sheetId="%s"', sheetId[i]), wb$workbook$sheets[[i]])
## additional workbook attributes
calcPr <- .Call("openxlsx_getChildlessNode", workbook, "<calcPr ", PACKAGE = "openxlsx")
if(length(calcPr) > 0)
wb$workbook$calcPr <- calcPr
workbookPr <- .Call("openxlsx_getChildlessNode", workbook, "<workbookPr ", PACKAGE = "openxlsx")
## NOTE(review): guard below tests length(calcPr), not length(workbookPr) —
## looks like a copy-paste slip; confirm the intended condition.
if(length(calcPr) > 0)
wb$workbook$workbookPr <- workbookPr
## defined Names
dNames <- .Call("openxlsx_getNodes", workbook, "<definedNames>", PACKAGE = "openxlsx")
if(length(dNames) > 0){
dNames <- gsub("^<definedNames>|</definedNames>$", "", dNames)
wb$workbook$definedNames <- paste0(.Call("openxlsx_getNodes", dNames, "<definedName", PACKAGE = "openxlsx"), ">")
}
}
## xl\sharedStrings
## Keep the raw <si> nodes; uniqueCount is preserved as an attribute.
if(length(sharedStringsXML) > 0){
sharedStrings <- readLines(sharedStringsXML, warn = FALSE, encoding = "UTF-8")
sharedStrings <- paste(sharedStrings, collapse = "\n")
sharedStrings <- removeHeadTag(sharedStrings)
uniqueCount <- as.integer(regmatches(sharedStrings, regexpr('(?<=uniqueCount=")[0-9]+', sharedStrings, perl = TRUE)))
## read in and get <si> nodes
vals <- .Call("openxlsx_getNodes", sharedStrings, "<si>", PACKAGE = "openxlsx")
Encoding(vals) <- "UTF-8"
attr(vals, "uniqueCount") <- uniqueCount
wb$sharedStrings <- vals
}
## xl\pivotTables & xl\pivotCache
if(length(pivotTableXML) > 0){
# pivotTable cacheId links to workbook.xml which links to workbook.xml.rels via rId
# we don't modify the cacheId, only the rId
nPivotTables <- length(pivotTableXML)
## rIds are offset by 20000L — presumably to avoid colliding with
## rIds already present in the workbook rels; confirm.
rIds <- 20000L + 1:nPivotTables
## order(nchar, .) sorts file names numerically (file2 before file10)
pivotTableXML <- pivotTableXML[order(nchar(pivotTableXML), pivotTableXML)]
pivotTableRelsXML <- pivotTableRelsXML[order(nchar(pivotTableRelsXML), pivotTableRelsXML)]
pivotDefXML <- pivotDefXML[order(nchar(pivotDefXML), pivotDefXML)]
pivotDefRelsXML <- pivotDefRelsXML[order(nchar(pivotDefRelsXML), pivotDefRelsXML)]
pivotRecordsXML <- pivotRecordsXML[order(nchar(pivotRecordsXML), pivotRecordsXML)]
wb$pivotTables <- character(nPivotTables)
wb$pivotTables.xml.rels <- character(nPivotTables)
wb$pivotDefinitions <- character(nPivotTables)
wb$pivotDefinitionsRels <- character(nPivotTables)
wb$pivotRecords <- character(nPivotTables)
wb$pivotTables[1:length(pivotTableXML)] <-
unlist(lapply(pivotTableXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotTables.xml.rels[1:length(pivotTableRelsXML)] <-
unlist(lapply(pivotTableRelsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotDefinitions[1:length(pivotDefXML)] <-
unlist(lapply(pivotDefXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotDefinitionsRels[1:length(pivotDefRelsXML)] <-
unlist(lapply(pivotDefRelsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$pivotRecords[1:length(pivotRecordsXML)] <-
unlist(lapply(pivotRecordsXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
## update content_types
wb$Content_Types <- c(wb$Content_Types, unlist(lapply(1:nPivotTables, contentTypePivotXML)))
## workbook rels
wb$workbook.xml.rels <- c(wb$workbook.xml.rels,
sprintf('<Relationship Id="rId%s" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/pivotCacheDefinition" Target="pivotCache/pivotCacheDefinition%s.xml"/>', rIds, 1:nPivotTables)
)
caches <- .Call("openxlsx_getChildlessNode", workbook, "<pivotCache ", PACKAGE = "openxlsx")
for(i in 1:length(caches))
caches[i] <- gsub('"rId[0-9]+"', sprintf('"rId%s"', rIds[i]), caches[i])
wb$workbook$pivotCaches <- paste0('<pivotCaches>', paste(caches, collapse = ""), '</pivotCaches>')
}
## xl\vbaProject
## Presence of a VBA blob flips the workbook content type to macro-enabled.
if(length(vbaProject) > 0){
wb$vbaProject <- vbaProject
wb$Content_Types[grepl('<Override PartName="/xl/workbook.xml" ', wb$Content_Types)] <- '<Override PartName="/xl/workbook.xml" ContentType="application/vnd.ms-excel.sheet.macroEnabled.main+xml"/>'
wb$Content_Types <- c(wb$Content_Types, '<Override PartName="/xl/vbaProject.bin" ContentType="application/vnd.ms-office.vbaProject"/>')
}
## xl\styles
if(length(stylesXML) > 0){
## Build style objects from the styles XML
styles <- readLines(stylesXML, warn = FALSE, encoding = "UTF-8")
styles <- removeHeadTag(styles)
## Indexed colours
vals <- .Call("openxlsx_getNodes", styles, "<indexedColors>", PACKAGE = "openxlsx")
if(length(vals) > 0)
wb$styles$indexedColors <- paste0("<colors>", vals, "</colors>")
## dxf (don't need these, I don't think)
dxf <- .Call("openxlsx_getNodes", styles, "<dxfs", PACKAGE = "openxlsx")
if(length(dxf) > 0){
dxf <- .Call("openxlsx_getNodes", dxf[[1]], "<dxf>", PACKAGE = "openxlsx")
if(length(dxf) > 0)
wb$styles$dxfs <- dxf
}
tableStyles <- .Call("openxlsx_getNodes", styles, "<tableStyles", PACKAGE = "openxlsx")
if(length(tableStyles) > 0)
wb$styles$tableStyles <- paste0(tableStyles, ">")
extLst <- .Call("openxlsx_getNodes", styles, "<extLst>", PACKAGE = "openxlsx")
if(length(extLst) > 0)
wb$styles$extLst <- extLst
## Number formats
numFmts <- .Call("openxlsx_getChildlessNode", styles, "<numFmt ", PACKAGE = "openxlsx")
numFmtFlag <- FALSE
if(length(numFmts) > 0){
numFmtsIds <- sapply(numFmts, function(x) .Call("openxlsx_getAttr", x, 'numFmtId="', PACKAGE = "openxlsx"), USE.NAMES = FALSE)
formatCodes <- sapply(numFmts, function(x) .Call("openxlsx_getAttr", x, 'formatCode="', PACKAGE = "openxlsx"), USE.NAMES = FALSE)
numFmts <-lapply(1:length(numFmts), function(i) list("numFmtId"= numFmtsIds[[i]], "formatCode"=formatCodes[[i]]))
numFmtFlag <- TRUE
}
## fonts will maintain, sz, color, name, family scheme
fonts <- .Call("openxlsx_getNodes", styles, "<font>", PACKAGE = "openxlsx")
wb$styles$fonts[[1]] <- fonts[[1]]
fonts <- buildFontList(fonts)
fills <- .Call("openxlsx_getNodes", styles, "<fill>", PACKAGE = "openxlsx")
fills <- buildFillList(fills)
borders <- .Call("openxlsx_getNodes", styles, "<border>", PACKAGE = "openxlsx")
borders <- sapply(borders, buildBorder, USE.NAMES = FALSE)
cellXfs <- .Call("openxlsx_getNodes", styles, "<cellXfs", PACKAGE = "openxlsx")
xf <- .Call("openxlsx_getChildlessNode", cellXfs, "<xf ", PACKAGE = "openxlsx")
## Split each <xf> element into attribute name/value pairs.
xfAttrs <- regmatches(xf, gregexpr('[a-zA-Z]+=".*?"', xf))
xfNames <- lapply(xfAttrs, function(xfAttrs) regmatches(xfAttrs, regexpr('[a-zA-Z]+(?=\\=".*?")', xfAttrs, perl = TRUE)))
xfVals <- lapply(xfAttrs, function(xfAttrs) regmatches(xfAttrs, regexpr('(?<=").*?(?=")', xfAttrs, perl = TRUE)))
for(i in 1:length(xf))
names(xfVals[[i]]) <- xfNames[[i]]
## Translate each <xf> record into a Style object by resolving its
## fontId / numFmtId / borderId / fillId indices (all 0-based in XML,
## hence the +1 when indexing the R lists built above).
styleObjects <- list()
flag <- FALSE
for(s in xfVals){
style <- createStyle()
if(any(s != "0")){
if(s[["fontId"]] != "0"){
thisFont <- fonts[[(as.integer(s[["fontId"]])+1)]]
if("sz" %in% names(thisFont))
style$fontSize <- thisFont$sz
if("name" %in% names(thisFont))
style$fontName <- thisFont$name
if("family" %in% names(thisFont))
style$fontFamily <- thisFont$family
if("color" %in% names(thisFont))
style$fontColour <- thisFont$color
if("scheme" %in% names(thisFont))
style$fontScheme <- thisFont$scheme
flags <- c("bold", "italic", "underline") %in% names(thisFont)
if(any(flags)){
style$fontDecoration <- NULL
if(flags[[1]])
style$fontDecoration <- append(style$fontDecoration, "BOLD")
if(flags[[2]])
style$fontDecoration <- append(style$fontDecoration, "ITALIC")
if(flags[[3]])
style$fontDecoration <- append(style$fontDecoration, "UNDERLINE")
}
}
## numFmtId < 164 are Excel built-in formats; custom ones are looked
## up in the numFmts parsed above (when numFmtFlag is set).
if(s[["numFmtId"]] != "0"){
if(as.integer(s[["numFmtId"]]) < 164){
style$numFmt <- list(numFmtId = s[["numFmtId"]])
}else if(numFmtFlag){
style$numFmt <- numFmts[[which(s[["numFmtId"]] == numFmtsIds)[1]]]
}
}
## Border
if(s[["borderId"]] != "0"){# & "applyBorder" %in% names(s)){
thisBorder <- borders[[as.integer(s[["borderId"]]) + 1L]]
if("borderLeft" %in% names(thisBorder)){
style$borderLeft <- thisBorder$borderLeft
style$borderLeftColour <- thisBorder$borderLeftColour
}
if("borderRight" %in% names(thisBorder)){
style$borderRight <- thisBorder$borderRight
style$borderRightColour <- thisBorder$borderRightColour
}
if("borderTop" %in% names(thisBorder)){
style$borderTop <- thisBorder$borderTop
style$borderTopColour <- thisBorder$borderTopColour
}
if("borderBottom" %in% names(thisBorder)){
style$borderBottom <- thisBorder$borderBottom
style$borderBottomColour <- thisBorder$borderBottomColour
}
}
## alignment
applyAlignment <- "applyAlignment" %in% names(s)
if("horizontal" %in% names(s))# & applyAlignment)
style$halign <- s[["horizontal"]]
if("vertical" %in% names(s))
style$valign <- s[["vertical"]]
if("textRotation" %in% names(s))
style$textRotation <- s[["textRotation"]]
## wrap text
if("wrapText" %in% names(s)){
if(s[["wrapText"]] %in% c("1", "true"))
style$wrapText <- TRUE
}
if(s[["fillId"]] != "0"){# && "applyFill" %in% names(s)){
fillId <- as.integer(s[["fillId"]]) + 1L
if("fgColor" %in% names(fills[[fillId]])){
tmpFg <- fills[[fillId]]$fgColor
tmpBg <- fills[[fillId]]$bgColor
## NOTE(review): both branches below test tmpFg; the second one
## probably meant !is.null(tmpBg) — confirm.
if(!is.null(tmpFg))
style$fill$fillFg <- tmpFg
if(!is.null(tmpFg))
style$fill$fillBg <- tmpBg
}else{
style$fill <- fills[[fillId]]
}
}
} ## end if any(s != "0")
## we need to skip the first one as this is used as the base style
if(flag)
styleObjects <- append(styleObjects , list(style))
flag <- TRUE
} ## end of for loop through styles s in ...
} ## end of length(stylesXML) > 0
## xl\media
if(length(media) > 0){
mediaNames <- regmatches(media, regexpr("image[0-9]\\.[a-z]+$", media))
fileTypes <- unique(gsub("image[0-9]\\.", "", mediaNames))
contentNodes <- sprintf('<Default Extension="%s" ContentType="image/%s"/>', fileTypes, fileTypes)
contentNodes[fileTypes == "emf"] <- '<Default Extension="emf" ContentType="image/x-emf"/>'
wb$Content_Types <- c(contentNodes, wb$Content_Types)
names(media) <- mediaNames
wb$media <- media
}
## xl\chart
if(length(charts) > 0){
chartNames <- regmatches(charts, regexpr("chart[0-9]\\.[a-z]+$", charts))
names(charts) <- chartNames
wb$charts <- charts
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/charts/chart%s.xml" ContentType="application/vnd.openxmlformats-officedocument.drawingml.chart+xml"/>', 1:length(charts)))
}
## xl\theme
## Only the first theme file (after sort) is retained.
if(length(themeXML) > 0)
wb$theme <- removeHeadTag(paste(unlist(lapply(sort(themeXML)[[1]], function(x) readLines(x, warn = FALSE, encoding = "UTF-8"))), collapse = ""))
## externalLinks
if(length(extLinksXML) > 0){
wb$externalLinks <- lapply(sort(extLinksXML), function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
wb$Content_Types <-c(wb$Content_Types,
sprintf('<Override PartName="/xl/externalLinks/externalLink%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.externalLink+xml"/>', 1:length(extLinksXML)))
## NOTE(review): Id and Target are hard-coded ("rId4", externalLink1.xml)
## even though sprintf is vectorised over 1:length(extLinksXML) — verify
## behaviour when more than one external link exists.
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, sprintf('<Relationship Id="rId4" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/externalLink" Target="externalLinks/externalLink1.xml"/>',
1:length(extLinksXML)))
}
## externalLinksRels
if(length(extLinksRelsXML) > 0)
wb$externalLinksRels <- lapply(sort(extLinksRelsXML), function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
##*----------------------------------------------------------------------------------------------*##
### BEGIN READING IN WORKSHEET DATA
##*----------------------------------------------------------------------------------------------*##
## xl\worksheets
## Reorder sheet files to workbook sheet order (sheetrId), then hand the
## heavy parsing over to the C++ loader, which fills wb in bulk.
worksheetsXML <- file.path(dirname(worksheetsXML), sprintf("sheet%s.xml", sheetrId))
wb <- .Call("openxlsx_loadworksheets", wb, styleObjects, worksheetsXML)
## Fix styleobject encoding
if(length(wb$styleObjects) > 0){
style_names <- sapply(wb$styleObjects, "[[", "sheet")
Encoding(style_names) <- "UTF-8"
wb$styleObjects <- lapply(1:length(style_names), function(i) {wb$styleObjects[[i]]$sheet = style_names[[i]]; wb$styleObjects[[i]]})
}
##*----------------------------------------------------------------------------------------------*##
### READING IN WORKSHEET DATA COMPLETE
##*----------------------------------------------------------------------------------------------*##
## Next sheetRels to see which drawings_rels belongs to which sheet
if(length(sheetRelsXML) > 0){
## sheet.xml have been reordered to be in the order of sheetrId
## not every sheet has a worksheet rels
allRels <- file.path(dirname(sheetRelsXML), sprintf("sheet%s.xml.rels", sheetrId))
haveRels <- allRels %in% sheetRelsXML
## Sheets without a rels file get a placeholder node so indexing stays aligned.
xml <- lapply(1:length(allRels), function(i) {
if(haveRels[i])
return(readLines(allRels[[i]], warn = FALSE))
return("<Relationship >")
})
xml <- unlist(lapply(xml, removeHeadTag))
xml <- gsub("<Relationships .*?>", "", xml)
xml <- gsub("</Relationships>", "", xml)
xml <- lapply(xml, function(x) .Call("openxlsx_getChildlessNode", x, "<Relationship ", PACKAGE="openxlsx"))
if(length(slicerXML) > 0){
slicerXML <- slicerXML[order(nchar(slicerXML), slicerXML)]
slicersFiles <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=slicer)[0-9]+(?=\\.xml)", x, perl = TRUE))))
inds <- sapply(slicersFiles, length) > 0
## worksheet_rels Id for slicer will be rId0
k <- 1L
wb$slicers <- rep("", nSheets)
for(i in 1:nSheets){
## read in slicer[j].XML sheets into sheet[i]
if(inds[i]){
wb$slicers[[i]] <- removeHeadTag(.Call("openxlsx_cppReadFile", slicerXML[k], PACKAGE = "openxlsx"))
k <- k + 1L
wb$worksheets_rels[[i]] <- unlist(c(wb$worksheets_rels[[i]],
sprintf('<Relationship Id="rId0" Type="http://schemas.microsoft.com/office/2007/relationships/slicer" Target="../slicers/slicer%s.xml"/>', i)))
wb$Content_Types <- c(wb$Content_Types,
sprintf('<Override PartName="/xl/slicers/slicer%s.xml" ContentType="application/vnd.ms-excel.slicer+xml"/>', i))
## Append slicer to worksheet extLst
wb$worksheets[[i]]$extLst <- c(wb$worksheets[[i]]$extLst, genBaseSlicerXML())
}
}
}
if(length(slicerCachesXML) > 0){
## ---- slicerCaches
inds <- 1:length(slicerCachesXML)
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/slicerCaches/slicerCache%s.xml" ContentType="application/vnd.ms-excel.slicerCache+xml"/>', inds))
wb$slicerCaches <- sapply(slicerCachesXML[order(nchar(slicerCachesXML), slicerCachesXML)], function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, sprintf('<Relationship Id="rId%s" Type="http://schemas.microsoft.com/office/2007/relationships/slicerCache" Target="slicerCaches/slicerCache%s.xml"/>', 1E5 + inds, inds))
wb$workbook$extLst <- c(wb$workbook$extLst, genSlicerCachesExtLst(1E5 + inds))
}
## tables
if(length(tablesXML) > 0){
tables <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=table)[0-9]+(?=\\.xml)", x, perl = TRUE))))
tableSheets <- unlist(lapply(1:length(sheetrId), function(i) rep(i, length(tables[[i]]))))
if(length(unlist(tables)) > 0){
## get the tables that belong to each worksheet and create a worksheets_rels for each
tCount <- 2L ## table r:Ids start at 3
for(i in 1:length(tables)){
if(length(tables[[i]]) > 0){
k <- 1:length(tables[[i]]) + tCount
wb$worksheets_rels[[i]] <- unlist(c(wb$worksheets_rels[[i]],
sprintf('<Relationship Id="rId%s" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/table" Target="../tables/table%s.xml"/>', k, k)))
wb$worksheets[[i]]$tableParts <- sprintf("<tablePart r:id=\"rId%s\"/>", k)
tCount <- tCount + length(k)
}
}
## sort the tables into the order they appear in the xml and tables variables
names(tablesXML) <- basename(tablesXML)
tablesXML <- tablesXML[sprintf("table%s.xml", unlist(tables))]
## tables are now in correct order so we can read them in as they are
wb$tables <- sapply(tablesXML, function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx")))
## pull out refs and attach names
refs <- regmatches(wb$tables, regexpr('(?<=ref=")[0-9A-Z:]+', wb$tables, perl = TRUE))
names(wb$tables) <- refs
wb$Content_Types <- c(wb$Content_Types, sprintf('<Override PartName="/xl/tables/table%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml"/>', 1:length(wb$tables)+2))
## relabel ids
for(i in 1:length(wb$tables)){
newId <- sprintf(' id="%s" ', i+2)
wb$tables[[i]] <- sub(' id="[0-9]+" ' , newId, wb$tables[[i]])
}
displayNames <- unlist(regmatches(wb$tables, regexpr('(?<=displayName=").*?[^"]+', wb$tables, perl = TRUE)))
if(length(displayNames) != length(tablesXML))
displayNames <- paste0("Table", 1:length(tablesXML))
attr(wb$tables, "sheet") <- tableSheets
attr(wb$tables, "tableName") <- displayNames
}
} ## if(length(tablesXML) > 0)
## hyperlinks
## Attach the external-hyperlink targets (from worksheet rels) as names
## on the hyperlink entries already loaded for each sheet.
hlinks <- lapply(xml, function(x) x[grepl("hyperlink", x) & grepl("External", x)])
hlinksInds <- which(sapply(hlinks, length) > 0)
if(length(hlinksInds) > 0){
hlinks <- hlinks[hlinksInds]
for(i in 1:length(hlinksInds)){
targets <- unlist(lapply(hlinks[[i]], function(x) regmatches(x, gregexpr('(?<=Target=").*?"', x, perl = TRUE))[[1]]))
targets <- gsub('"$', "", targets)
names(wb$hyperlinks[[hlinksInds[i]]]) <- targets
}
}
## xml is in the order of the sheets; drawings are tied to the sheet positions flagged in hasDrawing
## Not every sheet has a drawing.xml
## drawings
drawXMLrelationship <- lapply(xml, function(x) x[grepl("drawings/drawing", x)])
hasDrawing <- sapply(drawXMLrelationship, length) > 0 ## which sheets have a drawing
if(length(drawingRelsXML) > 0){
drawingRelsXML <- drawingRelsXML
dRels <- lapply(drawingRelsXML, readLines, warn = FALSE)
dRels <- unlist(lapply(dRels, removeHeadTag))
dRels <- gsub("<Relationships .*?>", "", dRels)
dRels <- gsub("</Relationships>", "", dRels)
}
if(length(drawingsXML) > 0){
dXML <- lapply(drawingsXML, readLines, warn = FALSE)
dXML <- unlist(lapply(dXML, removeHeadTag))
dXML <- gsub("<xdr:wsDr .*?>", "", dXML)
dXML <- gsub("</xdr:wsDr>", "", dXML)
## split at one/two cell Anchor
dXML <- regmatches(dXML, gregexpr("<xdr:...CellAnchor.*?</xdr:...CellAnchor>", dXML))
}
## loop over all worksheets and assign drawing to sheet
for(i in 1:length(xml)){
if(hasDrawing[i]){
target <- unlist(lapply(drawXMLrelationship[[i]], function(x) regmatches(x, gregexpr('(?<=Target=").*?"', x, perl = TRUE))[[1]]))
target <- basename(gsub('"$', "", target))
## sheet_i has which(hasDrawing)[[i]]
relsInd <- grepl(target, drawingRelsXML)
if(any(relsInd))
wb$drawings_rels[i] <- dRels[relsInd]
drawingInd <- grepl(target, drawingsXML)
if(any(drawingInd))
wb$drawings[i] <- dXML[drawingInd]
}
}
## pivot tables
## Re-attach pivot-table rels per sheet, renumbering their rIds into the
## 20000+ range used when the pivot parts were loaded above.
if(length(pivotTableXML) > 0){
pivotTableJ <- lapply(xml, function(x) as.integer(regmatches(x, regexpr("(?<=pivotTable)[0-9]+(?=\\.xml)", x, perl = TRUE))))
sheetWithPivot <- which(sapply(pivotTableJ, length) > 0)
pivotRels <- lapply(xml, function(x) {y <- x[grepl("pivotTable", x)]; y[order(nchar(y), y)]})
hasPivot <- sapply(pivotRels, length) > 0
## Modify rIds
for(i in 1:length(pivotRels)){
if(hasPivot[i]){
for(j in 1:length(pivotRels[[i]]))
pivotRels[[i]][j] <- gsub('"rId[0-9]+"', sprintf('"rId%s"', 20000L + j), pivotRels[[i]][j])
wb$worksheets_rels[[i]] <- c(wb$worksheets_rels[[i]] , pivotRels[[i]])
}
}
## remove any workbook.xml.rels references to pivot tables that are not being used in worksheet_rels
inds <- 1:length(wb$pivotTables.xml.rels)
fileNo <- as.integer(unlist(regmatches(unlist(wb$worksheets_rels), gregexpr('(?<=pivotTable)[0-9]+(?=\\.xml)', unlist(wb$worksheets_rels), perl = TRUE))))
inds <- inds[!inds %in% fileNo]
if(length(inds) > 0){
toRemove <- paste(sprintf("(pivotCacheDefinition%s\\.xml)", inds), collapse = "|")
fileNo <- which(grepl(toRemove, wb$pivotTables.xml.rels))
toRemove <- paste(sprintf("(pivotCacheDefinition%s\\.xml)", fileNo), collapse = "|")
## remove reference to file from workbook.xml.res
wb$workbook.xml.rels <- wb$workbook.xml.rels[!grepl(toRemove, wb$workbook.xml.rels)]
}
}
} ## end of worksheetRels
## queryTables
if(length(queryTablesXML) > 0){
ids <- as.numeric(regmatches(queryTablesXML, regexpr("[0-9]+(?=\\.xml)", queryTablesXML, perl = TRUE)))
wb$queryTables <- unlist(lapply(queryTablesXML[order(ids)], function(x) removeHeadTag(.Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"))))
wb$Content_Types <- c(wb$Content_Types,
sprintf('<Override PartName="/xl/queryTables/queryTable%s.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.queryTable+xml"/>', 1:length(queryTablesXML)))
}
## connections
if(length(connectionsXML) > 0){
wb$connections <- removeHeadTag(.Call("openxlsx_cppReadFile", connectionsXML, PACKAGE = "openxlsx"))
wb$workbook.xml.rels <- c(wb$workbook.xml.rels, '<Relationship Id="rId3" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/connections" Target="connections.xml"/>')
wb$Content_Types <- c(wb$Content_Types, '<Override PartName="/xl/connections.xml" ContentType="application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml"/>')
}
## table rels
if(length(tableRelsXML) > 0){
## table_i_might have tableRels_i but I am re-ordering the tables to be in order of worksheets
## I make every table have a table_rels so i need to fill in the gaps if any table_rels are missing
tmp <- paste0(basename(tablesXML), ".rels")
hasRels <- tmp %in% basename(tableRelsXML)
## order tableRelsXML
tableRelsXML <- tableRelsXML[match(tmp[hasRels], basename(tableRelsXML))]
##
wb$tables.xml.rels <- character(length=length(tablesXML))
## which sheet does it belong to
xml <- sapply(tableRelsXML, function(x) .Call("openxlsx_cppReadFile", x, PACKAGE = "openxlsx"), USE.NAMES = FALSE)
xml <- sapply(xml, removeHeadTag, USE.NAMES = FALSE)
wb$tables.xml.rels[hasRels] <- xml
}else if(length(tablesXML) > 0){
wb$tables.xml.rels <- rep("", length(tablesXML))
}
return(wb)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PoloniexPublic.R
\name{ReturnOrderBook}
\alias{ReturnOrderBook}
\title{Returns the order book for a given market,
as well as a sequence number for use with the Push API
and an indicator specifying whether the market is frozen.}
\usage{
ReturnOrderBook(theObject, pair = "all", depth = 10)
}
\arguments{
\item{theObject}{The public client API object on which the function should be called.}
\item{pair}{length one-character vector - The currencypair
for which orderbook information should be fetched.
You may set pair to "all" to fetch the order books of all markets.}
\item{depth}{numeric - depth of the orderbook.}
}
\value{
A list containing orderbook information.
if pair == "all": a list containing orderbook
information for all available markets.
Each list entry contains information for one
specific market.
if !pair == "all": a list containing orderbook
information for the requested markets.
Each market list contains following fields:
- ask: Orderbook sell side. Dataframe containing
ask prices and corresponding amounts.
- bid: Orderbook buy side. Dataframe containing
bid prices and corresponding amounts.
- frozen: indicator specifying whether the market
is frozen or not.
- seq: Sequence number for Push API.
}
\description{
Returns the order book for a given market,
as well as a sequence number for use with the Push API
and an indicator specifying whether the market is frozen.
}
\examples{
poloniex.public <- PoloniexPublicAPI()
pair <- "BTC_NXT"
depth <- 10
order.book <- ReturnOrderBook(poloniex.public,
pair = pair,
depth = 10)
order.book$bid
order.book$ask
order.book$frozen
order.book$seq
pair <- "all"
depth <- 10
order.book <- ReturnOrderBook(poloniex.public,
pair = pair,
depth = 10)
names(order.book)
order.book$BTC_ETH$ask
}
|
/man/ReturnOrderBook.Rd
|
permissive
|
cran/PoloniexR
|
R
| false
| true
| 2,071
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PoloniexPublic.R
\name{ReturnOrderBook}
\alias{ReturnOrderBook}
\title{Returns the order book for a given market,
as well as a sequence number for use with the Push API
and an indicator specifying whether the market is frozen.}
\usage{
ReturnOrderBook(theObject, pair = "all", depth = 10)
}
\arguments{
\item{theObject}{The public client API object on which the function should be called.}
\item{pair}{length one-character vector - The currencypair
for which orderbook information should be fetched.
You may set pair to "all" to fetch the order books of all markets.}
\item{depth}{numeric - depth of the orderbook.}
}
\value{
A list containing orderbook information.
if pair == "all": a list containing orderbook
information for all available markets.
Each list entry contains information for one
specific market.
if !pair == "all": a list containing orderbook
information for the requested markets.
Each market list contains following fields:
- ask: Orderbook sell side, Dataframe containing
ask prices and corresponding amounts.
- bid: Orderbook buy side. Dataframe containing
bid prices and corresponding amounts.
- frozen: indicator specifying whether the market
is frozen or not.
- seq: Sequence number for Push API.
}
\description{
Returns the order book for a given market,
as well as a sequence number for use with the Push API
and an indicator specifying whether the market is frozen.
}
\examples{
poloniex.public <- PoloniexPublicAPI()
pair <- "BTC_NXT"
depth <- 10
order.book <- ReturnOrderBook(poloniex.public,
pair = pair,
depth = 10)
order.book$bid
order.book$ask
order.book$frozen
order.book$seq
pair <- "all"
depth <- 10
order.book <- ReturnOrderBook(poloniex.public,
pair = pair,
depth = 10)
names(order.book)
order.book$BTC_ETH$ask
}
|
# plot4.R -- 2x2 panel of household power consumption for 2007-02-01/02.
#
# Reads "household_power_consumption.txt" (semicolon-separated, '?' = NA),
# keeps the two target days, draws four time-series panels, and saves the
# result to plot4.png with a transparent background.

data <- read.table("household_power_consumption.txt", na.strings = "?",
                   header = TRUE, sep = ";", stringsAsFactors = FALSE)
data[, 1] <- as.Date(data[, 1], "%d/%m/%Y")

# Subset data to the two days of interest and build a POSIXlt datetime
data <- subset(data, data$Date == "2007-02-01" | data$Date == "2007-02-02")
data$Time <- paste(data$Date, data$Time)
data$Time <- strptime(data$Time, "%F %H:%M:%S")

# Set mfcol for 2x2 plot area (panels fill column-first)
par(mfcol = c(2, 2))

# Panel 1: global active power
plot(data$Time, data$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")

# Panel 2: the three sub-metering series overlaid on an empty frame
plot(data$Time, data$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_1)
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), col = c("black", "red", "blue"), cex = 0.5, bty = "n")

# Panel 3: voltage
plot(data$Time, data$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage")

# Panel 4: global reactive power
plot(data$Time, data$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")

# Copy the screen plot to a PNG file with a transparent background.
# BUG FIX: the original called par(bg = NA) *after* drawing and then
# dev.copy(png, ...). A par() change on the screen device does not carry
# over to the copy, and png()'s default background is white, so the saved
# file was opaque. Passing bg = "transparent" to the png device itself
# makes the replayed plot transparent.
dev.copy(png, "plot4.png", bg = "transparent")
dev.off()
|
/plot4.R
|
no_license
|
dyaz93/ExData_Plotting1
|
R
| false
| false
| 1,174
|
r
|
# plot4.R -- 2x2 panel of household power consumption for 2007-02-01/02.
#
# Reads "household_power_consumption.txt" (semicolon-separated, '?' = NA),
# keeps the two target days, draws four time-series panels, and saves the
# result to plot4.png with a transparent background.

data <- read.table("household_power_consumption.txt", na.strings = "?",
                   header = TRUE, sep = ";", stringsAsFactors = FALSE)
data[, 1] <- as.Date(data[, 1], "%d/%m/%Y")

# Subset data to the two days of interest and build a POSIXlt datetime
data <- subset(data, data$Date == "2007-02-01" | data$Date == "2007-02-02")
data$Time <- paste(data$Date, data$Time)
data$Time <- strptime(data$Time, "%F %H:%M:%S")

# Set mfcol for 2x2 plot area (panels fill column-first)
par(mfcol = c(2, 2))

# Panel 1: global active power
plot(data$Time, data$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")

# Panel 2: the three sub-metering series overlaid on an empty frame
plot(data$Time, data$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy sub metering")
lines(data$Time, data$Sub_metering_1)
lines(data$Time, data$Sub_metering_2, col = "red")
lines(data$Time, data$Sub_metering_3, col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), col = c("black", "red", "blue"), cex = 0.5, bty = "n")

# Panel 3: voltage
plot(data$Time, data$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage")

# Panel 4: global reactive power
plot(data$Time, data$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")

# Copy the screen plot to a PNG file with a transparent background.
# BUG FIX: the original called par(bg = NA) *after* drawing and then
# dev.copy(png, ...). A par() change on the screen device does not carry
# over to the copy, and png()'s default background is white, so the saved
# file was opaque. Passing bg = "transparent" to the png device itself
# makes the replayed plot transparent.
dev.copy(png, "plot4.png", bg = "transparent")
dev.off()
|
## power for univariate and bivariate latent change score model
## Johnny Zhang
## Created on Sep 26, 2016
## Monte Carlo power analysis for a univariate latent change score (LCS) model.
##
## For each (N, T) cell the function (1) builds a lavaan population model with
## every parameter fixed at its supplied value, (2) draws R multivariate-normal
## samples of size N from the model-implied moments, (3) refits a freely
## estimated LCS model to each sample, and (4) summarizes each labeled
## parameter: population value, mean estimate, empirical SD, mean SE, and
## empirical power of the two-sided Wald z test at level `alpha`.
##
## N, T      : sample size(s) and number(s) of occasions; vectors give a grid.
## R         : number of Monte Carlo replications per (N, T) cell.
## betay     : proportional-change parameter (path from y(t-1) to dy(t)).
## my0, mys  : means of the latent intercept and slope.
## varey     : measurement-error variance of the observed Y's.
## vary0, varys, vary0ys : (co)variances of the latent intercept and slope.
## alpha     : significance level used in the power criterion.
## ...       : currently unused.
## Returns an object of class "lcs.power": a summary matrix with columns
## (pop.par, mc.est, mc.sd, mc.se, mc.power, N, T), or a named list of such
## matrices ("N<n>-T<t>") when N or T has length > 1.
## NOTE(review): assumes sem()/inspect() (lavaan) and mvrnorm() (MASS) are
## available in the search path -- confirm the package's imports.
powerLCS<-function(N=100, T=5, R=1000,
                   betay=0, my0=0, mys=0, varey=1, vary0=1, varys=1, vary0ys=0, alpha=0.05, ...){
  #if (sum(N < 2*T)>0) stop("The sample size has to be at least 2 times of the number of occasions")
  ## Population model: every parameter fixed at its supplied value so lavaan
  ## can return the implied mean vector and covariance matrix.
  pop.model <- function(T){
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay
    for (i in 2:T){
      model<-paste(model, "dy",i,"~", betay, "*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope
    model<-paste(model, "ys~~", vary0ys, "*y0\n", sep="")
    model<-paste(model, "y0~~", vary0, "*y0\n", sep="")
    model<-paste(model, "ys~~", varys, "*ys\n", sep="")
    model<-paste(model, "ys~", mys, "*1\n", sep="")
    model<-paste(model, "y0~", my0, "*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~", varey, "*", "Y",i, "\n", sep="")
    }
    model
  }
  ## Analysis model: the same LCS structure with free parameters; start()
  ## places starting values at the population truth, and each free parameter
  ## carries a label so it can be pulled out of the fitted ParTable by name.
  fit.model <- function(T){
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay (free, labeled 'betay')
    for (i in 2:T){
      model<-paste(model, "dy",i,"~start(", betay, ")*y", (i-1)," + betay*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope (free, labeled)
    model<-paste(model, "ys~~start(", vary0ys, ")*y0 + vary0ys*y0\n", sep="")
    model<-paste(model, "y0~~start(", vary0, ")*y0 + vary0*y0\n", sep="")
    model<-paste(model, "ys~~start(", varys, ")*ys + varys*ys\n", sep="")
    model<-paste(model, "ys~start(", mys, ")*1 + label('mys')*1\n", sep="")
    model<-paste(model, "y0~start(", my0, ")*1 + label('my0')*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y (free, labeled 'varey')
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~start(", varey, ")*", "Y", i, " + varey*Y",i, "\n", sep="")
    }
    model
  }
  ## Fit `model` to one generated data set; return the estimates followed by
  ## the standard errors of all labeled (free) parameters, in ParTable order.
  sem.est <- function(model, data){
    temp.res <- sem(model=model, data=data)
    label <- temp.res@ParTable$label
    c(temp.res@ParTable$est[label!=""], temp.res@ParTable$se[label!=""])
  }
  ## do it once for a given N and T
  fit.once <- function(N, T){
    ## generate data from the population model's implied moments
    pop.model.T <- pop.model(T)
    pop.model.T.res <- sem(pop.model.T, do.fit=FALSE)
    pop.model.T.cov <- inspect(pop.model.T.res, "cov.ov")
    pop.model.T.mean <- inspect(pop.model.T.res, "mean.ov")
    ynames <- row.names(pop.model.T.cov)
    ## NOTE(review): n/mu/Sigma are all passed by name, so the replication
    ## index supplied by lapply() lands in mvrnorm's `tol` argument; that
    ## only loosens its positive-definiteness check, but confirm this is
    ## intended.
    gen.data <- lapply(1:R, mvrnorm, n=N, mu=pop.model.T.mean, Sigma=pop.model.T.cov)
    ## conduct the analysis
    fit.model.T <- fit.model(T)
    fit.res <- lapply(gen.data, sem.est, model=fit.model.T)
    ## run once to get the model information (names of the free parameters)
    model.info.res <- sem(fit.model.T, gen.data[[1]])
    label <- model.info.res@ParTable$label
    label <- label[label!=""]
    label.unique <- !duplicated(label)
    label <- label[label.unique]
    npar <- length(label)
    ## get the parameter estimates, sd, se, power, CI of power
    all.res <- do.call(rbind, fit.res)
    ## keep only the first occurrence of each label, for both the estimate
    ## half and the SE half of every replication row
    all.res <- all.res[, c(label.unique, label.unique)]
    mc.est <- colMeans(all.res[, 1:npar])
    mc.se <- apply(all.res[, (npar+1):(2*npar)], 2, mean)
    mc.sd <- apply(all.res[, 1:npar], 2, sd)
    ## empirical power: proportion of replications whose Wald z statistic
    ## exceeds the two-sided critical value at level alpha
    mc.z.score <- all.res[, 1:npar]/all.res[, (npar+1):(2*npar)]
    mc.z.score.check <- abs(mc.z.score) >= qnorm(1-alpha/2)
    mc.power <- colMeans(mc.z.score.check)
    ## recover the population value of each labeled parameter by evaluating
    ## its name (e.g. "betay") in the enclosing environment
    pop.par <- unlist(lapply(label, function(x){eval(parse(text=x))}))
    mc.output <- cbind(pop.par, mc.est, mc.sd, mc.se, mc.power, N, T)
    row.names(mc.output) <- label
    label.sort <- sort(label)
    mc.output[label.sort, ]
  }
  ## run every (N, T) combination when either argument is a vector;
  ## otherwise return the single summary matrix directly
  if (length(N)>1 | length(T)>1){
    all.output <- list()
    for (i in N){
      for (j in T){
        all.output [[paste('N',i,'-T',j, sep="")]]<- fit.once(i,j)
      }
    }
  }else{
    all.output <- fit.once(N,T)
  }
  class(all.output) <- "lcs.power"
  all.output
}
## S3 plot method for "lcs.power" objects: draws the empirical power curve
## of one labeled parameter across the simulated (N, T) grid.
## x         : list of per-cell summary matrices from powerLCS()/powerBLCS().
## parameter : row label of the parameter whose power column is plotted.
## When both N and T vary, one curve is drawn per fixed N and then per fixed
## T, pausing for <enter> between successive plots.
plot.lcs.power <- function(x, parameter, ...){
  ## Stack the per-condition result matrices; rows keep parameter labels.
  stacked <- do.call('rbind', x)
  selected <- stacked[rownames(stacked) == parameter, ]
  ## Columns 6 and 7 carry the N and T of each simulation cell;
  ## column 5 is the empirical power.
  n.levels <- unique(selected[, 6])
  t.levels <- unique(selected[, 7])
  if (length(n.levels) == 1 && length(t.levels) == 1) {
    stop("Multiple N or T is needed for power plot.")
  }
  if (length(n.levels) == 1) {
    ## Single N: power as a function of the number of occasions.
    plot(selected[, 7], selected[, 5], type = "l",
         xlab = "Number of Occasions", ylab = "Power", ylim = c(0, 1))
    points(selected[, 7], selected[, 5])
  }
  if (length(t.levels) == 1) {
    ## Single T: power as a function of the sample size.
    plot(selected[, 6], selected[, 5], type = "l",
         xlab = "Sample size", ylab = "Power", ylim = c(0, 1))
    points(selected[, 6], selected[, 5])
  }
  if (length(n.levels) > 1 && length(t.levels) > 1) {
    ## Full grid: one power-vs-T curve per fixed sample size ...
    for (n.fixed in n.levels) {
      rows.n <- selected[selected[, 6] == n.fixed, ]
      plot(rows.n[, 7], rows.n[, 5], type = "l",
           xlab = "Number of Occasions", ylab = "Power", ylim = c(0, 1))
      points(rows.n[, 7], rows.n[, 5])
      cat("Press [enter] to continue")
      line <- readline()
    }
    ## ... then one power-vs-N curve per fixed number of occasions.
    for (t.fixed in t.levels) {
      rows.t <- selected[selected[, 7] == t.fixed, ]
      plot(rows.t[, 6], rows.t[, 5], type = "l",
           xlab = "Sample size", ylab = "Power", ylim = c(0, 1))
      points(rows.t[, 6], rows.t[, 5])
      cat("Press [enter] to continue")
      line <- readline()
    }
  }
}
## Monte Carlo power analysis for a *bivariate* latent change score model:
## two coupled LCS processes (y and x) with cross-lagged coupling paths
## gammax (x(t-1) -> dy(t)) and gammay (y(t-1) -> dx(t)).
##
## Workflow per (N, T) cell: build a fully fixed lavaan population model,
## draw R multivariate-normal samples of size N from its implied moments,
## refit a freely estimated model to each sample, and summarize every
## labeled parameter (population value, mean estimate, empirical SD, mean
## SE, empirical power of the two-sided Wald z test at level `alpha`).
##
## Arguments mirror powerLCS() for the y process (betay, my0, mys, varey,
## vary0, varys, vary0ys), plus the analogous x-process parameters (betax,
## mx0, mxs, varex, varx0, varxs, varx0xs) and the cross-process covariances
## (varx0y0, varx0ys, vary0xs, varxsys).
## Returns an object of class "lcs.power" with the same layout as powerLCS().
## NOTE(review): assumes sem()/inspect() (lavaan) and mvrnorm() (MASS) are
## available in the search path -- confirm the package's imports.
powerBLCS<-function(N=100, T=5, R=1000,
                    betay=0, my0=0, mys=0, varey=1, vary0=1, varys=1, vary0ys=0, alpha=0.05,
                    betax=0, mx0=0, mxs=0, varex=1, varx0=1, varxs=1, varx0xs=0, varx0y0=0,
                    varx0ys=0, vary0xs=0, varxsys=0, gammax=0, gammay=0, ...){
  ## Population model: every parameter fixed at its supplied value.
  pop.model <- function(T){
    ## for y
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay
    for (i in 2:T){
      model<-paste(model, "dy",i,"~", betay, "*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope
    model<-paste(model, "ys~~", vary0ys, "*y0\n", sep="")
    model<-paste(model, "y0~~", vary0, "*y0\n", sep="")
    model<-paste(model, "ys~~", varys, "*ys\n", sep="")
    model<-paste(model, "ys~", mys, "*1\n", sep="")
    model<-paste(model, "y0~", my0, "*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~", varey, "*", "Y",i, "\n", sep="")
    }
    ## for x
    ## latent x
    ## Intercept
    ## (paste() without sep="" inserts a leading space here; lavaan syntax
    ## tolerates it, so the string is left as written)
    model<-paste(model, "x0 =~ 1*x1\n")
    ## path from x(t-1) to x(t) with path 1
    for (i in 2:T){
      model<-paste(model, "x",i,"~1*x",(i-1),"\n", sep="")
    }
    ## loading from dx(t) to x(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dx",i,"=~1*x",i,"\n", sep="")
    }
    ## path from x(t) to dx(t+1) with path betax
    for (i in 2:T){
      model<-paste(model, "dx",i,"~", betax, "*x", (i-1), "\n", sep="")
    }
    ## latent slope xs factor model
    for (i in 2:T){
      model<-paste(model, "xs=~1*dx", i, "\n", sep="")
    }
    ## variance for dx constrained to 0
    for (i in 2:T){
      model<-paste(model, "dx",i,"~~0*dx",i,"\n", sep="")
    }
    ## variance for x constrained to 0
    for (i in 1:T){
      model<-paste(model, "x",i,"~~0*x",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope
    model<-paste(model, "xs~~", varx0xs, "*x0\n", sep="")
    model<-paste(model, "x0~~", varx0, "*x0\n", sep="")
    model<-paste(model, "xs~~", varxs, "*xs\n", sep="")
    model<-paste(model, "xs~", mxs, "*1\n", sep="")
    model<-paste(model, "x0~", mx0, "*1\n", sep="")
    ## constrain means of x and dx to be zero
    for (i in 1:T){
      model<-paste(model, "x",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dx",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## x(t) to X(t)
    for (i in 1:T){
      model<-paste(model, "x",i,"=~1*", "X",i, "\n", sep="")
    }
    ## set means of X to be zero
    for (i in 1:T){
      model<-paste(model, "X",i, "~0*1\n", sep="")
    }
    ## set the variance for X
    for (i in 1:T){
      model<-paste(model, "X",i, "~~", varex, "*", "X",i, "\n", sep="")
    }
    ## coupling effects: x(t-1) drives dy(t), y(t-1) drives dx(t)
    for (i in 2:T){
      model<-paste(model, "dy",i,"~", gammax, "*x",i-1, "\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dx",i,"~", gammay, "*y",i-1, "\n", sep="")
    }
    ## cross-process intercept/slope covariances
    model<-paste(model, "x0~~", varx0y0, "*y0\n", sep="")
    model<-paste(model, "x0~~", varx0ys, "*ys\n", sep="")
    model<-paste(model, "y0~~", vary0xs, "*xs\n", sep="")
    model<-paste(model, "xs~~", varxsys, "*ys\n", sep="")
    model
  }
  ## Analysis model: same structure with free parameters; start() sets
  ## starting values at the population truth and each free parameter is
  ## labeled so it can be retrieved from the fitted ParTable by name.
  fit.model <- function(T){
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay (free, labeled 'betay')
    for (i in 2:T){
      model<-paste(model, "dy",i,"~start(", betay, ")*y", (i-1)," + betay*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope (free, labeled)
    model<-paste(model, "ys~~start(", vary0ys, ")*y0 + vary0ys*y0\n", sep="")
    model<-paste(model, "y0~~start(", vary0, ")*y0 + vary0*y0\n", sep="")
    model<-paste(model, "ys~~start(", varys, ")*ys + varys*ys\n", sep="")
    model<-paste(model, "ys~start(", mys, ")*1 + label('mys')*1\n", sep="")
    model<-paste(model, "y0~start(", my0, ")*1 + label('my0')*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y (free, labeled 'varey')
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~start(", varey, ")*", "Y", i, " + varey*Y",i, "\n", sep="")
    }
    ## latent x
    ## Intercept
    model<-paste(model, "x0 =~ 1*x1\n")
    ## path from x(t-1) to x(t) with path 1
    for (i in 2:T){
      model<-paste(model, "x",i,"~1*x",(i-1),"\n", sep="")
    }
    ## loading from dx(t) to x(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dx",i,"=~1*x",i,"\n", sep="")
    }
    ## path from x(t) to dx(t+1) with path betax (free, labeled 'betax')
    for (i in 2:T){
      model<-paste(model, "dx",i,"~start(", betax, ")*x", (i-1)," + betax*x", (i-1), "\n", sep="")
    }
    ## latent slope xs factor model
    for (i in 2:T){
      model<-paste(model, "xs=~1*dx", i, "\n", sep="")
    }
    ## variance for dx constrained to 0
    for (i in 2:T){
      model<-paste(model, "dx",i,"~~0*dx",i,"\n", sep="")
    }
    ## variance for x constrained to 0
    for (i in 1:T){
      model<-paste(model, "x",i,"~~0*x",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope (free, labeled)
    model<-paste(model, "xs~~start(", varx0xs, ")*x0 + varx0xs*x0\n", sep="")
    model<-paste(model, "x0~~start(", varx0, ")*x0 + varx0*x0\n", sep="")
    model<-paste(model, "xs~~start(", varxs, ")*xs + varxs*xs\n", sep="")
    model<-paste(model, "xs~start(", mxs, ")*1 + label('mxs')*1\n", sep="")
    model<-paste(model, "x0~start(", mx0, ")*1 + label('mx0')*1\n", sep="")
    ## constrain means of x and dx to be zero
    for (i in 1:T){
      model<-paste(model, "x",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dx",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## x(t) to X(t)
    for (i in 1:T){
      model<-paste(model, "x",i,"=~1*", "X",i, "\n", sep="")
    }
    ## set means of X to be zero
    for (i in 1:T){
      model<-paste(model, "X",i, "~0*1\n", sep="")
    }
    ## set the variance for X (free, labeled 'varex')
    for (i in 1:T){
      model<-paste(model, "X",i, "~~start(", varex, ")*", "X", i, " + varex*X",i, "\n", sep="")
    }
    ## coupling effects (free, labeled 'gammax' / 'gammay')
    for (i in 2:T){
      model<-paste(model, "dy",i,"~start(", gammax, ")*x", i-1, " + gammax*x", i-1, "\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dx",i,"~start(", gammay, ")*y", i-1, " + gammay*y", i-1, "\n", sep="")
    }
    ## cross-process intercept/slope covariances (free, labeled)
    model<-paste(model, "x0~~start(", varx0y0, ")*y0 + varx0y0*y0\n", sep="")
    model<-paste(model, "x0~~start(", varx0ys, ")*ys + varx0ys*ys\n", sep="")
    model<-paste(model, "y0~~start(", vary0xs, ")*xs + vary0xs*xs\n", sep="")
    model<-paste(model, "xs~~start(", varxsys, ")*ys + varxsys*ys\n", sep="")
    model
  }
  ## Fit `model` to one generated data set; return the estimates followed by
  ## the standard errors of all labeled (free) parameters, in ParTable order.
  sem.est <- function(model, data){
    temp.res <- sem(model=model, data=data)
    label <- temp.res@ParTable$label
    c(temp.res@ParTable$est[label!=""], temp.res@ParTable$se[label!=""])
  }
  ## do it once for a given N and T
  fit.once <- function(N, T){
    ## generate data from the population model's implied moments
    pop.model.T <- pop.model(T)
    pop.model.T.res <- sem(pop.model.T, do.fit=FALSE)
    pop.model.T.cov <- inspect(pop.model.T.res, "cov.ov")
    pop.model.T.mean <- inspect(pop.model.T.res, "mean.ov")
    ynames <- row.names(pop.model.T.cov)
    ## NOTE(review): n/mu/Sigma are all passed by name, so the replication
    ## index supplied by lapply() lands in mvrnorm's `tol` argument; that
    ## only loosens its positive-definiteness check, but confirm this is
    ## intended.
    gen.data <- lapply(1:R, mvrnorm, n=N, mu=pop.model.T.mean, Sigma=pop.model.T.cov)
    ## conduct the analysis
    fit.model.T <- fit.model(T)
    fit.res <- lapply(gen.data, sem.est, model=fit.model.T)
    ## run once to get the model information (names of the free parameters)
    model.info.res <- sem(fit.model.T, gen.data[[1]])
    label <- model.info.res@ParTable$label
    label <- label[label!=""]
    label.unique <- !duplicated(label)
    label <- label[label.unique]
    npar <- length(label)
    ## get the parameter estimates, sd, se, power, CI of power
    all.res <- do.call(rbind, fit.res)
    ## keep only the first occurrence of each label, for both the estimate
    ## half and the SE half of every replication row
    all.res <- all.res[, c(label.unique, label.unique)]
    mc.est <- colMeans(all.res[, 1:npar])
    mc.se <- apply(all.res[, (npar+1):(2*npar)], 2, mean)
    mc.sd <- apply(all.res[, 1:npar], 2, sd)
    ## empirical power: proportion of replications whose Wald z statistic
    ## exceeds the two-sided critical value at level alpha
    mc.z.score <- all.res[, 1:npar]/all.res[, (npar+1):(2*npar)]
    mc.z.score.check <- abs(mc.z.score) >= qnorm(1-alpha/2)
    mc.power <- colMeans(mc.z.score.check)
    ## recover the population value of each labeled parameter by evaluating
    ## its name (e.g. "gammax") in the enclosing environment
    pop.par <- unlist(lapply(label, function(x){eval(parse(text=x))}))
    mc.output <- cbind(pop.par, mc.est, mc.sd, mc.se, mc.power, N, T)
    row.names(mc.output) <- label
    label.sort <- sort(label)
    mc.output[label.sort, ]
  }
  ## run every (N, T) combination when either argument is a vector;
  ## otherwise return the single summary matrix directly
  if (length(N)>1 | length(T)>1){
    all.output <- list()
    for (i in N){
      for (j in T){
        all.output [[paste('N',i,'-T',j, sep="")]]<- fit.once(i,j)
      }
    }
  }else{
    all.output <- fit.once(N,T)
  }
  class(all.output) <- "lcs.power"
  all.output
}
|
/R/power.R
|
no_license
|
cran/RAMpath
|
R
| false
| false
| 18,107
|
r
|
## power for univariate and bivariate latent change score model
## Johnny Zhang
## Created on Sep 26, 2016
## Monte Carlo power analysis for a univariate latent change score (LCS) model.
##
## For each (N, T) cell the function (1) builds a lavaan population model with
## every parameter fixed at its supplied value, (2) draws R multivariate-normal
## samples of size N from the model-implied moments, (3) refits a freely
## estimated LCS model to each sample, and (4) summarizes each labeled
## parameter: population value, mean estimate, empirical SD, mean SE, and
## empirical power of the two-sided Wald z test at level `alpha`.
##
## N, T      : sample size(s) and number(s) of occasions; vectors give a grid.
## R         : number of Monte Carlo replications per (N, T) cell.
## betay     : proportional-change parameter (path from y(t-1) to dy(t)).
## my0, mys  : means of the latent intercept and slope.
## varey     : measurement-error variance of the observed Y's.
## vary0, varys, vary0ys : (co)variances of the latent intercept and slope.
## alpha     : significance level used in the power criterion.
## ...       : currently unused.
## Returns an object of class "lcs.power": a summary matrix with columns
## (pop.par, mc.est, mc.sd, mc.se, mc.power, N, T), or a named list of such
## matrices ("N<n>-T<t>") when N or T has length > 1.
## NOTE(review): assumes sem()/inspect() (lavaan) and mvrnorm() (MASS) are
## available in the search path -- confirm the package's imports.
powerLCS<-function(N=100, T=5, R=1000,
                   betay=0, my0=0, mys=0, varey=1, vary0=1, varys=1, vary0ys=0, alpha=0.05, ...){
  #if (sum(N < 2*T)>0) stop("The sample size has to be at least 2 times of the number of occasions")
  ## Population model: every parameter fixed at its supplied value so lavaan
  ## can return the implied mean vector and covariance matrix.
  pop.model <- function(T){
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay
    for (i in 2:T){
      model<-paste(model, "dy",i,"~", betay, "*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope
    model<-paste(model, "ys~~", vary0ys, "*y0\n", sep="")
    model<-paste(model, "y0~~", vary0, "*y0\n", sep="")
    model<-paste(model, "ys~~", varys, "*ys\n", sep="")
    model<-paste(model, "ys~", mys, "*1\n", sep="")
    model<-paste(model, "y0~", my0, "*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~", varey, "*", "Y",i, "\n", sep="")
    }
    model
  }
  ## Analysis model: the same LCS structure with free parameters; start()
  ## places starting values at the population truth, and each free parameter
  ## carries a label so it can be pulled out of the fitted ParTable by name.
  fit.model <- function(T){
    ## latent y
    ## Intercept
    model<-"y0 =~ 1*y1\n"
    ## path from y(t-1) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
    }
    ## loading from dy(t) to y(t) with path 1
    for (i in 2:T){
      model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
    }
    ## path from y(t) to dy(t+1) with path betay (free, labeled 'betay')
    for (i in 2:T){
      model<-paste(model, "dy",i,"~start(", betay, ")*y", (i-1)," + betay*y", (i-1), "\n", sep="")
    }
    ## latent slope ys factor model
    for (i in 2:T){
      model<-paste(model, "ys=~1*dy", i, "\n", sep="")
    }
    ## variance for dy constrained to 0
    for (i in 2:T){
      model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
    }
    ## variance for y constrained to 0
    for (i in 1:T){
      model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
    }
    ## variance and covariance for intercept and slope (free, labeled)
    model<-paste(model, "ys~~start(", vary0ys, ")*y0 + vary0ys*y0\n", sep="")
    model<-paste(model, "y0~~start(", vary0, ")*y0 + vary0*y0\n", sep="")
    model<-paste(model, "ys~~start(", varys, ")*ys + varys*ys\n", sep="")
    model<-paste(model, "ys~start(", mys, ")*1 + label('mys')*1\n", sep="")
    model<-paste(model, "y0~start(", my0, ")*1 + label('my0')*1\n", sep="")
    ## constrain means of y and dy to be zero
    for (i in 1:T){
      model<-paste(model, "y",i,"~0*1\n", sep="")
    }
    for (i in 2:T){
      model<-paste(model, "dy",i,"~0*1\n", sep="")
    }
    ## for observed data part
    ## y(t) to Y(t)
    for (i in 1:T){
      model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
    }
    ## set means of Y to be zero
    for (i in 1:T){
      model<-paste(model, "Y",i, "~0*1\n", sep="")
    }
    ## set the variance for Y (free, labeled 'varey')
    for (i in 1:T){
      model<-paste(model, "Y",i, "~~start(", varey, ")*", "Y", i, " + varey*Y",i, "\n", sep="")
    }
    model
  }
  ## Fit `model` to one generated data set; return the estimates followed by
  ## the standard errors of all labeled (free) parameters, in ParTable order.
  sem.est <- function(model, data){
    temp.res <- sem(model=model, data=data)
    label <- temp.res@ParTable$label
    c(temp.res@ParTable$est[label!=""], temp.res@ParTable$se[label!=""])
  }
  ## do it once for a given N and T
  fit.once <- function(N, T){
    ## generate data from the population model's implied moments
    pop.model.T <- pop.model(T)
    pop.model.T.res <- sem(pop.model.T, do.fit=FALSE)
    pop.model.T.cov <- inspect(pop.model.T.res, "cov.ov")
    pop.model.T.mean <- inspect(pop.model.T.res, "mean.ov")
    ynames <- row.names(pop.model.T.cov)
    ## NOTE(review): n/mu/Sigma are all passed by name, so the replication
    ## index supplied by lapply() lands in mvrnorm's `tol` argument; that
    ## only loosens its positive-definiteness check, but confirm this is
    ## intended.
    gen.data <- lapply(1:R, mvrnorm, n=N, mu=pop.model.T.mean, Sigma=pop.model.T.cov)
    ## conduct the analysis
    fit.model.T <- fit.model(T)
    fit.res <- lapply(gen.data, sem.est, model=fit.model.T)
    ## run once to get the model information (names of the free parameters)
    model.info.res <- sem(fit.model.T, gen.data[[1]])
    label <- model.info.res@ParTable$label
    label <- label[label!=""]
    label.unique <- !duplicated(label)
    label <- label[label.unique]
    npar <- length(label)
    ## get the parameter estimates, sd, se, power, CI of power
    all.res <- do.call(rbind, fit.res)
    ## keep only the first occurrence of each label, for both the estimate
    ## half and the SE half of every replication row
    all.res <- all.res[, c(label.unique, label.unique)]
    mc.est <- colMeans(all.res[, 1:npar])
    mc.se <- apply(all.res[, (npar+1):(2*npar)], 2, mean)
    mc.sd <- apply(all.res[, 1:npar], 2, sd)
    ## empirical power: proportion of replications whose Wald z statistic
    ## exceeds the two-sided critical value at level alpha
    mc.z.score <- all.res[, 1:npar]/all.res[, (npar+1):(2*npar)]
    mc.z.score.check <- abs(mc.z.score) >= qnorm(1-alpha/2)
    mc.power <- colMeans(mc.z.score.check)
    ## recover the population value of each labeled parameter by evaluating
    ## its name (e.g. "betay") in the enclosing environment
    pop.par <- unlist(lapply(label, function(x){eval(parse(text=x))}))
    mc.output <- cbind(pop.par, mc.est, mc.sd, mc.se, mc.power, N, T)
    row.names(mc.output) <- label
    label.sort <- sort(label)
    mc.output[label.sort, ]
  }
  ## run every (N, T) combination when either argument is a vector;
  ## otherwise return the single summary matrix directly
  if (length(N)>1 | length(T)>1){
    all.output <- list()
    for (i in N){
      for (j in T){
        all.output [[paste('N',i,'-T',j, sep="")]]<- fit.once(i,j)
      }
    }
  }else{
    all.output <- fit.once(N,T)
  }
  class(all.output) <- "lcs.power"
  all.output
}
## S3 plot method for "lcs.power" objects: draws the empirical power curve
## of one labeled parameter across the simulated (N, T) grid.
## x         : list of per-cell summary matrices from powerLCS()/powerBLCS().
## parameter : row label of the parameter whose power column is plotted.
## When both N and T vary, one curve is drawn per fixed N and per fixed T,
## pausing for <enter> between successive plots (readline()).
plot.lcs.power <- function(x, parameter, ...){
  ## x is the output from power analysis; stack all per-cell matrices
  ## (row names keep the parameter labels)
  power.mat <- do.call('rbind', x)
  power.par <- power.mat[rownames(power.mat)==parameter, ]
  ## columns 6 and 7 hold the N and T of each cell; column 5 is the power
  unique.N <- unique(power.par[ ,6])
  unique.T <- unique(power.par[ ,7])
  if (length(unique.N)==1 & length(unique.T)==1) stop("Multiple N or T is needed for power plot.")
  if (length(unique.N)==1){
    ## plot the power along T
    plot(power.par[, 7], power.par[, 5], type='l', xlab='Number of Occasions', ylab='Power', ylim=c(0,1))
    points(power.par[, 7], power.par[, 5])
  }
  if (length(unique.T)==1){
    ## plot the power along N
    plot(power.par[, 6], power.par[, 5], type='l', xlab='Sample size', ylab='Power', ylim=c(0,1))
    points(power.par[, 6], power.par[, 5])
  }
  if (length(unique.N)>1 & length(unique.T)>1){
    for (N in unique.N){
      ## plot power with number of occasions for a given sample size
      temp.power <- power.par[power.par[, 6]==N, ]
      plot(temp.power[, 7], temp.power[, 5], type='l', xlab='Number of Occasions', ylab='Power', ylim=c(0,1))
      points(temp.power[, 7], temp.power[, 5])
      cat ("Press [enter] to continue")
      line <- readline()
    }
    for (T in unique.T){
      ## plot power with sample size for a given number of occasions
      temp.power <- power.par[power.par[, 7]==T, ]
      plot(temp.power[, 6], temp.power[, 5], type='l', xlab='Sample size', ylab='Power', ylim=c(0,1))
      points(temp.power[, 6], temp.power[, 5])
      cat ("Press [enter] to continue")
      line <- readline()
    }
  }
}
powerBLCS<-function(N=100, T=5, R=1000,
betay=0, my0=0, mys=0, varey=1, vary0=1, varys=1, vary0ys=0, alpha=0.05,
betax=0, mx0=0, mxs=0, varex=1, varx0=1, varxs=1, varx0xs=0, varx0y0=0,
varx0ys=0, vary0xs=0, varxsys=0, gammax=0, gammay=0, ...){
pop.model <- function(T){
## for y
## latent y
## Intercept
model<-"y0 =~ 1*y1\n"
## path from y(t-1) to y(t) with path 1
for (i in 2:T){
model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
}
## loading from dy(t) to y(t) with path 1
for (i in 2:T){
model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
}
## path from y(t) to dy(t+1) with path betay
for (i in 2:T){
model<-paste(model, "dy",i,"~", betay, "*y", (i-1), "\n", sep="")
}
## latent slope ys factor model
for (i in 2:T){
model<-paste(model, "ys=~1*dy", i, "\n", sep="")
}
## variance for dy constraints to 0
for (i in 2:T){
model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
}
## variance for y constraints to 0
for (i in 1:T){
model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
}
## variance and covariance for intercept and slope
model<-paste(model, "ys~~", vary0ys, "*y0\n", sep="")
model<-paste(model, "y0~~", vary0, "*y0\n", sep="")
model<-paste(model, "ys~~", varys, "*ys\n", sep="")
model<-paste(model, "ys~", mys, "*1\n", sep="")
model<-paste(model, "y0~", my0, "*1\n", sep="")
## constrain means of y and dy to be zero
for (i in 1:T){
model<-paste(model, "y",i,"~0*1\n", sep="")
}
for (i in 2:T){
model<-paste(model, "dy",i,"~0*1\n", sep="")
}
## for observed data part
## y(t) to Y(t)
for (i in 1:T){
model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
}
## set means of Y to be zero
for (i in 1:T){
model<-paste(model, "Y",i, "~0*1\n", sep="")
}
## set the variance for Y
for (i in 1:T){
model<-paste(model, "Y",i, "~~", varey, "*", "Y",i, "\n", sep="")
}
## for x
## latent x
## Intercept
model<-paste(model, "x0 =~ 1*x1\n")
## path from x(t-1) to x(t) with path 1
for (i in 2:T){
model<-paste(model, "x",i,"~1*x",(i-1),"\n", sep="")
}
## loading from dx(t) to x(t) with path 1
for (i in 2:T){
model<-paste(model, "dx",i,"=~1*x",i,"\n", sep="")
}
## path from x(t) to dx(t+1) with path betax
for (i in 2:T){
model<-paste(model, "dx",i,"~", betax, "*x", (i-1), "\n", sep="")
}
## latent slope xs factor model
for (i in 2:T){
model<-paste(model, "xs=~1*dx", i, "\n", sep="")
}
## variance for dx constraints to 0
for (i in 2:T){
model<-paste(model, "dx",i,"~~0*dx",i,"\n", sep="")
}
## variance for x constraints to 0
for (i in 1:T){
model<-paste(model, "x",i,"~~0*x",i,"\n", sep="")
}
## variance and covariance for intercept and slope
model<-paste(model, "xs~~", varx0xs, "*x0\n", sep="")
model<-paste(model, "x0~~", varx0, "*x0\n", sep="")
model<-paste(model, "xs~~", varxs, "*xs\n", sep="")
model<-paste(model, "xs~", mxs, "*1\n", sep="")
model<-paste(model, "x0~", mx0, "*1\n", sep="")
## constrain means of x and dx to be zero
for (i in 1:T){
model<-paste(model, "x",i,"~0*1\n", sep="")
}
for (i in 2:T){
model<-paste(model, "dx",i,"~0*1\n", sep="")
}
## for observed data part
## x(t) to X(t)
for (i in 1:T){
model<-paste(model, "x",i,"=~1*", "X",i, "\n", sep="")
}
## set means of X to be zero
for (i in 1:T){
model<-paste(model, "X",i, "~0*1\n", sep="")
}
## set the variance for X
for (i in 1:T){
model<-paste(model, "X",i, "~~", varex, "*", "X",i, "\n", sep="")
}
## coupling effects
for (i in 2:T){
model<-paste(model, "dy",i,"~", gammax, "*x",i-1, "\n", sep="")
}
for (i in 2:T){
model<-paste(model, "dx",i,"~", gammay, "*y",i-1, "\n", sep="")
}
model<-paste(model, "x0~~", varx0y0, "*y0\n", sep="")
model<-paste(model, "x0~~", varx0ys, "*ys\n", sep="")
model<-paste(model, "y0~~", vary0xs, "*xs\n", sep="")
model<-paste(model, "xs~~", varxsys, "*ys\n", sep="")
model
}
# Builds the lavaan model-syntax string for the ANALYSIS model of a
# bivariate latent change score (LCS) model with T occasions: two coupled
# univariate LCS processes (Y and X), each with a latent intercept (y0/x0),
# a latent slope (ys/xs), a self-feedback path (betay/betax), and
# cross-lagged coupling paths (gammax/gammay).  Unlike the population
# model builder, every free parameter here gets a start() value and a
# parameter label so that sem.est() can extract the labelled estimates
# and standard errors after fitting.
# Relies on parameter objects (betay, betax, vary0, varys, mys, my0,
# varey, varx0, varxs, mxs, mx0, varex, gammax, gammay, and the
# covariance terms) defined in the enclosing scope.
# NOTE(review): the argument is named T, which shadows the T alias for
# TRUE inside this function body -- it works, but is worth renaming.
fit.model <- function(T){
  ## latent y
  ## Intercept
  model<-"y0 =~ 1*y1\n"
  ## path from y(t-1) to y(t) with path 1
  for (i in 2:T){
    model<-paste(model, "y",i,"~1*y",(i-1),"\n", sep="")
  }
  ## loading from dy(t) to y(t) with path 1
  for (i in 2:T){
    model<-paste(model, "dy",i,"=~1*y",i,"\n", sep="")
  }
  ## path from y(t) to dy(t+1) with path betay
  for (i in 2:T){
    model<-paste(model, "dy",i,"~start(", betay, ")*y", (i-1)," + betay*y", (i-1), "\n", sep="")
  }
  ## latent slope ys factor model
  for (i in 2:T){
    model<-paste(model, "ys=~1*dy", i, "\n", sep="")
  }
  ## variance for dy constraints to 0
  for (i in 2:T){
    model<-paste(model, "dy",i,"~~0*dy",i,"\n", sep="")
  }
  ## variance for y constraints to 0
  for (i in 1:T){
    model<-paste(model, "y",i,"~~0*y",i,"\n", sep="")
  }
  ## variance and covariance for intercept and slope
  model<-paste(model, "ys~~start(", vary0ys, ")*y0 + vary0ys*y0\n", sep="")
  model<-paste(model, "y0~~start(", vary0, ")*y0 + vary0*y0\n", sep="")
  model<-paste(model, "ys~~start(", varys, ")*ys + varys*ys\n", sep="")
  model<-paste(model, "ys~start(", mys, ")*1 + label('mys')*1\n", sep="")
  model<-paste(model, "y0~start(", my0, ")*1 + label('my0')*1\n", sep="")
  ## constrain means of y and dy to be zero
  for (i in 1:T){
    model<-paste(model, "y",i,"~0*1\n", sep="")
  }
  for (i in 2:T){
    model<-paste(model, "dy",i,"~0*1\n", sep="")
  }
  ## for observed data part
  ## y(t) to Y(t)
  for (i in 1:T){
    model<-paste(model, "y",i,"=~1*", "Y",i, "\n", sep="")
  }
  ## set means of Y to be zero
  for (i in 1:T){
    model<-paste(model, "Y",i, "~0*1\n", sep="")
  }
  ## set the variance for Y
  for (i in 1:T){
    model<-paste(model, "Y",i, "~~start(", varey, ")*", "Y", i, " + varey*Y",i, "\n", sep="")
  }
  ## latent x
  ## Intercept
  ## NOTE(review): no sep="" here, so paste's default sep=" " inserts an
  ## extra space before "x0 =~ ..."; harmless to lavaan's parser.
  model<-paste(model, "x0 =~ 1*x1\n")
  ## path from x(t-1) to x(t) with path 1
  for (i in 2:T){
    model<-paste(model, "x",i,"~1*x",(i-1),"\n", sep="")
  }
  ## loading from dx(t) to x(t) with path 1
  for (i in 2:T){
    model<-paste(model, "dx",i,"=~1*x",i,"\n", sep="")
  }
  ## path from x(t) to dx(t+1) with path betax
  for (i in 2:T){
    model<-paste(model, "dx",i,"~start(", betax, ")*x", (i-1)," + betax*x", (i-1), "\n", sep="")
  }
  ## latent slope xs factor model
  for (i in 2:T){
    model<-paste(model, "xs=~1*dx", i, "\n", sep="")
  }
  ## variance for dx constraints to 0
  for (i in 2:T){
    model<-paste(model, "dx",i,"~~0*dx",i,"\n", sep="")
  }
  ## variance for x constraints to 0
  for (i in 1:T){
    model<-paste(model, "x",i,"~~0*x",i,"\n", sep="")
  }
  ## variance and covariance for intercept and slope
  model<-paste(model, "xs~~start(", varx0xs, ")*x0 + varx0xs*x0\n", sep="")
  model<-paste(model, "x0~~start(", varx0, ")*x0 + varx0*x0\n", sep="")
  model<-paste(model, "xs~~start(", varxs, ")*xs + varxs*xs\n", sep="")
  model<-paste(model, "xs~start(", mxs, ")*1 + label('mxs')*1\n", sep="")
  model<-paste(model, "x0~start(", mx0, ")*1 + label('mx0')*1\n", sep="")
  ## constrain means of x and dx to be zero
  for (i in 1:T){
    model<-paste(model, "x",i,"~0*1\n", sep="")
  }
  for (i in 2:T){
    model<-paste(model, "dx",i,"~0*1\n", sep="")
  }
  ## for observed data part
  ## x(t) to X(t)
  for (i in 1:T){
    model<-paste(model, "x",i,"=~1*", "X",i, "\n", sep="")
  }
  ## set means of X to be zero
  for (i in 1:T){
    model<-paste(model, "X",i, "~0*1\n", sep="")
  }
  ## set the variance for X
  for (i in 1:T){
    model<-paste(model, "X",i, "~~start(", varex, ")*", "X", i, " + varex*X",i, "\n", sep="")
  }
  ## coupling effects: x(t-1) predicts dy(t), y(t-1) predicts dx(t)
  for (i in 2:T){
    model<-paste(model, "dy",i,"~start(", gammax, ")*x", i-1, " + gammax*x", i-1, "\n", sep="")
  }
  for (i in 2:T){
    model<-paste(model, "dx",i,"~start(", gammay, ")*y", i-1, " + gammay*y", i-1, "\n", sep="")
  }
  ## cross-process covariances among intercepts and slopes
  model<-paste(model, "x0~~start(", varx0y0, ")*y0 + varx0y0*y0\n", sep="")
  model<-paste(model, "x0~~start(", varx0ys, ")*ys + varx0ys*ys\n", sep="")
  model<-paste(model, "y0~~start(", vary0xs, ")*xs + vary0xs*xs\n", sep="")
  model<-paste(model, "xs~~start(", varxsys, ")*ys + varxsys*ys\n", sep="")
  ## return the assembled lavaan syntax string
  model
}
# Fits the lavaan model to one generated data set and returns a numeric
# vector: the labelled parameter estimates followed by their standard
# errors (only parameters that carry a label in the parameter table).
sem.est <- function(model, data){
  fitted <- sem(model = model, data = data)
  partable <- fitted@ParTable
  labelled <- partable$label != ""
  c(partable$est[labelled], partable$se[labelled])
}
## do it once for a given N and T
## Runs one cell of the Monte Carlo power study: draws R samples of size N
## from the T-occasion population model, fits the analysis model to each
## replication, and returns a matrix with the population value, mean
## estimate, empirical SD, mean SE and empirical power for every labelled
## parameter (rows sorted by label).
## Depends on objects from the enclosing scope: R (number of replications),
## alpha (significance level), pop.model(), fit.model(), sem.est(), and
## the population parameter objects referred to by the labels.
fit.once <- function(N, T){
  ## generate data: model-implied moments of the population model
  pop.model.T <- pop.model(T)
  pop.model.T.res <- sem(pop.model.T, do.fit=FALSE)
  pop.model.T.cov <- inspect(pop.model.T.res, "cov.ov")
  pop.model.T.mean <- inspect(pop.model.T.res, "mean.ov")
  ynames <- row.names(pop.model.T.cov)
  ## Draw R independent samples.  An explicit anonymous function is used:
  ## the previous lapply(1:R, mvrnorm, n=N, mu=..., Sigma=...) silently
  ## matched the replication index to mvrnorm's 'tol' argument (its first
  ## formal not already taken by name), loosening the positive-definiteness
  ## check of Sigma more and more on later replications.
  gen.data <- lapply(1:R, function(r) mvrnorm(n=N, mu=pop.model.T.mean, Sigma=pop.model.T.cov))
  ## conduct the analysis on every replication
  fit.res <- lapply(gen.data, sem.est, model=fit.model(T))
  ## run once to recover the parameter labels and count
  model.info.res <- sem(fit.model(T), gen.data[[1]])
  label <- model.info.res@ParTable$label
  label <- label[label!=""]
  label.unique <- !duplicated(label)
  label <- label[label.unique]
  npar <- length(label)
  ## stack replications; keep one column per unique label, first the
  ## estimates and then the standard errors
  all.res <- do.call(rbind, fit.res)
  all.res <- all.res[, c(label.unique, label.unique)]
  mc.est <- colMeans(all.res[, 1:npar])
  mc.se <- colMeans(all.res[, (npar+1):(2*npar)])
  mc.sd <- apply(all.res[, 1:npar], 2, sd)
  ## empirical power: proportion of replications with |z| at or beyond the
  ## two-sided critical value
  mc.z.score <- all.res[, 1:npar]/all.res[, (npar+1):(2*npar)]
  mc.z.score.check <- abs(mc.z.score) >= qnorm(1-alpha/2)
  mc.power <- colMeans(mc.z.score.check)
  ## population values live in objects named after the labels; look them
  ## up lexically instead of eval(parse(text=...))
  pop.par <- unlist(lapply(label, function(x) get(x)))
  mc.output <- cbind(pop.par, mc.est, mc.sd, mc.se, mc.power, N, T)
  row.names(mc.output) <- label
  mc.output[sort(label), ]
}
if (length(N)>1 | length(T)>1){
all.output <- list()
for (i in N){
for (j in T){
all.output [[paste('N',i,'-T',j, sep="")]]<- fit.once(i,j)
}
}
}else{
all.output <- fit.once(N,T)
}
class(all.output) <- "lcs.power"
all.output
}
|
#' robis: R client for the OBIS API
#'
#' Work in progress
#'
#' @docType package
#' @name robis
#' @import dplyr jsonlite leaflet ggplot2 tidyr tibble httr mapedit sf
#' @importFrom rlang .data
NULL
|
/R/robis.R
|
permissive
|
howlerMoonkey/robis
|
R
| false
| false
| 200
|
r
|
#' robis: R client for the OBIS API
#'
#' Work in progress
#'
#' @docType package
#' @name robis
#' @import dplyr jsonlite leaflet ggplot2 tidyr tibble httr mapedit sf
#' @importFrom rlang .data
NULL
|
#Getting and Cleaning Data Course Project
#Script for
# 0 - Download and create the working directory.
# 1 - Merges the training and the test sets to create one data set.
# 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 - Uses descriptive activity names to name the activitys in the data set
# 4 - Appropriately labels the data set with descriptive variable names.
# 5 - From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Download files and set directory
# Ensures the project directory (and its Output subfolder) exists,
# downloads and unzips the UCI HAR archive if needed, and returns the
# path of the unzipped data folder (used by go() as the working dir).
download <- function()
{
  DataDirectory <- "D:/CleaningDataProject"
  zipURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  filename <- "project.zip"
  # Create the project directory and Output subfolder on first run
  if (!file.exists(DataDirectory)) {
    dir.create(file.path(DataDirectory))
    dir.create(file.path(DataDirectory, "Output"))
  }
  setwd(DataDirectory)
  # Download the archive only if it is not already present
  if (!file.exists(file.path(DataDirectory, filename))) {
    download.file(zipURL, destfile = filename, method = "libcurl")
  }
  dataPath <- file.path(DataDirectory, "UCI HAR Dataset")
  # Unzip whenever the data folder is absent; this also covers the case
  # where the zip exists but a previous run was interrupted before unzip
  # (the original only unzipped right after a fresh download).
  if (!file.exists(dataPath)) {
    unzip(filename)
  }
  # Return the data path explicitly instead of via a self-assignment
  dataPath
}
# Function to Start the analysis
# go() builds the tidy data set required by the course project:
#  1. merges the training and test sets into one data set,
#  2. keeps only the mean/std measurements,
#  3. attaches descriptive activity names,
#  4. cleans up the variable names, and
#  5. writes per-activity/per-subject averages to Output/tidy_dataset.txt.
go <- function()
{
  # dplyr is needed for select/group_by/summarise_all below
  library(dplyr)
  # Question 1 - Merges the training and the test sets to create one data set.
  # Move into the unzipped data directory (download() returns its path)
  setwd(download())
  # Load activity labels and feature names
  activities_labels <- read.table("./activity_labels.txt", col.names = c("ActivityId", "ActivityLabel"))
  features <- read.table("./features.txt", col.names = c("id", "featuresfunc"))
  # TRAIN set: subject ids, measurements, and activity ids
  subject_train <- read.table("./train/subject_train.txt", col.names = c("SubjectId"))
  X_train <- read.table("./train/x_train.txt", col.names = features$featuresfunc)
  Y_train <- read.table("./train/y_train.txt", col.names = c("ActivityId"))
  # All three files must describe the same observations before binding.
  # (Bug fix: the original compared nrow(Y_train) with itself, which is
  # always TRUE; compare it against X_train instead. Scalar && is used
  # because this is an if() condition, and a mismatch now fails loudly
  # instead of leaving train_ds undefined.)
  if (nrow(subject_train) == nrow(X_train) && nrow(Y_train) == nrow(X_train)) {
    # bind subject ids, activity ids and measurements column-wise
    train_ds <- cbind(subject_train, Y_train, X_train)
  } else {
    stop("train files have inconsistent numbers of rows")
  }
  # TEST set: same structure and same consistency check as the train set
  subject_test <- read.table("./test/subject_test.txt", col.names = c("SubjectId"))
  X_test <- read.table("./test/x_test.txt", col.names = features$featuresfunc)
  Y_test <- read.table("./test/y_test.txt", col.names = c("ActivityId"))
  if (nrow(subject_test) == nrow(X_test) && nrow(Y_test) == nrow(X_test)) {
    test_ds <- cbind(subject_test, Y_test, X_test)
  } else {
    stop("test files have inconsistent numbers of rows")
  }
  # merge Train and Test data in one dataset - answer for question 1
  q1dataset <- rbind(train_ds, test_ds)
  # Question 2 - Extracts only the measurements on the mean and standard
  # deviation for each measurement (answer for question 2)
  q2dataset <- select(q1dataset, SubjectId, ActivityId, contains("mean"), contains("std"))
  # Question 3 - Uses descriptive activity names to name the activities
  q2dataset <- merge(q2dataset, activities_labels, by.x = "ActivityId", by.y = "ActivityId")
  # Question 4 - Appropriately labels the data set with descriptive names.
  # According to features_info.txt: acc - Accelerometer, gyro - Gyroscope,
  # prefix t - Time, prefix f - Frequency, mag - Magnitude.
  # NOTE: read.table() already sanitised the column names ("-", "(" and
  # ")" became "."), so the patterns containing "-...()" below can never
  # match and are kept only for parity with the original script; the
  # "\\." removal and the mean/std capitalisation do the actual work.
  names(q2dataset) <- gsub("Acc", "Accelerometer", names(q2dataset))
  names(q2dataset) <- gsub("Gyro", "Gyroscope", names(q2dataset))
  names(q2dataset) <- gsub("^t", "Time", names(q2dataset))
  names(q2dataset) <- gsub("^f", "Frequency", names(q2dataset))
  names(q2dataset) <- gsub("Mag", "Magnitude", names(q2dataset))
  names(q2dataset) <- gsub("-mean()", "Mean", names(q2dataset))
  names(q2dataset) <- gsub("-std()", "STD", names(q2dataset))
  names(q2dataset) <- gsub("-freq()", "Freq", names(q2dataset))
  names(q2dataset) <- gsub("tBody", "TimeBody", names(q2dataset))
  names(q2dataset) <- gsub("\\.", "", names(q2dataset))
  names(q2dataset) <- gsub("std", "Std", names(q2dataset))
  names(q2dataset) <- gsub("mean", "Mean", names(q2dataset))
  # Question 5 - second, independent tidy data set with the average of
  # each variable for each activity and each subject
  tidy_dataset <- group_by(select(q2dataset, -ActivityId), ActivityLabel, SubjectId)
  tidy_dataset <- summarise_all(tidy_dataset, "mean")
  rm(q1dataset, q2dataset, activities_labels)
  # Sanity check: 30 subjects times 6 activities = 180 combinations
  if (nrow(tidy_dataset) == 180)
  {
    # generate txt file with the tidy dataset to upload to coursera
    # (spelled out row.names= instead of the partial match row.name=)
    write.table(tidy_dataset, "D:/CleaningDataProject/Output/tidy_dataset.txt", row.names = FALSE)
  }
  rm(tidy_dataset)
}
|
/run_analysis.R
|
no_license
|
t2019br/CleaningDataCourseProject
|
R
| false
| false
| 7,050
|
r
|
#Getting and Cleaning Data Course Project
#Script for
# 0 - Download and create the working directory.
# 1 - Merges the training and the test sets to create one data set.
# 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# 3 - Uses descriptive activity names to name the activitys in the data set
# 4 - Appropriately labels the data set with descriptive variable names.
# 5 - From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Download files and set directory
download <- function()
{
DataDirectory <-"D:/CleaningDataProject"
zipURL<-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
filename<-"project.zip"
#Testing if directory exists
if (file.exists(DataDirectory))
{
setwd(DataDirectory)
} else {
dir.create(file.path(DataDirectory))
dir.create(file.path(paste0(DataDirectory,"/Output")))
setwd(DataDirectory)
}
#Testing if the file was dowloaded
if (!file.exists(paste0(DataDirectory,"/",filename)))
{
download.file(zipURL,destfile=filename, method="libcurl")
unzip(filename)
}
download<-paste0(DataDirectory,"/UCI HAR Dataset")
}
# Function to Start the analysis
go <- function()
{
#Question 1 - Merges the training and the test sets to create one data set.
#Defining Data Directory
setwd(download())
#Load activites label
activities_labels<-read.table("./activity_labels.txt",col.names=c("ActivityId","ActivityLabel"))
#Load features
features<-read.table("./features.txt",col.names=c("id","featuresfunc"))
#MAKE TRAIN DATASET WITH ALLSUBJECT AND DATA COLLECTED
#Load Subjects that participate on Train
subject_train<-read.table("./train/subject_train.txt",col.names=c("SubjectId"))
#Load data set of collected data
X_train<-read.table("./train/x_train.txt",col.names=features$featuresfunc)
#Load activitys of each collectad data
Y_train<-read.table("./train/y_train.txt",col.names=c("ActivityId"))
#check if the number of rows is equal at the datasets to bind columns.
if (nrow(subject_train) == nrow(X_train) & nrow(Y_train) == nrow(Y_train))
{
#bind activitys description and id coluns to data collected
train_ds<-cbind(Y_train,X_train)
#bind previous dataset the column to identify the subject that participated
train_ds<-cbind(subject_train,train_ds)
}
#rm(X_train,Y_train,subject_train)
#MAKE TEST DATASET WITH ALLSUBJECT AND DATA COLLECTED
#Load Subjects that participate on Train
subject_test<-read.table("./test/subject_test.txt",col.names=c("SubjectId"))
#Load data set of collected data
X_test<-read.table("./test/x_test.txt",col.names=features$featuresfunc)
#Load activitys of each collectad data
Y_test<-read.table("./test/y_test.txt",col.names=c("ActivityId"))
#check if the number of rows is equal at the datasets to bind columns.
if (nrow(subject_test) == nrow(X_test) & nrow(Y_test) == nrow(Y_test))
{
#bind activitys description and id columns to data collected
test_ds<-cbind(Y_test,X_test)
#bind previous dataset the column to identify the subject that participated
test_ds<-cbind(subject_test,test_ds)
}
#rm(X_test,Y_test,subject_test,features)
#merge Train and Test data in one dataset - answer for question 1
q1dataset <- rbind(train_ds,test_ds)
#carregando pacote dplyr
library(dplyr)
#Question 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
#Selecting only the columns that will be needed for questions 3,4 and 5.
#Answer for question 2
q2dataset<-select(q1dataset,SubjectId,ActivityId,contains("mean"),contains("std"))
#Question 3 - Uses descriptive activity names to name the activities in the data set
#Answer for question 3 with activites descriptions.
q2dataset<-merge(q2dataset,activities_labels,by.x="ActivityId",by.y="ActivityId")
#Question 4 - Appropriately labels the data set with descriptive variable names.
#Acording to features_info.txt we have some abreviations that can be changed
# acc - Accelerometer, gyro - Gyroscope, prefix t - Time, prefix f - Frequency, mag - Magnitude
#I took the ".", "-" and "()" out of the name of the columns names,
# and captalize first letters of the functions mean and std.
names(q2dataset) <-gsub("Acc","Accelerometer",names(q2dataset))
names(q2dataset) <-gsub("Gyro","Gyroscope",names(q2dataset))
names(q2dataset) <-gsub("^t","Time",names(q2dataset))
names(q2dataset) <-gsub("^f","Frequency",names(q2dataset))
names(q2dataset) <-gsub("Mag","Magnitude",names(q2dataset))
names(q2dataset) <-gsub("-mean()","Mean",names(q2dataset))
names(q2dataset) <-gsub("-std()","STD",names(q2dataset))
names(q2dataset) <-gsub("-freq()","Freq",names(q2dataset))
names(q2dataset) <-gsub("tBody","TimeBody",names(q2dataset))
names(q2dataset) <-gsub("\\.","",names(q2dataset))
names(q2dataset) <-gsub("std","Std",names(q2dataset))
names(q2dataset) <-gsub("mean","Mean",names(q2dataset))
#Question 5 - From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#Grouping information by activity and subject, and
tidy_dataset<-group_by(select(q2dataset,-ActivityId),ActivityLabel,SubjectId)
#making the average for each variable of the dataset
tidy_dataset<-summarise_all(tidy_dataset,"mean")
rm(q1dataset,q2dataset,activities_labels)
#check if the final dataset has 180 rows to generate the file
#it must have 180 rows that is: 30 subjects and times 6 activities = 180 combinations.
if (nrow(tidy_dataset)==180)
{
#generate txt file with the tidy dataset to upload to coursera
write.table(tidy_dataset,"D:/CleaningDataProject/Output/tidy_dataset.txt", row.name=FALSE)
}
rm(tidy_dataset)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{get_interactions}
\alias{get_interactions}
\title{Get interactions}
\usage{
get_interactions(net1, net2)
}
\arguments{
\item{net1, net2}{Two data frames of interactions, for example constructed
with \code{\link[bootdissim]{reshape_net}}. Each data frame must have only
two columns/variables, e.g. first column for plant names, second column for
insect names.}
}
\value{
Returns a list of two character vectors.
}
\description{
Helper function which constructs two vectors of species
interactions (e.g. plant - pollinator) for two network interactions
provided as data frames.
}
\examples{
library(bootdissim)
library(bipartite)
net1 <- reshape_net(vazarr, seed = 1)
net2 <- reshape_net(vazcer, seed = 1)
vect <- get_interactions(net1, net2)
}
|
/man/get_interactions.Rd
|
permissive
|
valentinitnelav/bootdissim
|
R
| false
| true
| 841
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{get_interactions}
\alias{get_interactions}
\title{Get interactions}
\usage{
get_interactions(net1, net2)
}
\arguments{
\item{net1, net2}{Two data frames of interactions, for example constructed
with \code{\link[bootdissim]{reshape_net}}. Each data frame must have only
two columns/variables, e.g. first column for plant names, second column for
insect names.}
}
\value{
Returns a list of two character vectors.
}
\description{
Helper function which constructs two vectors of species
interactions (e.g. plant - pollinator) for two network interactions
provided as data frames.
}
\examples{
library(bootdissim)
library(bipartite)
net1 <- reshape_net(vazarr, seed = 1)
net2 <- reshape_net(vazcer, seed = 1)
vect <- get_interactions(net1, net2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils_adduct_rules.R
\name{adduct_rules_neg}
\alias{adduct_rules_neg}
\title{Create list with all adduct calculation rules (negative ion model)
This function returns a list with rules for the calculation of adducts. It is required for the calculation of adduct m/z values from exact masses and the other way round. This list contains all rules for the negative ionization mode.}
\usage{
adduct_rules_neg()
}
\description{
Create list with all adduct calculation rules (negative ion model)
This function returns a list with rules for the calculation of adducts. It is required for the calculation of adduct m/z values from exact masses and the other way round. This list contains all rules for the negative ionization mode.
}
\seealso{
\code{\link{adduct_rules}}
\code{\link{adduct_rules_pos}}
\code{\link{get_adduct_names}}
}
\author{
Michael Witting, \email{michael.witting@helmholtz-muenchen.de}
}
|
/man/adduct_rules_neg.Rd
|
no_license
|
michaelwitting/lipidomicsUtils
|
R
| false
| true
| 982
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utils_adduct_rules.R
\name{adduct_rules_neg}
\alias{adduct_rules_neg}
\title{Create list with all adduct calculation rules (negative ion model)
This function returns a list with rules for the calculation of adducts. It is required for the calculation of adduct m/z values from exact masses and the other way round. This list contains all rules for the negative ionization mode.}
\usage{
adduct_rules_neg()
}
\description{
Create list with all adduct calculation rules (negative ion model)
This function returns a list with rules for the calculation of adducts. It is required for the calculation of adduct m/z values from exact masses and the other way round. This list contains all rules for the negative ionization mode.
}
\seealso{
\code{\link{adduct_rules}}
\code{\link{adduct_rules_pos}}
\code{\link{get_adduct_names}}
}
\author{
Michael Witting, \email{michael.witting@helmholtz-muenchen.de}
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{S3_get_acl}
\alias{S3_get_acl}
\title{Get ACL of bucket location}
\usage{
S3_get_acl(bucketName, key, filename = 0L)
}
\arguments{
\item{bucketName}{The name of the bucket}
\item{key}{The location of the folder/file.}
\item{filename}{The location you want to store the file.}
}
\value{
Places ACL information into the file you chose and
returns a 1
}
\description{
Get ACL of bucket location
}
\details{
This function gets the ACL of a file and puts all
the information into a file. View the AWS documentation on what ACL is \href{http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html}{here.}
}
|
/man/S3_get_acl.Rd
|
no_license
|
leohklee/RS3
|
R
| false
| false
| 668
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{S3_get_acl}
\alias{S3_get_acl}
\title{Get ACL of bucket location}
\usage{
S3_get_acl(bucketName, key, filename = 0L)
}
\arguments{
\item{bucketName}{The name of the bucket}
\item{key}{The location of the folder/file.}
\item{filename}{The location you want to store the file.}
}
\value{
Places ACL information into the file you chose and
returns a 1
}
\description{
Get ACL of bucket location
}
\details{
This function gets the ACL of a file and puts all
the information into a file. View the AWS documentation on what ACL is \href{http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html}{here.}
}
|
/Covid19/World_covid/World_covid.R
|
no_license
|
orlindowagner/R-Studies
|
R
| false
| false
| 3,269
|
r
| ||
# Wrapper around the compiled Fortran routine "rimtrf" from the ROBETH
# library: computes a triangular transformation of the design matrix x.
#
# Arguments:
#   x     - numeric design matrix (mdx rows, np columns).
#   n     - number of observations to use (defaults to nrow(x)).
#   intch - interchange/pivoting flag, taken from the .dFvGet() settings.
#   tau   - tolerance parameter, also taken from .dFvGet().
#
# Returns a list with the transformed matrix x, the computed value k,
# the work vectors sf, sg, sh, and the pivot indices ip, exactly as
# filled in by the Fortran routine.
"rimtrf" <-
function(x,n=nrow(x),intch=.dFvGet()$ith,tau=.dFvGet()$tua) {
np <- ncol(x)
mdx <- nrow(x)
# NOTE(review): this missing() branch can never be reached -- if x were
# missing, the ncol()/nrow() calls above (and the default for n) would
# already have failed; confirm against the original ROBETH sources.
if (missing(x)) x <- matrix(single(1),mdx,np)
# Allocate output/work storage with the storage modes Fortran expects
k <- integer(1)
sf <- single(np)
sg <- single(np)
sh <- single(np)
ip <- integer(np)
# Marshal all arguments to single/integer and invoke the compiled code;
# results come back in the named components of f.res.
f.res <- .Fortran("rimtrf",
x=to.single(x),
n=to.integer(n),
np=to.integer(np),
mdx=to.integer(mdx),
intch=to.integer(intch),
tau=to.single(tau),
k=to.integer(k),
sf=to.single(sf),
sg=to.single(sg),
sh=to.single(sh),
ip=to.integer(ip))
list(x=f.res$x,k=f.res$k,sf=f.res$sf,sg=f.res$sg,sh=f.res$sh,ip=f.res$ip)
}
|
/robeth/R/rimtrf.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 570
|
r
|
"rimtrf" <-
function(x,n=nrow(x),intch=.dFvGet()$ith,tau=.dFvGet()$tua) {
np <- ncol(x)
mdx <- nrow(x)
if (missing(x)) x <- matrix(single(1),mdx,np)
k <- integer(1)
sf <- single(np)
sg <- single(np)
sh <- single(np)
ip <- integer(np)
f.res <- .Fortran("rimtrf",
x=to.single(x),
n=to.integer(n),
np=to.integer(np),
mdx=to.integer(mdx),
intch=to.integer(intch),
tau=to.single(tau),
k=to.integer(k),
sf=to.single(sf),
sg=to.single(sg),
sh=to.single(sh),
ip=to.integer(ip))
list(x=f.res$x,k=f.res$k,sf=f.res$sf,sg=f.res$sg,sh=f.res$sh,ip=f.res$ip)
}
|
\name{poison.text}
\alias{poison.text}
\docType{data}
\title{Poison}
\description{
The data used here refer to a survey carried out on a sample of children of primary school
who suffered from food poisoning. They were asked about their symptoms and about what they ate.
}
\usage{data(poison)}
\format{
A data frame with 55 rows and 3 columns (the sex, if they are sick or not, and a textual variable with their symptom and what they eat).
}
\examples{
data(poison.text)
res.text <- textual(poison.text, num.text = 3, contingence.by = c(1,2))
## Contingency table for the sex variable, the sick variable and the couple
## of variables sick-sex
res.text2 <- textual(poison.text, num.text = 3, contingence.by = list(1,2,c(1,2)))
}
\keyword{datasets}
|
/man/poison.text.Rd
|
no_license
|
husson/FactoMineR
|
R
| false
| false
| 754
|
rd
|
\name{poison.text}
\alias{poison.text}
\docType{data}
\title{Poison}
\description{
The data used here refer to a survey carried out on a sample of children of primary school
who suffered from food poisoning. They were asked about their symptoms and about what they ate.
}
\usage{data(poison)}
\format{
A data frame with 55 rows and 3 columns (the sex, if they are sick or not, and a textual variable with their symptom and what they eat).
}
\examples{
data(poison.text)
res.text <- textual(poison.text, num.text = 3, contingence.by = c(1,2))
## Contingency table for the sex variable, the sick variable and the couple
## of variables sick-sex
res.text2 <- textual(poison.text, num.text = 3, contingence.by = list(1,2,c(1,2)))
}
\keyword{datasets}
|
# Package-local environment created at load time; caches results shared
# by all functions in the package namespace.  parent=emptyenv() keeps
# lookups from leaking into the search path.
# (Idiom fix: use <- rather than = for top-level assignment.)
.bme_env <- new.env(parent = emptyenv())
.bme_env$results <- NULL
|
/R/zzz.R
|
no_license
|
Bhanditz/benchmarkmeData
|
R
| false
| false
| 62
|
r
|
.bme_env = new.env(parent=emptyenv())
.bme_env$results = NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reagent_barplot.r
\name{reagent_barplot}
\alias{reagent_barplot}
\title{Plot the reagent means as a barplot}
\usage{
reagent_barplot(result, includeIntervals = F)
}
\arguments{
\item{result}{The BAMBAResult object.}
\item{includeIntervals}{A boolean indicating whether to
include sampling intervals for the reagent means.
Defaults to \code{FALSE}.}
}
\value{
A ggplot barplot object.
}
\description{
This function plots the reagent means from a model fit as a
barplot.
}
|
/man/reagent_barplot.Rd
|
no_license
|
RGLab/BAMBA
|
R
| false
| true
| 552
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reagent_barplot.r
\name{reagent_barplot}
\alias{reagent_barplot}
\title{Plot the reagent means as a barplot}
\usage{
reagent_barplot(result, includeIntervals = F)
}
\arguments{
\item{result}{The BAMBAResult object.}
\item{includeIntervals}{A boolean indicating whether to
include sampling intervals for the reagent means.
Defaults to \code{FALSE}.}
}
\value{
A ggplot barplot object.
}
\description{
This function plots the reagent means from a model fit as a
barplot.
}
|
# Returns a logical vector flagging which removal-candidate checkboxes
# are currently ticked in the GUI.
#
# Arguments:
#   prefix - common prefix of the tclVar names holding the checkbox
#            state ("1" when ticked).
#   envir  - environment in which those tclVars live (defaults to the
#            package-level KTSEnv environment).
#
# Note: the number of checkboxes is always read from KTSEnv$dSList$nRM,
# as in the original. seq_len() makes the empty case (nRM == 0) return
# logical(0) instead of iterating over the backwards sequence 1:0.
rmCheckedTF <-
function(prefix = "rcbValue", envir = KTSEnv) {
  vapply(
    seq_len(KTSEnv$dSList$nRM),
    function(ind) {
      tcltk::tclvalue(get(paste0(prefix, ind), envir = envir)) == "1"
    },
    logical(1)
  )
}
|
/R/rmCheckedTF.R
|
no_license
|
cran/KarsTS
|
R
| false
| false
| 350
|
r
|
rmCheckedTF <-
function(prefix = "rcbValue", envir = KTSEnv) {
rsel <- rep(FALSE, KTSEnv$dSList$nRM)
for (ind in 1:KTSEnv$dSList$nRM) {
rcbValueind <- paste0(prefix, ind)
if (tcltk::tclvalue(get(rcbValueind, envir = envir)) == "1") {
rsel[ind] = TRUE
}
rm(rcbValueind)
}
rsel
}
|
# Multiple linear regression analysis of used Toyota Corolla prices:
# loads the data, recodes categorical columns, trims IQR outliers on
# Price and KM, fits and diagnoses two lm() models.
# NOTE(review): attach() is called before ToyotaCorolla exists (the
# read.csv is on the next line) -- this line errors on a fresh session.
attach(ToyotaCorolla)
ToyotaCorolla <- read.csv("E:/Datasets/Multi linear Regression/ToyotaCorolla.csv")
View(ToyotaCorolla)
install.packages("e1071")
library(e1071)
# NOTE(review): "ggstatplot" looks like a typo -- the CRAN package is
# spelled "ggstatsplot"; both lines below will fail as written.
install.packages("ggstatplot")
library(ggstatplot)
library(psych)
# Drop the first two columns (id/model text) and inspect the data
ToyotaCorolla<-ToyotaCorolla[,-c(1:2)]
View(ToyotaCorolla)
str(ToyotaCorolla)
knitr::kable(summary(ToyotaCorolla))
summary(ToyotaCorolla)
describe(ToyotaCorolla)
View(ToyotaCorolla)
# Recode Fuel_Type to a factor with numeric labels 0..2 (label order
# follows first appearance in the data, not a fixed mapping)
unique_fuel <- unique(ToyotaCorolla$Fuel_Type)
unique(ToyotaCorolla$Fuel_Type)
ToyotaCorolla$Fuel_Type = factor(ToyotaCorolla$Fuel_Type,
                         levels = c(unique_fuel),
                         labels = c(0,1,2))
# Recode Color similarly with labels 0..9
unique_Color <- unique(ToyotaCorolla$Color)
unique(ToyotaCorolla$Color)
ToyotaCorolla$Color = factor(ToyotaCorolla$Color,
                     levels = c(unique_Color),
                     labels = c(0,1,2,3,4,5,6,7,8,9))
# Keep only the response (Price) and the numeric predictors
ToyotaCorolla<-ToyotaCorolla[c("Price","Age_08_04","KM","HP","cc","Doors","Gears","Quarterly_Tax","Weight")]
str(ToyotaCorolla)
boxplot(ToyotaCorolla)$out
# Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR) for KM ...
Q <- quantile(ToyotaCorolla$KM, probs=c(.25, .75), na.rm = FALSE)
iqr <- IQR(ToyotaCorolla$KM)
up <- Q[2]+1.5*iqr # Upper Range
low<- Q[1]-1.5*iqr # Lower Range
# ... and for Price
Q1 <- quantile(ToyotaCorolla$Price, probs=c(.25, .75), na.rm = FALSE)
iqr1 <- IQR(ToyotaCorolla$Price)
up1 <- Q1[2]+1.5*iqr1 # Upper Range
low1<- Q1[1]-1.5*iqr1 # Lower Range
# Drop rows outside the fences (first on Price, then on KM)
eliminated1<- subset(ToyotaCorolla, ToyotaCorolla$Price > (low1)& ToyotaCorolla$Price < (up1))
eliminated<- subset(eliminated1, eliminated1$KM > (low) & eliminated1$KM < (up))
boxplot(eliminated)$out
# First model: all predictors, outlier-trimmed data
model <- lm(Price ~ Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+Weight, data = eliminated)
summary(model)
#model <- lm(Price ~ poly(Age_08_04,2)+KM+poly(HP,1)+cc+poly(Doors,1)+Gears+Quarterly_Tax+Weight, data = eliminated[-c(77,480,105,272,104,270),])
#summary(model)
# Reset the plotting device and shrink margins for the pairs plot
graphics.off()
par(mar=c(1,1,1,1))
graphics.off()
par("mar")
par(mar=c(1,1,1,1))
pairs(eliminated[,-c(6,9)])
cor(eliminated)
# Collinearity and model-selection diagnostics for the first model
install.packages("car")
library(car)
car::vif(model)
library(MASS)
stepAIC(model)
plot(model)
residualPlots(model)
avPlots(model)
qqPlot(model)
influenceIndexPlot(model)
View(eliminated)
# Second model: quadratic Weight term, influential rows removed by index
model1<-lm(Price~Age_08_04+KM+HP+cc+Doors+Gears+Quarterly_Tax+poly(Weight,2),data=eliminated[-c(193,222,77,106,394,602,269,476,268,473,471,267,961),])
summary(model1)
car::vif(model1)
library(MASS)
stepAIC(model1)
?residualPlots
# Diagnostics for the refined model
plot(model1)
residualPlots(model1)
avPlots(model1)
qqPlot(model1)
influenceIndexPlot(model1)
|
/ToyottaMLR.R
|
no_license
|
Ashmita20/Data-Science-R-files
|
R
| false
| false
| 2,580
|
r
|
## Toyota Corolla price regression: data prep, IQR outlier removal, multiple
## linear regression with diagnostics.
## Fixes: removed `attach(ToyotaCorolla)` -- it ran BEFORE the object was read
## from disk (error on a fresh session) and every reference below is explicit
## anyway; corrected the package-name typo "ggstatplot" -> "ggstatsplot"
## (library(ggstatplot) always fails); factor labels generalized so they no
## longer assume a fixed number of levels.
ToyotaCorolla <- read.csv("E:/Datasets/Multi linear Regression/ToyotaCorolla.csv")
View(ToyotaCorolla)
install.packages("e1071")
library(e1071)
install.packages("ggstatsplot")   # was "ggstatplot" (no such package)
library(ggstatsplot)
library(psych)
# Drop the first two columns (record id and model description).
ToyotaCorolla <- ToyotaCorolla[, -c(1:2)]
View(ToyotaCorolla)
str(ToyotaCorolla)
knitr::kable(summary(ToyotaCorolla))
summary(ToyotaCorolla)
describe(ToyotaCorolla)
View(ToyotaCorolla)
# Encode categoricals as integer-labelled factors: one 0-based label per
# observed level (generalized from the original hard-coded label vectors).
unique_fuel <- unique(ToyotaCorolla$Fuel_Type)
unique(ToyotaCorolla$Fuel_Type)
ToyotaCorolla$Fuel_Type <- factor(ToyotaCorolla$Fuel_Type,
                                  levels = unique_fuel,
                                  labels = seq_along(unique_fuel) - 1)
unique_Color <- unique(ToyotaCorolla$Color)
unique(ToyotaCorolla$Color)
ToyotaCorolla$Color <- factor(ToyotaCorolla$Color,
                              levels = unique_Color,
                              labels = seq_along(unique_Color) - 1)
# Keep only the response (Price) and the numeric predictors used below.
ToyotaCorolla <- ToyotaCorolla[c("Price", "Age_08_04", "KM", "HP", "cc",
                                 "Doors", "Gears", "Quarterly_Tax", "Weight")]
str(ToyotaCorolla)
boxplot(ToyotaCorolla)$out
# 1.5 * IQR fences for KM and Price.
Q <- quantile(ToyotaCorolla$KM, probs = c(.25, .75), na.rm = FALSE)
iqr <- IQR(ToyotaCorolla$KM)
up <- Q[2] + 1.5 * iqr    # upper fence
low <- Q[1] - 1.5 * iqr   # lower fence
Q1 <- quantile(ToyotaCorolla$Price, probs = c(.25, .75), na.rm = FALSE)
iqr1 <- IQR(ToyotaCorolla$Price)
up1 <- Q1[2] + 1.5 * iqr1  # upper fence
low1 <- Q1[1] - 1.5 * iqr1 # lower fence
# Drop rows outside the Price fences first, then outside the KM fences.
eliminated1 <- subset(ToyotaCorolla, ToyotaCorolla$Price > (low1) & ToyotaCorolla$Price < (up1))
eliminated <- subset(eliminated1, eliminated1$KM > (low) & eliminated1$KM < (up))
boxplot(eliminated)$out
# Full additive model on the cleaned data.
model <- lm(Price ~ Age_08_04 + KM + HP + cc + Doors + Gears + Quarterly_Tax + Weight,
            data = eliminated)
summary(model)
#model <- lm(Price ~ poly(Age_08_04,2)+KM+poly(HP,1)+cc+poly(Doors,1)+Gears+Quarterly_Tax+Weight, data = eliminated[-c(77,480,105,272,104,270),])
#summary(model)
graphics.off()
par(mar = c(1, 1, 1, 1))
graphics.off()
par("mar")
par(mar = c(1, 1, 1, 1))
pairs(eliminated[, -c(6, 9)])   # scatterplot matrix (Doors and Weight excluded)
cor(eliminated)
install.packages("car")
library(car)
car::vif(model)    # multicollinearity check
library(MASS)
stepAIC(model)     # stepwise AIC selection
plot(model)
residualPlots(model)
avPlots(model)
qqPlot(model)
influenceIndexPlot(model)
View(eliminated)
# Refit with a quadratic Weight term, dropping influential rows flagged above.
model1 <- lm(Price ~ Age_08_04 + KM + HP + cc + Doors + Gears + Quarterly_Tax + poly(Weight, 2),
             data = eliminated[-c(193, 222, 77, 106, 394, 602, 269, 476, 268, 473, 471, 267, 961), ])
summary(model1)
car::vif(model1)
library(MASS)
stepAIC(model1)
?residualPlots
plot(model1)
residualPlots(model1)
avPlots(model1)
qqPlot(model1)
influenceIndexPlot(model1)
|
### ========================================================
### Practice with function debugging (31-10-2017)
### ========================================================
# Toy function for the debugging demo.  Parameter `x` is never evaluated,
# so the no-argument call below still works (R's lazy evaluation).
ejemplo1 <- function(x)
{
print("hola")
}
debug(ejemplo1) # start the debugging process: the next call enters the browser
ejemplo1() # run it
# Press Q to quit the debugger abruptly
# undebug(funcion) # to exit in a 'controlled' way
# throwaway lambda-style function:
# function(x) is.factor(x) | is.character(x)
|
/Computación en Estadística y Optimización/Clase 11/Debug.R
|
no_license
|
MGijon/Learning-R
|
R
| false
| false
| 478
|
r
|
### ========================================================
### Practice with function debugging (31-10-2017)
### ========================================================
# Toy function for the debugging demo.  Parameter `x` is never evaluated,
# so the no-argument call below still works (R's lazy evaluation).
ejemplo1 <- function(x)
{
print("hola")
}
debug(ejemplo1) # start the debugging process: the next call enters the browser
ejemplo1() # run it
# Press Q to quit the debugger abruptly
# undebug(funcion) # to exit in a 'controlled' way
# throwaway lambda-style function:
# function(x) is.factor(x) | is.character(x)
|
# ###################
# SpringRK4.R
#
# Damped spring oscillator integrated with the rODE package's 4th-order
# Runge-Kutta solver.  State layout used throughout:
#   state[1] = displacement, state[2] = velocity, state[3] = time.
library(rODE)
# S4 class: ODE parameters plus an embedded RK4 solver instance.
setClass("SpringRK4", slots = c(
# we should improve this by letting the user enter these values
K = "numeric",          # spring constant
mu = "numeric",         # damping coefficient
mass = "numeric",       # oscillating mass
state = "numeric",      # (displacement, velocity, time)
odeSolver = "RK4"       # embedded Runge-Kutta 4 solver
),
prototype = prototype(
K = 1,
state = c(0, 0, 0)
),
contains = c("ODE")
)
# Initializer: hard-coded physical constants; binds an RK4 solver to this ODE.
setMethod("initialize", "SpringRK4", function(.Object) {
# we should improve this by letting the user enter these values
.Object@K <- 1.0
.Object@mu <- 1.5
.Object@mass <- 20
.Object@odeSolver <- RK4(.Object)
return(.Object)
})
# Forward the step size to the embedded solver.
setMethod("setStepSize", signature("SpringRK4"), function(object, dt, ...) {
# use explicit parameter declaration
# setStepSize generic may use two different step parameters: stepSize and dt
object@odeSolver <- setStepSize(object@odeSolver, dt)
object
})
# Advance one RK4 step, then copy the solver's rate/state back into this object.
setMethod("step", "SpringRK4", function(object) {
object@odeSolver <- step(object@odeSolver)
object@rate <- object@odeSolver@ode@rate
object@state <- object@odeSolver@ode@state
object
})
# Set initial displacement and velocity (state[3], time, is left unchanged)
# and push the state down into the solver.
setMethod("setState", "SpringRK4", function(object, theta, thetaDot) {
object@state[1] <- theta # displacement ("angle" in the original comment)
object@state[2] <- thetaDot # its time derivative (velocity)
# state[3] is time
object@odeSolver@ode@state <- object@state # set state on solver
object
})
# Accessor for the full state vector.
setMethod("getState", "SpringRK4", function(object) {
object@state
})
# Equations of motion of a damped oscillator:
#   x' = v;  v' = -(mu/mass) * v - K * x;  t' = 1.
# NOTE(review): K is not divided by mass here, unlike the damping term --
# confirm against the intended physical model.
setMethod("getRate", "SpringRK4", function(object, state, ...) {
# enter the derivatives here
object@rate[1] <- state[2] # rate of change of displacement
object@rate[2] <- -object@mu / object@mass * state[2] - object@K * state[1]
object@rate[3] <- 1 # rate of change of time, dt/dt
object@rate
})
# Constructor shorthand.
SpringRK4 <- function() new("SpringRK4")
# main: integrate the damped spring from t = 0 to tmax and return a
# data.table with columns t (time), y1 (displacement), y2 (velocity).
# verbose = TRUE prints every step.
# Fixes vs original: removed an unused `ode <- new("ODE")` object; removed a
# redundant second, direct setStepSize() on the embedded solver (the method
# call on `spring` already forwards dt to it); corrected the row comments,
# which had t/y1/y2 mislabelled as angle/derivative/time.
SpringRK4App <- function(verbose = FALSE) {
spring <- SpringRK4()
dt <- 0.1           # integration step size
theta <- 0          # initial displacement
thetaDot <- -0.2    # initial velocity
tmax <- 22          # end time of the simulation
spring@state[3] <- 0                    # set time to zero, t = 0
spring <- setState(spring, theta, thetaDot)
spring <- setStepSize(spring, dt = dt)  # forwards dt to the RK4 solver
rowvec <- vector("list")
i <- 1
while (spring@state[3] <= tmax) {
    rowvec[[i]] <- list(t  = spring@state[3],   # time
                        y1 = spring@state[1],   # displacement
                        y2 = spring@state[2])   # velocity
    if (verbose)
        cat(sprintf("time=%12f state1=%12f state2=%12f \n",
                    spring@state[3], spring@state[1], spring@state[2]))
    i <- i + 1
    spring <- step(spring)
}
DT <- data.table::rbindlist(rowvec)
return(DT)
}
# Run the simulation (verbose) and visualize the two state series.
DT <- SpringRK4App(TRUE)
# multiplot
plot(DT)
# base-graphics lines for y1 (displacement) and y2 (velocity) over time
plot(y1~t, data = DT, type ="l", col = "blue", pch = 1)
lines(y2~t, data = DT, col = "green", pch = 2)
legend("topright", legend=c("y1","y2"), pch=c(1,2,3),
col = c("blue", "green", "red"))
# ggplot version: reshape to long format, one coloured line per series
library(ggplot2)
library(dplyr)
library(tidyr)
DTplot <- DT %>% gather(key, value, -t)  # NOTE: gather() is superseded by pivot_longer()
g <- ggplot(DTplot, mapping = aes(x = t, y = value, color = key))
g <- g + geom_line()
print(g)
|
/SpringRK4.R
|
no_license
|
AlfonsoRReyes/rODEExamples
|
R
| false
| false
| 3,445
|
r
|
# ###################
# SpringRK4.R
#
# Damped spring oscillator integrated with the rODE package's 4th-order
# Runge-Kutta solver.  State layout used throughout:
#   state[1] = displacement, state[2] = velocity, state[3] = time.
library(rODE)
# S4 class: ODE parameters plus an embedded RK4 solver instance.
setClass("SpringRK4", slots = c(
# we should improve this by letting the user enter these values
K = "numeric",          # spring constant
mu = "numeric",         # damping coefficient
mass = "numeric",       # oscillating mass
state = "numeric",      # (displacement, velocity, time)
odeSolver = "RK4"       # embedded Runge-Kutta 4 solver
),
prototype = prototype(
K = 1,
state = c(0, 0, 0)
),
contains = c("ODE")
)
# Initializer: hard-coded physical constants; binds an RK4 solver to this ODE.
setMethod("initialize", "SpringRK4", function(.Object) {
# we should improve this by letting the user enter these values
.Object@K <- 1.0
.Object@mu <- 1.5
.Object@mass <- 20
.Object@odeSolver <- RK4(.Object)
return(.Object)
})
# Forward the step size to the embedded solver.
setMethod("setStepSize", signature("SpringRK4"), function(object, dt, ...) {
# use explicit parameter declaration
# setStepSize generic may use two different step parameters: stepSize and dt
object@odeSolver <- setStepSize(object@odeSolver, dt)
object
})
# Advance one RK4 step, then copy the solver's rate/state back into this object.
setMethod("step", "SpringRK4", function(object) {
object@odeSolver <- step(object@odeSolver)
object@rate <- object@odeSolver@ode@rate
object@state <- object@odeSolver@ode@state
object
})
# Set initial displacement and velocity (state[3], time, is left unchanged)
# and push the state down into the solver.
setMethod("setState", "SpringRK4", function(object, theta, thetaDot) {
object@state[1] <- theta # displacement ("angle" in the original comment)
object@state[2] <- thetaDot # its time derivative (velocity)
# state[3] is time
object@odeSolver@ode@state <- object@state # set state on solver
object
})
# Accessor for the full state vector.
setMethod("getState", "SpringRK4", function(object) {
object@state
})
# Equations of motion of a damped oscillator:
#   x' = v;  v' = -(mu/mass) * v - K * x;  t' = 1.
# NOTE(review): K is not divided by mass here, unlike the damping term --
# confirm against the intended physical model.
setMethod("getRate", "SpringRK4", function(object, state, ...) {
# enter the derivatives here
object@rate[1] <- state[2] # rate of change of displacement
object@rate[2] <- -object@mu / object@mass * state[2] - object@K * state[1]
object@rate[3] <- 1 # rate of change of time, dt/dt
object@rate
})
# Constructor shorthand.
SpringRK4 <- function() new("SpringRK4")
# main: integrate the damped spring from t = 0 to tmax and return a
# data.table with columns t (time), y1 (displacement), y2 (velocity).
# verbose = TRUE prints every step.
# Fixes vs original: removed an unused `ode <- new("ODE")` object; removed a
# redundant second, direct setStepSize() on the embedded solver (the method
# call on `spring` already forwards dt to it); corrected the row comments,
# which had t/y1/y2 mislabelled as angle/derivative/time.
SpringRK4App <- function(verbose = FALSE) {
spring <- SpringRK4()
dt <- 0.1           # integration step size
theta <- 0          # initial displacement
thetaDot <- -0.2    # initial velocity
tmax <- 22          # end time of the simulation
spring@state[3] <- 0                    # set time to zero, t = 0
spring <- setState(spring, theta, thetaDot)
spring <- setStepSize(spring, dt = dt)  # forwards dt to the RK4 solver
rowvec <- vector("list")
i <- 1
while (spring@state[3] <= tmax) {
    rowvec[[i]] <- list(t  = spring@state[3],   # time
                        y1 = spring@state[1],   # displacement
                        y2 = spring@state[2])   # velocity
    if (verbose)
        cat(sprintf("time=%12f state1=%12f state2=%12f \n",
                    spring@state[3], spring@state[1], spring@state[2]))
    i <- i + 1
    spring <- step(spring)
}
DT <- data.table::rbindlist(rowvec)
return(DT)
}
# Run the simulation (verbose) and visualize the two state series.
DT <- SpringRK4App(TRUE)
# multiplot
plot(DT)
# base-graphics lines for y1 (displacement) and y2 (velocity) over time
plot(y1~t, data = DT, type ="l", col = "blue", pch = 1)
lines(y2~t, data = DT, col = "green", pch = 2)
legend("topright", legend=c("y1","y2"), pch=c(1,2,3),
col = c("blue", "green", "red"))
# ggplot version: reshape to long format, one coloured line per series
library(ggplot2)
library(dplyr)
library(tidyr)
DTplot <- DT %>% gather(key, value, -t)  # NOTE: gather() is superseded by pivot_longer()
g <- ggplot(DTplot, mapping = aes(x = t, y = value, color = key))
g <- g + geom_line()
print(g)
|
##################################
## Single Line Geocode Function ##
##################################
# The function takes:
# - one address at a time as one string (SingleLine)
# - token
# - allow to return Postal codes if a full street address match cannot be found (default is TRUE)
#
# The function returns:
# lon, lat - The primary x/y coordinates of the address returned by the geocoding service in WGS84
# score - The accuracy of the address match between 0 and 100.
# locName - The component locator used to return a particular match result
# status - Whether a batch geocode request results in a match (M), tie (T), or unmatch (U)
# matchAddr - Complete address returned for the geocode request.
# side - The side of the street where an address resides relative to the direction
# of feature digitization
# addressType - The match level for a geocode request. "PointAddress" is typically the
# most spatially accurate match level. "StreetAddress" differs from PointAddress
# because the house number is interpolated from a range of numbers. "StreetName" is similar,
# but without the house number.
geocodeSL <- function (address, token, postal = TRUE){
require(httr)  # NOTE(review): library() is preferred; require() only warns on load failure
# Stanford geolocator
gserver <- "http://locator.stanford.edu/arcgis/rest/services/geocode/Composite_NorthAmerica/GeocodeServer/geocodeAddresses"
# template for SingleLine format
pref <- "{'records':[{'attributes':{'OBJECTID':1,'SingleLine':'"
suff <- "'}}]}"
# url
# NOTE(review): URLencode() without reserved = TRUE leaves characters such as
# "&" and "'" unescaped, so addresses containing them may corrupt the query;
# the token also travels in a plain-http query string -- confirm acceptable.
url <- URLencode(paste0(gserver, "?addresses=", pref, address, suff, "&token=", token, ifelse(postal, "&f=json", "&f=json&category=Address")))
# submit
rawdata <- GET(url)
# parse JSON and process result
# NOTE(review): assumes the service returns at least one location --
# res$locations[[1]] errors on an empty response; verify upstream guarantees.
res <- content(rawdata, "parsed", "application/json")
resdf <- with(res$locations[[1]], {data.frame(lon = as.numeric(location$x),
                                              lat = as.numeric(location$y),
                                              score = score,
                                              locName = attributes$Loc_name,
                                              status = attributes$Status,
                                              matchAddr = attributes$Match_addr,
                                              side = attributes$Side,
                                              addressType = attributes$Addr_type)})
return(resdf)
}
#######################################
## Multi Line Batch Geocode Function ##
#######################################
# The function takes:
# - ID variable to identify records, must be numeric and should be unique
# - multiple addresses as vectors, separated into: Street, City, State, Zip
# - token
#
# It can take a maximum of 1000 addresses. If more, it returns an error.
#
# The function returns a data frame with the following fields:
# ID - Result ID can be used to join the output fields in the response to the attributes
# in the original address table.
# lon, lat - The primary x/y coordinates of the address returned by the geocoding service in WGS84
# score - The accuracy of the address match between 0 and 100.
# locName - The component locator used to return a particular match result
# status - Whether a batch geocode request results in a match (M), tie (T), or unmatch (U)
# matchAddr - Complete address returned for the geocode request.
# side - The side of the street where an address resides relative to the direction
# of feature digitization
# addressType - The match level for a geocode request. "PointAddress" is typically the
# most spatially accurate match level. "StreetAddress" differs from PointAddress
# because the house number is interpolated from a range of numbers. "StreetName" is similar,
# but without the house number.
geocodeML_batch <- function(id, street, city, state, zip, token){
  require(httr)
  require(rjson)
  # check if we have more than 1000, if so stop.
  if (length(id) > 1000){
    print(paste("length is: ", length(id)))
    stop("Can only process up to 1000 addresses at a time.")}
  # check if id is numeric
  if (!is.numeric(id)) {
    stop("id variable needs to be numeric.")
  }
  # assemble the request table; OBJECTID is echoed back by the service as ResultID
  adr_df <- data.frame(OBJECTID = id,
                       Street = street,
                       City = city,
                       State = state,
                       Zip = zip)
  # one {"attributes": {...}} record per address
  tmp_list <- apply(adr_df, 1, function(i) list(attributes = as.list(i)))
  # apply() coerced every field to character; restore numeric OBJECTID
  tmp_list <- lapply(tmp_list, function(i) { i$attributes$OBJECTID <- as.numeric(i$attributes$OBJECTID); i })
  adr_json <- toJSON(list(records = tmp_list))
  gserver <- "http://locator.stanford.edu/arcgis/rest/services/geocode/Composite_NorthAmerica/GeocodeServer/geocodeAddresses"
  # submit as a form-encoded POST
  req <- POST(
    url = gserver,
    body = list(addresses = adr_json, f="json", token=token),
    encode = "form")
  #stop_for_status(req) # error check
  # process and parse: build one row per returned location and bind once at the
  # end (replaces the original rbind-inside-a-loop, which re-copies the whole
  # accumulator on every iteration)
  res <- content(req, "parsed", "application/json")
  rows <- lapply(res$locations, function(loc) {
    with(loc, data.frame(ID = attributes$ResultID,
                         lon = as.numeric(location$x),
                         lat = as.numeric(location$y),
                         score = score,
                         locName = attributes$Loc_name,
                         status = attributes$Status,
                         matchAddr = attributes$Match_addr,
                         side = attributes$Side,
                         addressType = attributes$Addr_type))
  })
  # empty response -> empty data frame, matching the original behaviour
  resdfr <- if (length(rows) > 0) do.call(rbind, rows) else data.frame()
  return(resdfr)
}
|
/SUL_gcFunctions.R
|
no_license
|
RoyalTS/ArcGIS_geocoding
|
R
| false
| false
| 5,983
|
r
|
##################################
## Single Line Geocode Function ##
##################################
# The function takes:
# - one address at a time as one string (SingleLine)
# - token
# - allow to return Postal codes if a full street address match cannot be found (default is TRUE)
#
# The function returns:
# lon, lat - The primary x/y coordinates of the address returned by the geocoding service in WGS84
# score - The accuracy of the address match between 0 and 100.
# locName - The component locator used to return a particular match result
# status - Whether a batch geocode request results in a match (M), tie (T), or unmatch (U)
# matchAddr - Complete address returned for the geocode request.
# side - The side of the street where an address resides relative to the direction
# of feature digitization
# addressType - The match level for a geocode request. "PointAddress" is typically the
# most spatially accurate match level. "StreetAddress" differs from PointAddress
# because the house number is interpolated from a range of numbers. "StreetName" is similar,
# but without the house number.
geocodeSL <- function (address, token, postal = TRUE){
require(httr)  # NOTE(review): library() is preferred; require() only warns on load failure
# Stanford geolocator
gserver <- "http://locator.stanford.edu/arcgis/rest/services/geocode/Composite_NorthAmerica/GeocodeServer/geocodeAddresses"
# template for SingleLine format
pref <- "{'records':[{'attributes':{'OBJECTID':1,'SingleLine':'"
suff <- "'}}]}"
# url
# NOTE(review): URLencode() without reserved = TRUE leaves characters such as
# "&" and "'" unescaped, so addresses containing them may corrupt the query;
# the token also travels in a plain-http query string -- confirm acceptable.
url <- URLencode(paste0(gserver, "?addresses=", pref, address, suff, "&token=", token, ifelse(postal, "&f=json", "&f=json&category=Address")))
# submit
rawdata <- GET(url)
# parse JSON and process result
# NOTE(review): assumes the service returns at least one location --
# res$locations[[1]] errors on an empty response; verify upstream guarantees.
res <- content(rawdata, "parsed", "application/json")
resdf <- with(res$locations[[1]], {data.frame(lon = as.numeric(location$x),
                                              lat = as.numeric(location$y),
                                              score = score,
                                              locName = attributes$Loc_name,
                                              status = attributes$Status,
                                              matchAddr = attributes$Match_addr,
                                              side = attributes$Side,
                                              addressType = attributes$Addr_type)})
return(resdf)
}
#######################################
## Multi Line Batch Geocode Function ##
#######################################
# The function takes:
# - ID variable to identify records, must be numeric and should be unique
# - multiple addresses as vectors, separated into: Street, City, State, Zip
# - token
#
# It can take a maximum of 1000 addresses. If more, it returns an error.
#
# The function returns a data frame with the following fields:
# ID - Result ID can be used to join the output fields in the response to the attributes
# in the original address table.
# lon, lat - The primary x/y coordinates of the address returned by the geocoding service in WGS84
# score - The accuracy of the address match between 0 and 100.
# locName - The component locator used to return a particular match result
# status - Whether a batch geocode request results in a match (M), tie (T), or unmatch (U)
# matchAddr - Complete address returned for the geocode request.
# side - The side of the street where an address resides relative to the direction
# of feature digitization
# addressType - The match level for a geocode request. "PointAddress" is typically the
# most spatially accurate match level. "StreetAddress" differs from PointAddress
# because the house number is interpolated from a range of numbers. "StreetName" is similar,
# but without the house number.
geocodeML_batch <- function(id, street, city, state, zip, token){
  require(httr)   # NOTE(review): library() preferred over require() for hard dependencies
  require(rjson)
  # check if we have more than 1000, if so stop.
  if (length(id) > 1000){
    print(paste("length is: ", length(id)))
    stop("Can only process up to 1000 addresses at a time.")}
  # check if id is numeric
  if (!is.numeric(id)) {
    stop("id variable needs to be numeric.")
  }
  # assemble the request table; OBJECTID is echoed back by the service as ResultID
  adr_df <- data.frame(OBJECTID = id,
                       Street = street,
                       City = city,
                       State = state,
                       Zip = zip)
  # one {"attributes": {...}} record per address
  tmp_list <- apply(adr_df, 1, function(i) list(attributes = as.list(i)))
  # apply() coerced every field to character; restore numeric OBJECTID
  tmp_list <- lapply(tmp_list, function(i) { i$attributes$OBJECTID <- as.numeric(i$attributes$OBJECTID); i })
  adr_json <- toJSON(list(records = tmp_list))
  gserver <- "http://locator.stanford.edu/arcgis/rest/services/geocode/Composite_NorthAmerica/GeocodeServer/geocodeAddresses"
  # submit as a form-encoded POST
  req <- POST(
    url = gserver,
    body = list(addresses = adr_json, f="json", token=token),
    encode = "form")
  #stop_for_status(req) # error check
  # process and parse
  res <- content(req, "parsed", "application/json")
  resdfr <- data.frame()
  for (i in seq_len(length(res$locations))){
    d <- with(res$locations[[i]], {data.frame(ID = attributes$ResultID,
                                              lon = as.numeric(location$x),
                                              lat = as.numeric(location$y),
                                              score = score,
                                              locName = attributes$Loc_name,
                                              status = attributes$Status,
                                              matchAddr = attributes$Match_addr,
                                              side = attributes$Side,
                                              addressType = attributes$Addr_type)})
    # NOTE(review): rbind inside a loop is O(n^2); prefer lapply + do.call(rbind, ...)
    resdfr <- rbind(resdfr, d)
  }
  return(resdfr)
}
|
#-----------------------------------------------------------------------------
# S1 Tab -- Sentinel-1 data download panel (shinydashboard tabItem).
# Fixes vs original: user-facing typos corrected ("acquistion" ->
# "acquisition"; "If you are not in possess of a user account" ->
# "If you do not have a user account").
tabItem(tabName = "s1_dow",
        fluidRow(
          # Include the line below in ui.R so you can send messages
          tags$head(tags$script(HTML('Shiny.addCustomMessageHandler("jsCode",function(message) {eval(message.value);});'))),
          #----------------------------------------------------------------------------------
          # Processing Panel Sentinel-1
          box(
            # Title
            title = "Processing Panel", status = "success", solidHeader= TRUE,
            tags$h4("Sentinel-1 data download"),
            hr(),
            # Step 1: choose where downloads are stored
            tags$b("1) Output directory"),
            p("Note: A new folder named \"DATA\" will be created within the chosen Output directory.
              Within this folder the downloaded data files will be stored and further sorted by satellite track and acquisition date."),
            #div(style="display:inline-block",shinyDirButton('directory', 'Browse', 'Select a folder')),
            #div(style="display:inline-block",verbatimTextOutput("project_dir")),
            shinyDirButton('S1_dow_directory', 'Browse', 'Select a folder'),
            br(),
            br(),
            verbatimTextOutput("S1_dow_project_dir"),
            hr(),
            # Step 2: pick the OST inventory file (local path or uploaded zip)
            tags$b("2) OST S1 inventory file"),
            radioButtons("S1_DOWNFILE", "",
                         c("OST Inventory Shapefile (local/on server)" = "S1_AOI_shape_local",
                           "OST Inventory Shapefile (upload zipped archive)" = "S1_AOI_zip_upload")),
            conditionalPanel(
              "input.S1_DOWNFILE == 'S1_AOI_shape_local'",
              p("Note: This browse should point to an OST inventory file created under the data inventory tab."),
              br(),
              shinyFilesButton("S1_dow_shapefile","Choose file","Choose one or more files",FALSE),
              br(),
              br(),
              verbatimTextOutput("S1_dow_filepath")
            ),
            conditionalPanel(
              "input.S1_DOWNFILE == 'S1_AOI_zip_upload'",
              fileInput('S1_zipfile_path', label = 'Browse',accept = c(".zip"))
            ),
            hr(),
            # Step 3: NASA Earthdata credentials for the ASF download service
            tags$b("3) Provide your NASA Earthdata username/password."),
            p("If you do not have a user account you can create one ",a(href = "https://urs.earthdata.nasa.gov/",target="_blank", "here"),"."),
            br(),
            textInput(inputId = "s1_asf_uname",
                      label = "Username",
                      value = "Type in your username"
            ),
            passwordInput(inputId = "s1_asf_piwo",
                          label = "Password",
                          value = "Type in your password"
            ),
            hr(),
            # div(style="display:inline-block",actionButton("s1_kc_process", "Start processing")),
            # div(style="display:inline-block",actionButton("s1_kc_abort", "Abort processing")),
            actionButton("S1_download", "Start downloading"),
            br(),
            # "Command Line Syntax:",
            textOutput("S1_down")
          ) # close box
        )
)
|
/shiny/ui/S1_dow_tab_ui.R
|
permissive
|
imagingearth/opensarkit
|
R
| false
| false
| 3,210
|
r
|
#-----------------------------------------------------------------------------
# S1 Tab -- Sentinel-1 data download panel (shinydashboard tabItem).
# Fixes vs original: user-facing typos corrected ("acquistion" ->
# "acquisition"; "If you are not in possess of a user account" ->
# "If you do not have a user account").
tabItem(tabName = "s1_dow",
        fluidRow(
          # Include the line below in ui.R so you can send messages
          tags$head(tags$script(HTML('Shiny.addCustomMessageHandler("jsCode",function(message) {eval(message.value);});'))),
          #----------------------------------------------------------------------------------
          # Processing Panel Sentinel-1
          box(
            # Title
            title = "Processing Panel", status = "success", solidHeader= TRUE,
            tags$h4("Sentinel-1 data download"),
            hr(),
            # Step 1: choose where downloads are stored
            tags$b("1) Output directory"),
            p("Note: A new folder named \"DATA\" will be created within the chosen Output directory.
              Within this folder the downloaded data files will be stored and further sorted by satellite track and acquisition date."),
            #div(style="display:inline-block",shinyDirButton('directory', 'Browse', 'Select a folder')),
            #div(style="display:inline-block",verbatimTextOutput("project_dir")),
            shinyDirButton('S1_dow_directory', 'Browse', 'Select a folder'),
            br(),
            br(),
            verbatimTextOutput("S1_dow_project_dir"),
            hr(),
            # Step 2: pick the OST inventory file (local path or uploaded zip)
            tags$b("2) OST S1 inventory file"),
            radioButtons("S1_DOWNFILE", "",
                         c("OST Inventory Shapefile (local/on server)" = "S1_AOI_shape_local",
                           "OST Inventory Shapefile (upload zipped archive)" = "S1_AOI_zip_upload")),
            conditionalPanel(
              "input.S1_DOWNFILE == 'S1_AOI_shape_local'",
              p("Note: This browse should point to an OST inventory file created under the data inventory tab."),
              br(),
              shinyFilesButton("S1_dow_shapefile","Choose file","Choose one or more files",FALSE),
              br(),
              br(),
              verbatimTextOutput("S1_dow_filepath")
            ),
            conditionalPanel(
              "input.S1_DOWNFILE == 'S1_AOI_zip_upload'",
              fileInput('S1_zipfile_path', label = 'Browse',accept = c(".zip"))
            ),
            hr(),
            # Step 3: NASA Earthdata credentials for the ASF download service
            tags$b("3) Provide your NASA Earthdata username/password."),
            p("If you do not have a user account you can create one ",a(href = "https://urs.earthdata.nasa.gov/",target="_blank", "here"),"."),
            br(),
            textInput(inputId = "s1_asf_uname",
                      label = "Username",
                      value = "Type in your username"
            ),
            passwordInput(inputId = "s1_asf_piwo",
                          label = "Password",
                          value = "Type in your password"
            ),
            hr(),
            # div(style="display:inline-block",actionButton("s1_kc_process", "Start processing")),
            # div(style="display:inline-block",actionButton("s1_kc_abort", "Abort processing")),
            actionButton("S1_download", "Start downloading"),
            br(),
            # "Command Line Syntax:",
            textOutput("S1_down")
          ) # close box
        )
)
|
# Load R.matlab on demand (bridge to a local MATLAB session for the QR step).
if(!require("R.matlab")){
  install.packages("R.matlab")
  stopifnot(require("R.matlab"))
}
# Split the column space of Es into a k-dimensional basis and its orthogonal
# complement via a full QR factorization computed in MATLAB.
#
# Args:
#   Es: numeric matrix whose leading columns span the subspace of interest.
#   k:  number of basis columns to keep (must satisfy 1 <= k < nrow(Es),
#       since MATLAB's full qr() returns an nrow(Es) x nrow(Es) Q).
# Returns: list(Esbasis = first k columns of Q, Fsbasis = remaining columns).
#
# Fixes vs original: the MATLAB session is now released via on.exit() even if
# an evaluate/getVariable call errors (previously it leaked); k is validated
# so that (k+1):ncol(Q) can no longer silently produce a decreasing sequence
# when k >= ncol(Q).
extract_basis <- function(Es, k){
  stopifnot(is.numeric(k), length(k) == 1, k >= 1, k < nrow(Es))
  Matlab$startServer(port = 9999)
  matlab <- Matlab(port = 9999)
  open(matlab)
  on.exit(close(matlab), add = TRUE)  # always release the MATLAB session
  setVariable(matlab, Es = Es)
  evaluate(matlab, "[Q,~] = qr(Es);")
  matlab_result <- getVariable(matlab, "Q")
  Q <- matlab_result$Q
  # pure-R alternative kept for reference:
  # qr_result <- qr(Es)
  # Q <- qr.Q(qr_result)
  Esbasis <- Q[,1:k]
  Fsbasis <- Q[,(k+1):ncol(Q)]
  output <- list()
  output$Esbasis <- Esbasis
  output$Fsbasis <- Fsbasis
  return(output)
}
|
/sdp/extract_basis.R
|
no_license
|
HaroldSu/Overcomplete-ICA
|
R
| false
| false
| 554
|
r
|
# Load R.matlab on demand (bridge to a local MATLAB session for the QR step).
if(!require("R.matlab")){
  install.packages("R.matlab")
  stopifnot(require("R.matlab"))
}
# Split the column space of Es into a k-dimensional basis (Esbasis) and its
# orthogonal complement (Fsbasis) via a full QR factorization done in MATLAB.
# NOTE(review): no on.exit() cleanup -- the MATLAB session leaks if any call
# before close() errors; also assumes 1 <= k < nrow(Es), otherwise
# (k+1):ncol(Q) silently yields a decreasing index sequence -- confirm callers.
extract_basis <- function(Es, k){
  Matlab$startServer(port = 9999)
  matlab <- Matlab(port = 9999)
  open(matlab)
  setVariable(matlab, Es = Es)
  evaluate(matlab, "[Q,~] = qr(Es);")  # full QR: Q is nrow(Es) x nrow(Es)
  matlab_result <- getVariable(matlab, "Q")
  Q <- matlab_result$Q
  close(matlab)
  # pure-R alternative kept for reference:
  # qr_result <- qr(Es)
  # Q <- qr.Q(qr_result)
  Esbasis <- Q[,1:k]            # first k columns: requested basis
  Fsbasis <- Q[,(k+1):ncol(Q)]  # remaining columns: orthogonal complement
  output <- list()
  output$Esbasis <- Esbasis
  output$Fsbasis <- Fsbasis
  return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot}
\alias{plot}
\alias{plot.seq_marg_monitor}
\alias{plot.CD}
\alias{plot.seq_cond_monitor}
\alias{plot.node_monitor}
\alias{plot.influential_obs}
\alias{plot.jeffreys}
\alias{plot.kl}
\alias{plot.final_node_monitor}
\alias{plot.seq_pa_ch_monitor}
\alias{plot.sensitivity}
\alias{plot.fro}
\title{Plotting methods}
\usage{
\method{plot}{seq_marg_monitor}(x, ...)
\method{plot}{CD}(x, ...)
\method{plot}{seq_cond_monitor}(x, ...)
\method{plot}{node_monitor}(x, ...)
\method{plot}{influential_obs}(x, ...)
\method{plot}{jeffreys}(x, ...)
\method{plot}{kl}(x, ...)
\method{plot}{final_node_monitor}(x, which, ...)
\method{plot}{seq_pa_ch_monitor}(x, ...)
\method{plot}{sensitivity}(x, ...)
\method{plot}{fro}(x, ...)
}
\arguments{
\item{x}{The output of a \code{bnmonitor} function (for example \code{node_monitor}, \code{sensitivity} or \code{kl}), matching the plot method being dispatched.}
\item{...}{for compatibility}
\item{which}{select the monitor to plot, either "marginal" or "conditional" (for output of \code{node_monitor} only).}
}
\value{
A plot specific to the object it is applied to.
}
\description{
Plotting methods for outputs of \code{bnmonitor} functions
}
|
/man/plot.Rd
|
no_license
|
manueleleonelli/bnmonitor
|
R
| false
| true
| 1,156
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot}
\alias{plot}
\alias{plot.seq_marg_monitor}
\alias{plot.CD}
\alias{plot.seq_cond_monitor}
\alias{plot.node_monitor}
\alias{plot.influential_obs}
\alias{plot.jeffreys}
\alias{plot.kl}
\alias{plot.final_node_monitor}
\alias{plot.seq_pa_ch_monitor}
\alias{plot.sensitivity}
\alias{plot.fro}
\title{Plotting methods}
\usage{
\method{plot}{seq_marg_monitor}(x, ...)
\method{plot}{CD}(x, ...)
\method{plot}{seq_cond_monitor}(x, ...)
\method{plot}{node_monitor}(x, ...)
\method{plot}{influential_obs}(x, ...)
\method{plot}{jeffreys}(x, ...)
\method{plot}{kl}(x, ...)
\method{plot}{final_node_monitor}(x, which, ...)
\method{plot}{seq_pa_ch_monitor}(x, ...)
\method{plot}{sensitivity}(x, ...)
\method{plot}{fro}(x, ...)
}
\arguments{
\item{x}{The output of a \code{bnmonitor} function (for example \code{node_monitor}, \code{sensitivity} or \code{kl}), matching the plot method being dispatched.}
\item{...}{for compatibility}
\item{which}{select the monitor to plot, either "marginal" or "conditional" (for output of \code{node_monitor} only).}
}
\value{
A plot specific to the object it is applied to.
}
\description{
Plotting methods for outputs of \code{bnmonitor} functions
}
|
# Extracted usage example for pAnalysis::cap1 (capitalizing a title string).
library(pAnalysis)
### Name: cap1
### Title: cap1
### Aliases: cap1
### ** Examples
uncappedtitle <- "this title"
cappedtitle <- cap1(uncappedtitle)  # presumably "This title" -- cap1 capitalizes the first letter; verify in pAnalysis docs
|
/data/genthat_extracted_code/pAnalysis/examples/cap1.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 159
|
r
|
# Extracted usage example for pAnalysis::cap1 (capitalizing a title string).
library(pAnalysis)
### Name: cap1
### Title: cap1
### Aliases: cap1
### ** Examples
uncappedtitle <- "this title"
cappedtitle <- cap1(uncappedtitle)  # presumably "This title" -- cap1 capitalizes the first letter; verify in pAnalysis docs
|
\name{smooth.construct.mdcx.smooth.spec}
%\Rdversion{1.0}
\alias{smooth.construct.mdcx.smooth.spec}
\alias{smooth.construct.mdcxBy.smooth.spec}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Constructor for monotone decreasing and convex P-splines in SCAMs
}
\description{This is a special method function
for creating smooths subject to both monotone decreasing and convexity constraints which is built by
the \code{mgcv} constructor function for smooth terms, \code{smooth.construct}.
It is constructed using mixed constrained P-splines. This smooth is specified via model terms such as
\code{s(x,k,bs="mdcx",m=2)},
where \code{k} denotes the basis dimension and \code{m+1} is the order of the B-spline basis.
\code{mdcxBy.smooth.spec} works similar to \code{mdcx.smooth.spec} but without applying an identifiability constraint ('zero intercept' constraint). \code{mdcxBy.smooth.spec} should be used when the smooth term has a numeric \code{by} variable that takes more than one value. In such cases, the smooth terms are fully identifiable without a 'zero intercept' constraint, so they are left unconstrained. This smooth is specified as
\code{s(x,by=z,bs="mdcxBy")}. See an example below.
However a factor \code{by} variable requires identifiability constraints, so \code{s(x,by=fac,bs="mdcx")} is used in this case.
}
\usage{
\method{smooth.construct}{mdcx.smooth.spec}(object, data, knots)
\method{smooth.construct}{mdcxBy.smooth.spec}(object, data, knots)
}
\arguments{
\item{object}{A smooth specification object, generated by an \code{s} term in a GAM formula.}
\item{data}{A data frame or list containing the data required by this term,
with names given by \code{object$term}. The \code{by} variable is the last element.}
\item{knots}{An optional list containing the knots supplied for basis setup.
If it is \code{NULL} then the knot locations are generated automatically.}
}
% \details{
%% ~~ If necessary, more details than the description above ~~
% }
\value{An object of class \code{"mdcx.smooth"}, \code{"mdcxBy.smooth"}.
}
\references{
Pya, N. and Wood, S.N. (2015) Shape constrained additive models. Statistics and Computing, 25(3), 543-559
Pya, N. (2010) Additive models with shape constraints. PhD thesis. University of Bath. Department of Mathematical Sciences
}
\author{
Natalya Pya <nat.pya@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{smooth.construct.mpi.smooth.spec}},
\code{\link{smooth.construct.mpd.smooth.spec}},
\code{\link{smooth.construct.cx.smooth.spec}},
\code{\link{smooth.construct.cv.smooth.spec}},
\code{\link{smooth.construct.mdcv.smooth.spec}},
\code{\link{smooth.construct.micx.smooth.spec}},
\code{\link{smooth.construct.micv.smooth.spec}}
}
\examples{
\dontrun{
## Monotone decreasing and convex SCOP-splines example
## simulating data...
require(scam)
set.seed(2)
n <- 100
x <- sort(runif(n)*3-1)
f <- (x-3)^6/1000 # monotone decreasing and convex smooth
y <- f+rnorm(n)*.4
dat <- data.frame(x=x,y=y)
## fit model ...
b <- scam(y~s(x,k=15,bs="mdcx"),family=gaussian(link="identity"),data=dat)
## fit unconstrained model ...
b1 <- scam(y~s(x,k=15,bs="ps"),family=gaussian(link="identity"),data=dat)
## plot results ...
plot(x,y,xlab="x",ylab="y")
lines(x,f) ## the true function
lines(x,b$fitted.values,col=2) ## mixed constrained fit
lines(x,b1$fitted.values,col=3) ## unconstrained fit
## numeric 'by' variable example...
set.seed(6)
n <- 100
x <- sort(runif(n)*3-1)
z <- runif(n,-2,3)
f <- (x-3)^6/1000
y <- f*z + rnorm(n)*.4
dat <- data.frame(x=x,z=z,y=y)
b <- scam(y~s(x,k=15,by=z,bs="mdcxBy"),data=dat)
summary(b)
par(mfrow=c(1,2))
plot(b,shade=TRUE)
## unconstrained fit...
b1 <- scam(y~s(x,k=15,by=z),data=dat)
plot(b1,shade=TRUE)
summary(b1)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models} \keyword{regression}%-- one or more ..
|
/man/smooth.construct.mdcx.smooth.spec.Rd
|
no_license
|
cran/scam
|
R
| false
| false
| 4,198
|
rd
|
\name{smooth.construct.mdcx.smooth.spec}
%\Rdversion{1.0}
\alias{smooth.construct.mdcx.smooth.spec}
\alias{smooth.construct.mdcxBy.smooth.spec}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Constructor for monotone decreasing and convex P-splines in SCAMs
}
\description{This is a special method function
for creating smooths subject to both monotone decreasing and convexity constraints which is built by
the \code{mgcv} constructor function for smooth terms, \code{smooth.construct}.
It is constructed using mixed constrained P-splines. This smooth is specified via model terms such as
\code{s(x,k,bs="mdcx",m=2)},
where \code{k} denotes the basis dimension and \code{m+1} is the order of the B-spline basis.
\code{mdcxBy.smooth.spec} works similar to \code{mdcx.smooth.spec} but without applying an identifiability constraint ('zero intercept' constraint). \code{mdcxBy.smooth.spec} should be used when the smooth term has a numeric \code{by} variable that takes more than one value. In such cases, the smooth terms are fully identifiable without a 'zero intercept' constraint, so they are left unconstrained. This smooth is specified as
\code{s(x,by=z,bs="mdcxBy")}. See an example below.
However a factor \code{by} variable requires identifiability constraints, so \code{s(x,by=fac,bs="mdcx")} is used in this case.
}
\usage{
\method{smooth.construct}{mdcx.smooth.spec}(object, data, knots)
\method{smooth.construct}{mdcxBy.smooth.spec}(object, data, knots)
}
\arguments{
\item{object}{A smooth specification object, generated by an \code{s} term in a GAM formula.}
\item{data}{A data frame or list containing the data required by this term,
with names given by \code{object$term}. The \code{by} variable is the last element.}
\item{knots}{An optional list containing the knots supplied for basis setup.
If it is \code{NULL} then the knot locations are generated automatically.}
}
% \details{
%% ~~ If necessary, more details than the description above ~~
% }
\value{An object of class \code{"mdcx.smooth"}, \code{"mdcxBy.smooth"}.
}
\references{
Pya, N. and Wood, S.N. (2015) Shape constrained additive models. Statistics and Computing, 25(3), 543-559
Pya, N. (2010) Additive models with shape constraints. PhD thesis. University of Bath. Department of Mathematical Sciences
}
\author{
Natalya Pya <nat.pya@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{smooth.construct.mpi.smooth.spec}},
\code{\link{smooth.construct.mpd.smooth.spec}},
\code{\link{smooth.construct.cx.smooth.spec}},
\code{\link{smooth.construct.cv.smooth.spec}},
\code{\link{smooth.construct.mdcv.smooth.spec}},
\code{\link{smooth.construct.micx.smooth.spec}},
\code{\link{smooth.construct.micv.smooth.spec}}
}
\examples{
\dontrun{
## Monotone decreasing and convex SCOP-splines example
## simulating data...
require(scam)
set.seed(2)
n <- 100
x <- sort(runif(n)*3-1)
f <- (x-3)^6/1000 # monotone decreasing and convex smooth
y <- f+rnorm(n)*.4
dat <- data.frame(x=x,y=y)
## fit model ...
b <- scam(y~s(x,k=15,bs="mdcx"),family=gaussian(link="identity"),data=dat)
## fit unconstrained model ...
b1 <- scam(y~s(x,k=15,bs="ps"),family=gaussian(link="identity"),data=dat)
## plot results ...
plot(x,y,xlab="x",ylab="y")
lines(x,f) ## the true function
lines(x,b$fitted.values,col=2) ## mixed constrained fit
lines(x,b1$fitted.values,col=3) ## unconstrained fit
## numeric 'by' variable example...
set.seed(6)
n <- 100
x <- sort(runif(n)*3-1)
z <- runif(n,-2,3)
f <- (x-3)^6/1000
y <- f*z + rnorm(n)*.4
dat <- data.frame(x=x,z=z,y=y)
b <- scam(y~s(x,k=15,by=z,bs="mdcxBy"),data=dat)
summary(b)
par(mfrow=c(1,2))
plot(b,shade=TRUE)
## unconstrained fit...
b1 <- scam(y~s(x,k=15,by=z),data=dat)
plot(b1,shade=TRUE)
summary(b1)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models} \keyword{regression}%-- one or more ..
|
## "INFOF422 Statistical foundations of machine learning" course
## R package gbcode
## Author: G. Bontempi
rm(list=ls())
library("quadprog")
library("MASS")
set.seed(0)
# Lp-norm of a numeric vector: (sum_i |x_i|^p)^(1/p).
# Fix: take abs(x) before raising to the p-th power. Without it the result is
# wrong for vectors with negative entries whenever p is odd (for even p, e.g.
# the default p = 2 used in this script, behavior is unchanged).
normv <- function(x, p = 2) {
  sum(abs(x)^p)^(1/p)
}
# TRUE  -> hard-margin SVM (box constraint gamma = Inf);
# FALSE -> soft-margin SVM with box constraint gamma = 0.05 on the alphas.
separable<-TRUE
if (!separable){
gam<-0.05
} else {
gam<-Inf
}
# Numerical tolerance: alphas below eps are treated as zero, and alphas within
# eps of gamma are treated as sitting on the box boundary.
eps<-0.001
# One experiment: sample a 2-class 2-D Gaussian dataset, solve the SVM dual
# with quadprog::solve.QP, then plot support vectors, separating line and margin.
for ( rep in 1:1){
N<-150 #number of samples per class
# Class +1 centered at (0,0), class -1 centered at (3,3).
x1<-cbind(rnorm(N,0,sd=0.2),rnorm(N,0,sd=0.2))
y1<-numeric(N)+1
x2<-cbind(rnorm(N,3,sd=0.5),rnorm(N,3,sd=0.5))
y2<-numeric(N)-1
X<-rbind(x1,x2)
Y<-c(y1,y2)
## PLOT Training set
plot(-2:6, -2:6, type = "n",xlab="x1",ylab="x2")
points(X[1:N,1],X[1:N,2],col="red")
points(X[(N+1):(2*N),1],X[(N+1):(2*N),2],col="blue")
par(ask=TRUE)
##########################################
########## SVM parametric identification
# Dual objective matrix: D[i,j] = y_i * y_j * <x_i, x_j>.
Dmat<-array(NA,c(2*N,2*N))
for (i in 1:(2*N)){
for (j in 1:(2*N)){
Dmat[i,j]<-Y[i]*Y[j]*(X[i,]%*%X[j,])
}
}
# Small ridge on the diagonal so solve.QP sees a positive-definite matrix.
Dmat<-Dmat+1e-3*diag(2*N)
d<-array(1,c(2*N,1))
# Constraint matrix: first column is the equality sum_i y_i*alpha_i = 0,
# remaining columns enforce alpha_i >= 0.
A<-cbind(Y,diag(2*N))
b0<-numeric(2*N+1)
if (! separable){
# Soft margin adds the upper bound alpha_i <= gamma (written -alpha_i >= -gamma).
A<-cbind(A,-diag(2*N))
b0<-c(b0,numeric(2*N))
b0[(2*N+2):(4*N+1)]<--gam
## min_b(-d^T b + 1/2 b^T D b) with the constraints A^T b >= bvec.
## b-> alpha [2N,1]
## 1st constraint sum_i y_i*alpha_i=0
## 2:(2N+1) constraint alpha_i >=0
## (2N+2):(4N+1) constraint -alpha_i>=-gam
}
S<-solve.QP(Dmat,dvec=d,Amat=A,meq=1,bvec=b0)
## min_b(-d^T b + 1/2 b^T D b) with the constraints A^T b >= bvec.
## b-> alpha [2N,1]
## 1st constraint sum_i y_i*alpha_i=0
## 2:(2N+1) constraint alpha_i >=0
alpha<-S$solution
alpha[alpha<eps]<-0
# ind.j: unbounded support vectors (0 < alpha < gamma); they lie exactly on
# the margin and can be used to recover the intercept beta0.
ind.j<-which(alpha>eps & alpha<gam-eps)
if (all(alpha<=gam+eps) & length(ind.j)>0){
cat("min value=",S$value,"\n")
cat("min value2=",-t(d)%*%alpha+(1/2*t(alpha)%*%Dmat%*%alpha),"\n")
cat("sum_i y_i*alpha_i=0:",alpha%*%Y,"\n")
# Primal weight vector: beta = sum_i alpha_i * y_i * x_i.
beta<-numeric(2)
for ( i in 1:(2*N))
beta<-beta+alpha[i]*Y[i]*X[i,]
ind1<-which(alpha[1:N]>eps)
ind2<-which(alpha[(N+1):(2*N)]>eps)
## PLOT Support Vector
points(X[ind1,1],X[ind1,2],col="black")
points(X[(N+ind2),1],X[(N+ind2),2],col="black")
if (separable){
# Intercept from one support vector of each class; margin = 1/||beta||.
beta0<--0.5*(beta%*%X[ind1[1],]+beta%*%X[N+ind2[1],])
marg<-1/normv(beta)
} else {
# Soft-margin case: L = alpha^T (y y^T K) alpha gives ||beta||^2.
L<-0
for (i in 1:(2*N)){
for (j in 1:(2*N)){
L=L+Y[i]*Y[j]*alpha[i]*alpha[j]*(X[i,]%*%X[j,])
}
}
beta0<-0
# Intercept from an unbounded support vector: y_j (beta.x_j + beta0) = 1.
beta0<-(1-Y[ind.j[1]]*beta%*%X[ind.j[1],])/Y[ind.j[1]]
marg<-1/sqrt(L)
## points whose slack variable is positive
ind3<-which(abs(alpha[1:N]-gam)<eps)
ind4<-which(abs(alpha[(N+1):(2*N)]-gam)<eps)
points(X[ind3,1],X[ind3,2],col="yellow") ## red->yellow
points(X[(N+ind4),1],X[(N+ind4),2],col="green")
}
cat("beta=",beta,"\n")
# Angle of the separating line beta.x + beta0 = 0.
theta<-atan(-beta[1]/beta[2])
cat("theta=",theta,"\n")
## PLOT Separating Hyperplane
abline(b=-beta[1]/beta[2],a=-beta0/beta[2])
## PLOT Margin
abline(b=-beta[1]/beta[2],
a=-beta0/beta[2]+ marg/(cos(pi-theta)),lty=2)
abline(b=-beta[1]/beta[2],
a=-beta0/beta[2]- marg/(cos(pi-theta)),lty=2)
title(paste("margin=",marg, ", gamma=",gam))
print(marg)
par(ask=TRUE)
plot(alpha)
title("Alpha values")
} else
# solve.QP returned alphas violating the box constraint, or no unbounded
# support vector exists -> cannot recover the hyperplane.
title("Missing solution")
}
|
/inst/scripts/Linear/svm.R
|
no_license
|
gbonte/gbcode
|
R
| false
| false
| 3,393
|
r
|
## "INFOF422 Statistical foundations of machine learning" course
## R package gbcode
## Author: G. Bontempi
rm(list=ls())
library("quadprog")
library("MASS")
set.seed(0)
# Lp-norm of a numeric vector: (sum_i |x_i|^p)^(1/p).
# Fix: take abs(x) before raising to the p-th power. Without it the result is
# wrong for vectors with negative entries whenever p is odd (for even p, e.g.
# the default p = 2 used in this script, behavior is unchanged).
normv <- function(x, p = 2) {
  sum(abs(x)^p)^(1/p)
}
# TRUE  -> hard-margin SVM (box constraint gamma = Inf);
# FALSE -> soft-margin SVM with box constraint gamma = 0.05 on the alphas.
separable<-TRUE
if (!separable){
gam<-0.05
} else {
gam<-Inf
}
# Numerical tolerance: alphas below eps are treated as zero, and alphas within
# eps of gamma are treated as sitting on the box boundary.
eps<-0.001
for ( rep in 1:1){
N<-150 #number of samples per class
x1<-cbind(rnorm(N,0,sd=0.2),rnorm(N,0,sd=0.2))
y1<-numeric(N)+1
x2<-cbind(rnorm(N,3,sd=0.5),rnorm(N,3,sd=0.5))
y2<-numeric(N)-1
X<-rbind(x1,x2)
Y<-c(y1,y2)
## PLOT Training set
plot(-2:6, -2:6, type = "n",xlab="x1",ylab="x2")
points(X[1:N,1],X[1:N,2],col="red")
points(X[(N+1):(2*N),1],X[(N+1):(2*N),2],col="blue")
par(ask=TRUE)
##########################################
########## SVM parametric identification
Dmat<-array(NA,c(2*N,2*N))
for (i in 1:(2*N)){
for (j in 1:(2*N)){
Dmat[i,j]<-Y[i]*Y[j]*(X[i,]%*%X[j,])
}
}
Dmat<-Dmat+1e-3*diag(2*N)
d<-array(1,c(2*N,1))
A<-cbind(Y,diag(2*N))
b0<-numeric(2*N+1)
if (! separable){
A<-cbind(A,-diag(2*N))
b0<-c(b0,numeric(2*N))
b0[(2*N+2):(4*N+1)]<--gam
## min_b(-d^T b + 1/2 b^T D b) with the constraints A^T b >= bvec.
## b-> alpha [2N,1]
## 1st constraint sum_i y_i*alpha_i=0
## 2:(2N+1) constraint alpha_i >=0
## (2N+2):(4N+1) constraint -alpha_i>=-gam
}
S<-solve.QP(Dmat,dvec=d,Amat=A,meq=1,bvec=b0)
## min_b(-d^T b + 1/2 b^T D b) with the constraints A^T b >= bvec.
## b-> alpha [2N,1]
## 1st contraint sum_i y_i*alpha_i=0
## 2:(2N+1) constraint alpha_i >=0
alpha<-S$solution
alpha[alpha<eps]<-0
ind.j<-which(alpha>eps & alpha<gam-eps)
if (all(alpha<=gam+eps) & length(ind.j)>0){
cat("min value=",S$value,"\n")
cat("min value2=",-t(d)%*%alpha+(1/2*t(alpha)%*%Dmat%*%alpha),"\n")
cat("sum_i y_i*alpha_i=0:",alpha%*%Y,"\n")
beta<-numeric(2)
for ( i in 1:(2*N))
beta<-beta+alpha[i]*Y[i]*X[i,]
ind1<-which(alpha[1:N]>eps)
ind2<-which(alpha[(N+1):(2*N)]>eps)
## PLOT Support Vector
points(X[ind1,1],X[ind1,2],col="black")
points(X[(N+ind2),1],X[(N+ind2),2],col="black")
if (separable){
beta0<--0.5*(beta%*%X[ind1[1],]+beta%*%X[N+ind2[1],])
marg<-1/normv(beta)
} else {
L<-0
for (i in 1:(2*N)){
for (j in 1:(2*N)){
L=L+Y[i]*Y[j]*alpha[i]*alpha[j]*(X[i,]%*%X[j,])
}
}
beta0<-0
beta0<-(1-Y[ind.j[1]]*beta%*%X[ind.j[1],])/Y[ind.j[1]]
marg<-1/sqrt(L)
## points whose slack variable is positive
ind3<-which(abs(alpha[1:N]-gam)<eps)
ind4<-which(abs(alpha[(N+1):(2*N)]-gam)<eps)
points(X[ind3,1],X[ind3,2],col="yellow") ## red->yellow
points(X[(N+ind4),1],X[(N+ind4),2],col="green")
}
cat("beta=",beta,"\n")
theta<-atan(-beta[1]/beta[2])
cat("theta=",theta,"\n")
## PLOT Separating Hyperplane
abline(b=-beta[1]/beta[2],a=-beta0/beta[2])
## PLOT Margin
abline(b=-beta[1]/beta[2],
a=-beta0/beta[2]+ marg/(cos(pi-theta)),lty=2)
abline(b=-beta[1]/beta[2],
a=-beta0/beta[2]- marg/(cos(pi-theta)),lty=2)
title(paste("margin=",marg, ", gamma=",gam))
print(marg)
par(ask=TRUE)
plot(alpha)
title("Alpha values")
} else
title("Missing solution")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_inpe_data.R
\name{inpe_station_data}
\alias{inpe_station_data}
\title{Get climate data from stations INPE}
\usage{
inpe_station_data(station_id = 31973, start_date = "2005/01/01",
end_date = "2005/02/02")
}
\arguments{
\item{station_id}{A numeric vector with the station id.}
\item{start_date}{Start date.}
\item{end_date}{End date (Maximum of one year between start and end dates).}
}
\value{
A data frame containing the climate data and attributes.
}
\description{
Get climate data from stations INPE
}
|
/man/inpe_station_data.Rd
|
no_license
|
gustavobio/brclimate
|
R
| false
| true
| 592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_inpe_data.R
\name{inpe_station_data}
\alias{inpe_station_data}
\title{Get climate data from stations INPE}
\usage{
inpe_station_data(station_id = 31973, start_date = "2005/01/01",
end_date = "2005/02/02")
}
\arguments{
\item{station_id}{A numeric vector with the station id.}
\item{start_date}{Start date.}
\item{end_date}{End date (Maximum of one year between start and end dates).}
}
\value{
A data frame containing the climate data and attributes.
}
\description{
Get climate data from stations INPE
}
|
#' @title Rounding of Null Imaginary Part of a Complex Number
#' @description imaginary parts with values very close to 0 are 'zapped', i.e. treated as 0.
#' Therefore, the number becomes real and changes its class from complex to numeric.
#' @param x a scalar or vector, real or complex.
#' @param tol a tolerance, \eqn{10^{-6}}{10^-6} by default. Prevents possible numerical problems.
#' Can be set to 0 if desired.
#' @return \code{x} unchanged if any imaginary part exceeds \code{tol} in absolute
#' value (after \code{zapsmall}); otherwise the real part of \code{x} as a double vector.
#' @author Albert Dorador
#' @export
#' @examples
#' x1 <- 1:100
#' x2 <- c(1:98,2+3i,0-5i)
#' x3 <- c(1:98,2+0i,7-0i)
#' x4 <- complex(real = rnorm(100), imaginary = rnorm(100))
#'
#' Imzap(x1) # innocuous with real vectors
#' Imzap(x2) # 1 single element is enough to turn the vector into complex
#' Imzap(x3) # removes extra 0i and changes class from complex to numeric
#' Imzap(x4) # innocuous with complex vectors with non-null complex part
#'
Imzap <- function(x, tol = 1e-6) {
  # zapsmall() first rounds away floating-point noise in the imaginary part;
  # if every |Im| is then within tol, the vector is effectively real.
  # Fixes vs. the previous version: the dead assignment `z <- zapsmall(x)` is
  # removed, and Re() is used before as.double() so that no "imaginary parts
  # discarded in coercion" warning fires when Im parts are tiny but non-zero.
  if (all(abs(Im(zapsmall(x))) <= tol)) as.double(Re(x)) else x
}
|
/R/Imzap.R
|
no_license
|
cran/complexplus
|
R
| false
| false
| 998
|
r
|
#' @title Rounding of Null Imaginary Part of a Complex Number
#' @description imaginary parts with values very close to 0 are 'zapped', i.e. treated as 0.
#' Therefore, the number becomes real and changes its class from complex to numeric.
#' @param x a scalar or vector, real or complex.
#' @param tol a tolerance, \eqn{10^{-6}}{10^-6} by default. Prevents possible numerical problems.
#' Can be set to 0 if desired.
#' @return \code{x} unchanged if any imaginary part exceeds \code{tol} in absolute
#' value (after \code{zapsmall}); otherwise the real part of \code{x} as a double vector.
#' @author Albert Dorador
#' @export
#' @examples
#' x1 <- 1:100
#' x2 <- c(1:98,2+3i,0-5i)
#' x3 <- c(1:98,2+0i,7-0i)
#' x4 <- complex(real = rnorm(100), imaginary = rnorm(100))
#'
#' Imzap(x1) # innocuous with real vectors
#' Imzap(x2) # 1 single element is enough to turn the vector into complex
#' Imzap(x3) # removes extra 0i and changes class from complex to numeric
#' Imzap(x4) # innocuous with complex vectors with non-null complex part
#'
Imzap <- function(x, tol = 1e-6) {
  # zapsmall() first rounds away floating-point noise in the imaginary part;
  # if every |Im| is then within tol, the vector is effectively real.
  # Fixes vs. the previous version: the dead assignment `z <- zapsmall(x)` is
  # removed, and Re() is used before as.double() so that no "imaginary parts
  # discarded in coercion" warning fires when Im parts are tiny but non-zero.
  if (all(abs(Im(zapsmall(x))) <= tol)) as.double(Re(x)) else x
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/solr_all.r
\name{solr_all}
\alias{solr_all}
\title{Solr search.}
\usage{
solr_all(q = "*:*", sort = NULL, start = 0, rows = NULL,
pageDoc = NULL, pageScore = NULL, fq = NULL, fl = NULL,
defType = NULL, timeAllowed = NULL, qt = NULL, wt = "json",
NOW = NULL, TZ = NULL, echoHandler = NULL, echoParams = NULL,
key = NULL, base = NULL, callopts = list(), raw = FALSE,
parsetype = "df", concat = ",", ..., verbose = TRUE)
}
\arguments{
\item{q}{Query terms, defaults to '*:*', or everything.}
\item{sort}{Field to sort on. You can specify ascending (e.g., score desc) or
descending (e.g., score asc), sort by two fields (e.g., score desc, price asc),
or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of
x_f and y_f in a descending order).}
\item{start}{Record to start at, default to beginning.}
\item{rows}{Number of records to return. Defaults to 10.}
\item{pageDoc}{If you expect to be paging deeply into the results (say beyond page 10,
assuming rows=10) and you are sorting by score, you may wish to add the pageDoc
and pageScore parameters to your request. These two parameters tell Solr (and Lucene)
what the last result (Lucene internal docid and score) of the previous page was,
so that when scoring the query for the next set of pages, it can ignore any results
that occur higher than that item. To get the Lucene internal doc id, you will need
to add [docid] to the &fl list.
e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score}
\item{pageScore}{See pageDoc notes.}
\item{fq}{Filter query, this does not affect the search, only what gets returned}
\item{fl}{Fields to return}
\item{defType}{Specify the query parser to use with this request.}
\item{timeAllowed}{The time allowed for a search to finish. This value only applies
to the search and not to requests in general. Time is in milliseconds. Values <= 0
mean no time restriction. Partial results may be returned (if there are any).}
\item{qt}{Which query handler used.}
\item{wt}{Data type returned, defaults to 'json'}
\item{NOW}{Set a fixed time for evaluating Date-based expressions}
\item{TZ}{Time zone, you can override the default.}
\item{echoHandler}{If the echoHandler parameter is true, Solr places the name of
the handle used in the response to the client for debugging purposes.}
\item{echoParams}{The echoParams parameter tells Solr what kinds of Request
parameters should be included in the response for debugging purposes, legal values
include:
\itemize{
\item none - don't include any request parameters for debugging
\item explicit - include the parameters explicitly specified by the client in the request
\item all - include all parameters involved in this request, either specified explicitly
by the client, or implicit because of the request handler configuration.
}}
\item{key}{API key, if needed.}
\item{base}{URL endpoint.}
\item{callopts}{Call options passed on to httr::GET}
\item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
\item{parsetype}{(character) One of 'list' or 'df'}
\item{concat}{(character) Character to concatenate elements of longer than length 1.
Note that this only works reliably when data format is json (wt='json'). The parsing
is more complicated in XML format, but you can do that on your own.}
\item{...}{Further args.}
\item{verbose}{If TRUE (default) the url call used printed to console.}
}
\value{
XML, JSON, a list, or data.frame
}
\description{
Solr search.
}
\examples{
\dontrun{
url <- 'http://api.plos.org/search'
solr_all(q='*:*', rows=2, fl='id', base=url)
}
}
\references{
See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for
more information.
}
\seealso{
\code{\link{solr_highlight}}, \code{\link{solr_facet}}
}
|
/man/solr_all.Rd
|
permissive
|
cran/solr
|
R
| false
| false
| 3,833
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/solr_all.r
\name{solr_all}
\alias{solr_all}
\title{Solr search.}
\usage{
solr_all(q = "*:*", sort = NULL, start = 0, rows = NULL,
pageDoc = NULL, pageScore = NULL, fq = NULL, fl = NULL,
defType = NULL, timeAllowed = NULL, qt = NULL, wt = "json",
NOW = NULL, TZ = NULL, echoHandler = NULL, echoParams = NULL,
key = NULL, base = NULL, callopts = list(), raw = FALSE,
parsetype = "df", concat = ",", ..., verbose = TRUE)
}
\arguments{
\item{q}{Query terms, defaults to '*:*', or everything.}
\item{sort}{Field to sort on. You can specify ascending (e.g., score desc) or
descending (e.g., score asc), sort by two fields (e.g., score desc, price asc),
or sort by a function (e.g., sum(x_f, y_f) desc, which sorts by the sum of
x_f and y_f in a descending order).}
\item{start}{Record to start at, default to beginning.}
\item{rows}{Number of records to return. Defaults to 10.}
\item{pageDoc}{If you expect to be paging deeply into the results (say beyond page 10,
assuming rows=10) and you are sorting by score, you may wish to add the pageDoc
and pageScore parameters to your request. These two parameters tell Solr (and Lucene)
what the last result (Lucene internal docid and score) of the previous page was,
so that when scoring the query for the next set of pages, it can ignore any results
that occur higher than that item. To get the Lucene internal doc id, you will need
to add [docid] to the &fl list.
e.g., q=*:*&start=10&pageDoc=5&pageScore=1.345&fl=[docid],score}
\item{pageScore}{See pageDoc notes.}
\item{fq}{Filter query, this does not affect the search, only what gets returned}
\item{fl}{Fields to return}
\item{defType}{Specify the query parser to use with this request.}
\item{timeAllowed}{The time allowed for a search to finish. This value only applies
to the search and not to requests in general. Time is in milliseconds. Values <= 0
mean no time restriction. Partial results may be returned (if there are any).}
\item{qt}{Which query handler used.}
\item{wt}{Data type returned, defaults to 'json'}
\item{NOW}{Set a fixed time for evaluating Date-based expressions}
\item{TZ}{Time zone, you can override the default.}
\item{echoHandler}{If the echoHandler parameter is true, Solr places the name of
the handle used in the response to the client for debugging purposes.}
\item{echoParams}{The echoParams parameter tells Solr what kinds of Request
parameters should be included in the response for debugging purposes, legal values
include:
\itemize{
\item none - don't include any request parameters for debugging
\item explicit - include the parameters explicitly specified by the client in the request
\item all - include all parameters involved in this request, either specified explicitly
by the client, or implicit because of the request handler configuration.
}}
\item{key}{API key, if needed.}
\item{base}{URL endpoint.}
\item{callopts}{Call options passed on to httr::GET}
\item{raw}{(logical) If TRUE, returns raw data in format specified by wt param}
\item{parsetype}{(character) One of 'list' or 'df'}
\item{concat}{(character) Character to concatenate elements of longer than length 1.
Note that this only works reliably when data format is json (wt='json'). The parsing
is more complicated in XML format, but you can do that on your own.}
\item{...}{Further args.}
\item{verbose}{If TRUE (default) the url call used printed to console.}
}
\value{
XML, JSON, a list, or data.frame
}
\description{
Solr search.
}
\examples{
\dontrun{
url <- 'http://api.plos.org/search'
solr_all(q='*:*', rows=2, fl='id', base=url)
}
}
\references{
See \url{http://wiki.apache.org/solr/#Search_and_Indexing} for
more information.
}
\seealso{
\code{\link{solr_highlight}}, \code{\link{solr_facet}}
}
|
############################
#Step 0: configure parameters
#Change the following parameters based on your own machine
#Also change the raw .xlsx file name because this name will be used among
#the whole pipeline and for generating final results
############################
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable for a
# standalone script, but remove it if this file is ever source()'d.
rm(list=ls())
#directory of raw .xlsx files
raw_dir <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/01_C73T_testRcode"
#directory of output results
output_dir <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/01_C73T_testRcode"
#file position of Perl script "bandpass.pl"
bp_script <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/TBX6mutation/bandpass.pl"
#first bandwidth parameter
# (passed verbatim to bandpass.pl; presumably a filter cutoff -- confirm
# against the Perl script's usage)
bw1 <- 45
#second bandwidth parameter
bw2 <- 3
###########################
#Step 1: load packages and functions
###########################
# Install-on-demand guard, then attach the three packages the pipeline uses.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
if (!requireNamespace("chron", quietly = TRUE)) install.packages("chron")
library(ggplot2)
library(readxl)
library(chron)
# Replace the first column of `dat` with a 5-minute time grid (0, 5, 10, ...)
# and rename that column "mins". All other columns are left untouched.
dat_modify <- function(dat) {
  n_obs <- nrow(dat)
  dat[, 1] <- seq(from = 0, by = 5, length.out = n_obs)
  colnames(dat)[1] <- "mins"
  dat
}
# Locate peaks and valleys of a signal by fitting a degree-`degree` polynomial
# to `exp` over `time`, then scanning the sign of the fitted curve's first
# difference on the integer grid min(time):max(time). Only extrema whose
# positions fall inside `interval` are returned.
# NOTE: the parameter name `exp` shadows base::exp inside this function.
find_peak_valley <- function(exp,time,degree=20,interval=c(100,1500)){ #function to call peak/valley positions and values
obj <- lm(exp~poly(time, degree))
x <- min(time):max(time)
# F.V: fitted (smoothed) values on the integer time grid.
F.V <- predict(obj,data.frame(time=x))
# Sign of the slope between consecutive grid points: +1 rising, -1 falling.
dif.F.V <- sign(diff(F.V))
#dif.mat <- cbind((min(time)+1):max(time),dif.F.V)
peak <- c()
valley <- c()
# A valley is a (-1,1) or (-1,0) sign transition; a peak is (1,-1) or (1,0).
for(i in 1:(length(dif.F.V)-1)){
is.valley <- all(dif.F.V[i:(i+1)]==c(-1,1))||all(dif.F.V[i:(i+1)]==c(-1,0))
is.peak <- all(dif.F.V[i:(i+1)]==c(1,-1))||all(dif.F.V[i:(i+1)]==c(1,0))
if(is.valley)
valley <- c(valley,x[i+1])
if(is.peak)
peak <- c(peak,x[i+1])
}
# Keep only extrema inside the requested time window.
p_res <- peak[(peak>=interval[1])&(peak<=interval[2])]
v_res <- valley[(valley>=interval[1])&(valley<=interval[2])]
# peak_diff/valley_diff are successive peak-to-peak / valley-to-valley
# spacings, i.e. per-cycle oscillation periods.
return(list(peak=p_res,valley=v_res, peak_diff=diff(p_res),valley_diff=diff(v_res),fitted_value=F.V))
}
range_11 <- function(x){x/(max(x)-min(x))} #for data scaling
# Draw up to three diagnostic plots for one trace:
#   1. detrended data + polynomial fit, with peak/valley verticals;
#   2. scaled fit + its lagged difference;
#   3. (if plot_raw) the raw, un-detrended data.
# Called purely for its plotting side effects; returns nothing useful.
# NOTE: the parameter name `exp` shadows base::exp inside this function.
plot_osc <- function(exp,time,plot_raw=F,raw=NULL,degree=20,interval=c(100,1500),ask=T,...){ #for plotting
pv <- find_peak_valley(exp=exp,time=time,degree=degree,interval=interval)
x <- min(time):max(time)
# Refit the same polynomial as find_peak_valley to get the curve to draw.
obj <- lm(exp~poly(time,degree))
F.V <- predict(obj,data.frame(time=x))
plot(exp~time,type="l",ylab="Detrended expression",xlab="mins",
main="Detrended data and polynomial-fitted curve",...)
lines(F.V~x,col="red")
# Blue dashed verticals at peaks, green at valleys (same in all panels).
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
legend("topright",lty=c(1,1,2,2),col=c("black","red","blue","green"),
legend=c("detrended data","fitted value","peak","valley"))
par(ask=ask)
plot(range_11(F.V)~x,type="l",xlab="mins",ylab="scaled expression",col="red",
main="Polynomial-fitted curve and lagged difference",...)
lines(range_11(diff(F.V))~x[-1],col="pink")
abline(0,0,lty="dashed")
legend("topright",lty=c(1,1,2,2),col=c("red","pink","blue","green"),
legend=c("fitted value","lagged diff","peak","valley"))
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
if(plot_raw){
plot(raw~time,type="l",xlab="mins",ylab="raw expression",col="black",
main="Raw data without detrending",...)
#lines(range_11(diff(F.V))~x[-1],col="pink")
#abline(0,0,lty="dashed")
legend("topright",lty=c(1,2,2),col=c("black","blue","green"),
legend=c("raw","peak","valley"))
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
}
# Restore prompting behavior after the multi-plot sequence.
par(ask=F)
}
##########################
#Step 2: pre-processing raw .xlsx files
##########################
# Strip a trailing slash so paste0(raw_dir, "/", ...) below stays well formed.
raw_dir <- gsub("/$", "", raw_dir)
# NOTE(review): pattern = ".xlsx" is an unanchored regex ('.' matches any
# character); it also matches e.g. "foo_xlsx.txt". "\\.xlsx$" would be stricter.
raw_files <- list.files(raw_dir,pattern = ".xlsx")
# For each workbook: read the "Results Table" sheet, keep the columns between
# column 5 and the "Part" column, convert the step index to an H:MM:00 clock
# string, and write a tab-separated *_raw.txt next to the input.
for(i in seq_along(raw_files)){
raw_temp <- read_xlsx(file.path(raw_dir,raw_files[i]),sheet = "Results Table",skip = 6)
raw_temp <- raw_temp[,5:(which(colnames(raw_temp)=="Part")-1)]
colnames(raw_temp)[1] <- "#Steps"
tm <- round(raw_temp$`#Steps`,0)
# H:MM:00 with zero-padded minutes.
raw_temp[,1] <- paste0(tm%/%60,":",ifelse(tm%%60<10,paste0(0,tm%%60),tm%%60),":00")
write.table(raw_temp,file = paste0(raw_dir,"/",gsub(".xlsx\\>","_raw.txt",raw_files[i])),
quote = F, row.names = F,col.names = T,sep = "\t")
}
##########################
#Step 3: Run bandpass.pl
##########################
# Detrend every *_raw.txt through the external Perl bandpass filter, writing
# a matching *_detrended.txt. Command shape: bandpass.pl < in bw1 bw2 > out.
bp_file <- list.files(raw_dir,pattern = "_raw.txt")
bp_out <- gsub("_raw.txt","_detrended.txt",bp_file)
for(i in seq_along(bp_file)){
cmd <- paste(bp_script, "<", file.path(raw_dir,bp_file[i]), bw1, bw2, ">", file.path(raw_dir,bp_out[i]), sep = " ")
system(cmd)
}
#########################
#Step 4: Polynomial fitting and final results
#########################
# Pair each raw file with its detrended counterpart; Ind holds the sample name
# (file name with the _raw.txt suffix stripped) used for output folders.
Exp <- list()
Ind <- gsub(pattern = "(.*)_raw.txt" ,replacement ="\\1", x= bp_file)
for(i in seq_along(bp_file)){
raw_dat <- read.delim(file.path(raw_dir,bp_file[i]),sep = "\t",header = T)
detrend_dat <- read.delim(file.path(raw_dir,bp_out[i]),sep = "\t",header = T)
Exp[[i]] <- list(raw=raw_dat,detrended=detrend_dat)
}
names(Exp) <- Ind
# Per sample: call peaks/valleys on every trace, tabulate positions, values
# and periods, write CSVs and a multi-page PDF of diagnostic plots.
for(j in 1:length(Exp)){
###modify data to fit input format
# isRD: TRUE when Exp[[j]] is the raw/detrended pair built above; the else
# branch tolerates a bare data frame supplied directly.
isRD <- is.data.frame(Exp[[j]][[1]])
if(isRD){
raw_temp <- Exp[[j]][[1]]
raw_temp <- dat_modify(raw_temp)
dat_temp <- Exp[[j]][[2]]
dat_temp <- dat_modify(dat_temp)
}else{
dat_temp <- Exp[[j]]
if(!is.factor(dat_temp[,1])){
dat_temp[[1]] <- NULL
}
dat_temp <- dat_modify(dat_temp)
}
###get peak/valley information (saved in `pvpv`)
# Search window: from t=100 min up to 90% of the recording (edge effects of
# the polynomial fit are unreliable near the boundaries).
if(dim(dat_temp)[2]==2){
pvpv <- list()
pvpv[[1]] <- find_peak_valley(dat_temp[,2],time=dat_temp[,1],
degree=25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]))
names(pvpv) <- colnames(dat_temp)[2]
}else{
pvpv <- apply(dat_temp[,-1],2,find_peak_valley,time=dat_temp[,1],
degree=25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]))
}
###pre-define empty variables to save peak/value information
# One column per trace; up to 10 peaks/valleys per trace (see loop below).
Exp_Peak <- matrix(NA,10,length(pvpv))
colnames(Exp_Peak) <- names(pvpv)
Exp_Peakval <- Exp_Peak
Exp_Valley <- matrix(NA,10,length(pvpv))
colnames(Exp_Valley) <- names(pvpv)
Exp_Valleyval <- Exp_Valley
Ave_Period <- numeric(length(pvpv))
names(Ave_Period) <- names(pvpv)
# Exp_FV: fitted polynomial values on the integer minute grid; column 1 = time.
Exp_FV <- matrix(NA,max(dat_temp$mins)-min(dat_temp$mins)+1,length(pvpv)+1)
Exp_FV[,1] <- min(dat_temp$mins):max(dat_temp$mins)
colnames(Exp_FV) <- c("time",names(pvpv))
###extract information from `pvpv`
for(i in 1:length(pvpv)){
p_temp <- pvpv[[i]]$peak
v_temp <- pvpv[[i]]$valley
period_temp <- c(diff(p_temp),diff(v_temp))
###peak position
Exp_Peak[1:length(p_temp),i] <- p_temp
###valley position
Exp_Valley[1:length(v_temp),i] <- v_temp
Ave_Period[i] <- mean(period_temp)
###fitted value of polynomial regression
Exp_FV[,(i+1)] <- pvpv[[i]]$fitted_value
}
# Look up the fitted expression value at each recorded peak/valley position.
for(d1 in 1:10){ #assume no more than 10 peaks(valleys)
###peak values
for(d2 in 1:ncol(Exp_Peakval)){
if(!is.na(Exp_Peak[d1,d2])){
Exp_Peakval[d1,d2] <- Exp_FV[which(Exp_FV[,1]==Exp_Peak[d1,d2]),d2+1]
}
}
###valley values
for(d3 in 1:ncol(Exp_Valleyval)){
if(!is.na(Exp_Valley[d1,d3])){
Exp_Valleyval[d1,d3] <- Exp_FV[which(Exp_FV[,1]==Exp_Valley[d1,d3]),d3+1]
}
}
}
###peak oscillation period
Exp_Peak_Period <- apply(Exp_Peak,2,diff)
rownames(Exp_Peak_Period) <- paste0("Period ",1:nrow(Exp_Peak_Period))
###valley oscillation period
Exp_Valley_Period <- apply(Exp_Valley,2,diff)
rownames(Exp_Valley_Period) <- paste0("Period ",1:nrow(Exp_Valley_Period))
###save results to output directory
# One sub-directory per sample; each table goes out as <sample>_<what>.csv.
dir.create(file.path(output_dir,Ind[j]))
write.table(Exp_Valley,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valley.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peak,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peak.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Valleyval,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valleyval.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peakval,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peakval.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peak_Period,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peak_Period.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Valley_Period,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valley_Period.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_FV,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_fitted_values.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
###generate plot to output directory
fname <- paste0(output_dir,"/",Ind[j],"/",Ind[j],".pdf")
pdf(fname,onefile = T,width = 10,height = 7)
for(i in 1:(ncol(dat_temp)-1)){
if(isRD){
plot_osc(dat_temp[,i+1],dat_temp[,1],plot_raw = T,
raw = raw_temp[,i+1],degree = 25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]),ask=F,sub=colnames(dat_temp)[i+1])
}else{
plot_osc(dat_temp[,i+1],dat_temp[,1],plot_raw = F,
degree = 25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]),ask=F,sub=colnames(dat_temp)[i+1])
}
}
dev.off()
}
|
/oscillation_pipeline.R
|
no_license
|
zijianni/RNA-oscillation-pipeline
|
R
| false
| false
| 10,319
|
r
|
############################
#Step 0: configure parameters
#Change the following parameters based on your onw machine
#Also change the raw .xlsx file name because this name will be used among
#the whole pipeline and for generating final results
############################
rm(list=ls())
#directory of raw .xlsx files
raw_dir <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/01_C73T_testRcode"
#directory of output results
output_dir <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/01_C73T_testRcode"
#file position of Perl script "bandpass.pl"
bp_script <- "~/Box/HES7oscillation/Human_HES7_data/H1-HES7mutationTest/TBX6mutation/bandpass.pl"
#first bandwidth parameter
bw1 <- 45
#second bandwidth parameter
bw2 <- 3
###########################
#Step 1: load packages and functions
###########################
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
if (!requireNamespace("readxl", quietly = TRUE)) install.packages("readxl")
if (!requireNamespace("chron", quietly = TRUE)) install.packages("chron")
library(ggplot2)
library(readxl)
library(chron)
# Replace the first column of `dat` with a 5-minute time grid (0, 5, 10, ...)
# and rename that column "mins". All other columns are left untouched.
dat_modify <- function(dat) {
  n_obs <- nrow(dat)
  dat[, 1] <- seq(from = 0, by = 5, length.out = n_obs)
  colnames(dat)[1] <- "mins"
  dat
}
# Locate peaks and valleys of a signal by fitting a degree-`degree` polynomial
# to `exp` over `time`, then scanning the sign of the fitted curve's first
# difference on the integer grid min(time):max(time). Only extrema whose
# positions fall inside `interval` are returned.
# NOTE: the parameter name `exp` shadows base::exp inside this function.
find_peak_valley <- function(exp,time,degree=20,interval=c(100,1500)){ #function to call peak/valley positions and values
obj <- lm(exp~poly(time, degree))
x <- min(time):max(time)
# F.V: fitted (smoothed) values on the integer time grid.
F.V <- predict(obj,data.frame(time=x))
# Sign of the slope between consecutive grid points: +1 rising, -1 falling.
dif.F.V <- sign(diff(F.V))
#dif.mat <- cbind((min(time)+1):max(time),dif.F.V)
peak <- c()
valley <- c()
# A valley is a (-1,1) or (-1,0) sign transition; a peak is (1,-1) or (1,0).
for(i in 1:(length(dif.F.V)-1)){
is.valley <- all(dif.F.V[i:(i+1)]==c(-1,1))||all(dif.F.V[i:(i+1)]==c(-1,0))
is.peak <- all(dif.F.V[i:(i+1)]==c(1,-1))||all(dif.F.V[i:(i+1)]==c(1,0))
if(is.valley)
valley <- c(valley,x[i+1])
if(is.peak)
peak <- c(peak,x[i+1])
}
# Keep only extrema inside the requested time window.
p_res <- peak[(peak>=interval[1])&(peak<=interval[2])]
v_res <- valley[(valley>=interval[1])&(valley<=interval[2])]
# peak_diff/valley_diff are successive peak-to-peak / valley-to-valley
# spacings, i.e. per-cycle oscillation periods.
return(list(peak=p_res,valley=v_res, peak_diff=diff(p_res),valley_diff=diff(v_res),fitted_value=F.V))
}
range_11 <- function(x){x/(max(x)-min(x))} #for data scaling
plot_osc <- function(exp,time,plot_raw=F,raw=NULL,degree=20,interval=c(100,1500),ask=T,...){ #for plotting
pv <- find_peak_valley(exp=exp,time=time,degree=degree,interval=interval)
x <- min(time):max(time)
obj <- lm(exp~poly(time,degree))
F.V <- predict(obj,data.frame(time=x))
plot(exp~time,type="l",ylab="Detrended expression",xlab="mins",
main="Detrended data and polynomial-fitted curve",...)
lines(F.V~x,col="red")
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
legend("topright",lty=c(1,1,2,2),col=c("black","red","blue","green"),
legend=c("detrended data","fitted value","peak","valley"))
par(ask=ask)
plot(range_11(F.V)~x,type="l",xlab="mins",ylab="scaled expression",col="red",
main="Polynomial-fitted curve and lagged difference",...)
lines(range_11(diff(F.V))~x[-1],col="pink")
abline(0,0,lty="dashed")
legend("topright",lty=c(1,1,2,2),col=c("red","pink","blue","green"),
legend=c("fitted value","lagged diff","peak","valley"))
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
if(plot_raw){
plot(raw~time,type="l",xlab="mins",ylab="raw expression",col="black",
main="Raw data without detrending",...)
#lines(range_11(diff(F.V))~x[-1],col="pink")
#abline(0,0,lty="dashed")
legend("topright",lty=c(1,2,2),col=c("black","blue","green"),
legend=c("raw","peak","valley"))
for(i in pv$peak){
abline(v=i,col="blue",lty=2)
}
for(i in pv$valley){
abline(v=i,col="green",lty=2)
}
}
par(ask=F)
}
###########################
#Step 2: pre-processing raw .xlsx files
###########################
# Convert each raw spreadsheet into a tab-separated "*_raw.txt" file:
# read the "Results Table" sheet (header rows skipped), keep the
# measurement columns (5th up to the column before "Part"), and rewrite
# the first column of elapsed seconds as an H:MM:00 timestamp, the
# format expected by bandpass.pl in step 3.
raw_dir <- gsub("/$", "", raw_dir)
# Fix: the previous pattern ".xlsx" treated the dot as a regex wildcard
# and was unanchored, so unrelated file names could match; anchor the
# literal extension at the end of the name instead.
raw_files <- list.files(raw_dir, pattern = "\\.xlsx$")
for (i in seq_along(raw_files)) {
  raw_temp <- read_xlsx(file.path(raw_dir, raw_files[i]), sheet = "Results Table", skip = 6)
  raw_temp <- raw_temp[, 5:(which(colnames(raw_temp) == "Part") - 1)]
  colnames(raw_temp)[1] <- "#Steps"
  # round elapsed seconds, then format as hours:minutes with zero-padded
  # minutes (sprintf replaces the manual ifelse() padding)
  tm <- as.integer(round(raw_temp$`#Steps`, 0))
  raw_temp[, 1] <- sprintf("%d:%02d:00", tm %/% 60, tm %% 60)
  write.table(raw_temp,
              file = file.path(raw_dir, sub("\\.xlsx$", "_raw.txt", raw_files[i])),
              quote = FALSE, row.names = FALSE, col.names = TRUE, sep = "\t")
}
##########################
#Step 3: Run bandpass.pl
##########################
# Detrend every "*_raw.txt" file by piping it through the external
# bandpass.pl script (bp_script) with band limits bw1/bw2; each result
# is written next to its input as "*_detrended.txt".
bp_file <- list.files(raw_dir, pattern = "_raw.txt")
bp_out <- gsub("_raw.txt", "_detrended.txt", bp_file)
# build all shell commands up front, then run them in order
bp_cmds <- paste(bp_script, "<", file.path(raw_dir, bp_file),
                 bw1, bw2, ">", file.path(raw_dir, bp_out), sep = " ")
for (cmd in bp_cmds) {
  system(cmd)
}
#########################
#Step 4: Polynomial fitting and final results
#########################
# Load each raw/detrended file pair produced by steps 2-3 into a named
# list: Exp[[sample]]$raw and Exp[[sample]]$detrended hold the two data
# frames, and the sample names (Ind) are the "*_raw.txt" file names with
# the suffix stripped.
Exp <- list()
Ind <- gsub(pattern = "(.*)_raw.txt", replacement = "\\1", x = bp_file)
for (k in seq_along(bp_file)) {
  raw_tab <- read.delim(file.path(raw_dir, bp_file[k]), sep = "\t", header = TRUE)
  detrended_tab <- read.delim(file.path(raw_dir, bp_out[k]), sep = "\t", header = TRUE)
  Exp[[k]] <- list(raw = raw_tab, detrended = detrended_tab)
}
names(Exp) <- Ind
# Per-sample analysis loop: for every entry of Exp, fit a degree-25
# polynomial to each expression column, locate peaks/valleys, and write
# the peak/valley positions, their fitted values, oscillation periods,
# the full fitted curves (CSV), and a multi-page PDF of diagnostic plots
# into output_dir/<sample>/.
# Depends on siblings defined elsewhere in this file: dat_modify(),
# find_peak_valley(), plot_osc().
for(j in 1:length(Exp)){
###modify data to fit input format
# isRD: TRUE when Exp[[j]] is a (raw, detrended) pair from step 4's
# loader; FALSE when it is a bare data frame/list of expression columns.
isRD <- is.data.frame(Exp[[j]][[1]])
if(isRD){
raw_temp <- Exp[[j]][[1]]
raw_temp <- dat_modify(raw_temp)
dat_temp <- Exp[[j]][[2]]
dat_temp <- dat_modify(dat_temp)
}else{
dat_temp <- Exp[[j]]
# drop a leading non-factor index column before reformatting
if(!is.factor(dat_temp[,1])){
dat_temp[[1]] <- NULL
}
dat_temp <- dat_modify(dat_temp)
}
###get peak/valley information (saved in `pvpv`)
# interval upper bound: time at the row 90% through the data; the
# fractional row index 0.9*nrow is truncated by R's indexing.
if(dim(dat_temp)[2]==2){
# single expression column: apply() would drop dimensions, so build
# the one-element list by hand
pvpv <- list()
pvpv[[1]] <- find_peak_valley(dat_temp[,2],time=dat_temp[,1],
degree=25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]))
names(pvpv) <- colnames(dat_temp)[2]
}else{
pvpv <- apply(dat_temp[,-1],2,find_peak_valley,time=dat_temp[,1],
degree=25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]))
}
###pre-define empty variables to save peak/value information
# 10 rows = assumed maximum number of peaks/valleys per trace (see the
# d1 loop below); one column per expression trace.
Exp_Peak <- matrix(NA,10,length(pvpv))
colnames(Exp_Peak) <- names(pvpv)
Exp_Peakval <- Exp_Peak
Exp_Valley <- matrix(NA,10,length(pvpv))
colnames(Exp_Valley) <- names(pvpv)
Exp_Valleyval <- Exp_Valley
Ave_Period <- numeric(length(pvpv))
names(Ave_Period) <- names(pvpv)
# fitted values on the integer-minute grid; column 1 is time
# (dat_temp$mins assumed present after dat_modify -- TODO confirm)
Exp_FV <- matrix(NA,max(dat_temp$mins)-min(dat_temp$mins)+1,length(pvpv)+1)
Exp_FV[,1] <- min(dat_temp$mins):max(dat_temp$mins)
colnames(Exp_FV) <- c("time",names(pvpv))
###extract information from `pvpv`
for(i in 1:length(pvpv)){
p_temp <- pvpv[[i]]$peak
v_temp <- pvpv[[i]]$valley
# periods = gaps between consecutive peaks plus gaps between valleys
period_temp <- c(diff(p_temp),diff(v_temp))
###peak position
Exp_Peak[1:length(p_temp),i] <- p_temp
###valley position
Exp_Valley[1:length(v_temp),i] <- v_temp
Ave_Period[i] <- mean(period_temp)
###fitted value of polynomial regression
Exp_FV[,(i+1)] <- pvpv[[i]]$fitted_value
}
# look up the fitted value at each recorded peak/valley time
for(d1 in 1:10){ #assume no more than 10 peaks(valleys)
###peak values
for(d2 in 1:ncol(Exp_Peakval)){
if(!is.na(Exp_Peak[d1,d2])){
Exp_Peakval[d1,d2] <- Exp_FV[which(Exp_FV[,1]==Exp_Peak[d1,d2]),d2+1]
}
}
###valley values
for(d3 in 1:ncol(Exp_Valleyval)){
if(!is.na(Exp_Valley[d1,d3])){
Exp_Valleyval[d1,d3] <- Exp_FV[which(Exp_FV[,1]==Exp_Valley[d1,d3]),d3+1]
}
}
}
###peak oscillation period
Exp_Peak_Period <- apply(Exp_Peak,2,diff)
rownames(Exp_Peak_Period) <- paste0("Period ",1:nrow(Exp_Peak_Period))
###valley oscillation period
Exp_Valley_Period <- apply(Exp_Valley,2,diff)
rownames(Exp_Valley_Period) <- paste0("Period ",1:nrow(Exp_Valley_Period))
###save results to output directory
dir.create(file.path(output_dir,Ind[j]))
write.table(Exp_Valley,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valley.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peak,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peak.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Valleyval,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valleyval.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peakval,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peakval.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Peak_Period,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Peak_Period.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_Valley_Period,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_Valley_Period.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
write.table(Exp_FV,file=paste0(output_dir,"/",Ind[j],"/",Ind[j],"_fitted_values.csv"),quote = F, na="",col.names = T,row.names = F,sep = ",")
###generate plot to output directory
# one page per expression column; raw trace included when available
fname <- paste0(output_dir,"/",Ind[j],"/",Ind[j],".pdf")
pdf(fname,onefile = T,width = 10,height = 7)
for(i in 1:(ncol(dat_temp)-1)){
if(isRD){
plot_osc(dat_temp[,i+1],dat_temp[,1],plot_raw = T,
raw = raw_temp[,i+1],degree = 25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]),ask=F,sub=colnames(dat_temp)[i+1])
}else{
plot_osc(dat_temp[,i+1],dat_temp[,1],plot_raw = F,
degree = 25,interval = c(100,dat_temp[0.9*nrow(dat_temp),1]),ask=F,sub=colnames(dat_temp)[i+1])
}
}
dev.off()
}
|
\name{mapPheWAStoExclusions}
\alias{mapPheWAStoExclusions}
\title{
Map PheWAS codes to their exclusions
}
\description{
This function maps PheWAS codes (optionally with IDs for individuals) to a set of PheWAS code exclusions.
}
\usage{
mapPheWAStoExclusions(phewas.codes, ids=NA)
}
\arguments{
\item{phewas.codes}{
A vector of PheWAS codes.
}
\item{ids}{
An optional vector of ids to pair with the provided PheWAS codes.
}
}
\value{
A data frame containing phewas codes and their exclusions. IDs for those codes and exclusions are included if they were supplied.
\item{id}{If ids were provided, the individual ids are included as the first column}
\item{exclusion_criteria}{Input PheWAS codes}
\item{exclusion}{The exclusion PheWAS codes for the codes provided}
}
\author{
Robert Carroll
}
\keyword{ utilities }
|
/man/mapPheWAStoExclusions.Rd
|
no_license
|
laurakwiley/PheWAS
|
R
| false
| false
| 818
|
rd
|
\name{mapPheWAStoExclusions}
\alias{mapPheWAStoExclusions}
\title{
Map PheWAS codes to their exclusions
}
\description{
This function maps phewas codes (optionally with ids for individuals) to a set of PheWAS code exclusions.
}
\usage{
mapPheWAStoExclusions(phewas.codes, ids=NA)
}
\arguments{
\item{phewas.codes}{
A vector of PheWAS codes.
}
\item{ids}{
An optional vector of ids to pair with the provided PheWAS codes.
}
}
\value{
A data frame containing phewas codes and their exclusions. IDs for those codes and exclusions are included if they were supplied.
\item{id}{If ids were provided, the individual ids are included as the first column}
\item{exclusion_criteria}{Input PheWAS codes}
\item{exclusion}{The exclusion PheWAS codes for the codes provided}
}
\author{
Robert Carroll
}
\keyword{ utilities }
|
# Summarise posterior infectiousness draws per state and save to disk.
#
# For each state, takes the posterior draws in out$infectiousness
# (draws x time x state array), computes the mean and the 95% / 50%
# quantile bands over time, and writes two RDS files under usa/results/:
# a combined CSV-style data frame (mean + 95% CI) and a per-state list
# with all bands.
#
# Args:
#   states: list/vector of state names, aligned with the 3rd array dim.
#   out: model output containing $infectiousness (draws x time x state).
#   JOBID: identifier appended to the output file names.
# NOTE(review): reads the global `dates` (per-state date vectors) rather
# than taking it as an argument -- assumes it is defined in the calling
# environment. colQuantiles() comes from matrixStats. The repeated
# "infectioussness" spelling (extra "s") is kept as-is because it is
# part of the persisted column names.
calculate_infectiousness = function(states,out, JOBID) {
infectiousness_all <- vector("list", length = length(states))
all_data_out <- data.frame()
for (i in 1:length(states)){
state <- states[[i]]
# N: number of observed dates for this state (truncates the time axis)
N <- length(dates[[i]])
print(state)
inf <- colMeans(out$infectiousness[,1:N,i])
inf_li <- colQuantiles(out$infectiousness[,1:N,i],prob=.025)
inf_ui <- colQuantiles(out$infectiousness[,1:N,i],prob=.975)
inf_li2 <- colQuantiles(out$infectiousness[,1:N,i],prob=.25)
inf_ui2 <- colQuantiles(out$infectiousness[,1:N,i],prob=.75)
state_data <- data.frame("date" = dates[[i]],
"state" = rep(state, length(dates[[i]])),
"infectioussness" = inf,
"infectioussness_li" = inf_li,
"infectioussness_ui" = inf_ui,
"infectioussness_li2" = inf_li2,
"infectioussness_ui2" = inf_ui2)
infectiousness_all[[i]] <- state_data
# CSV export keeps only the mean and 95% band, with readable names
state_data_csv <- state_data[,c("date", "state", "infectioussness", "infectioussness_li", "infectioussness_ui")]
colnames(state_data_csv) <- c("date", "state", "mean_infectious_individuals", "infectious_individuals_lower_CI_95", "infectious_individuals_higher_CI_95")
all_data_out <- rbind(all_data_out, state_data_csv)
}
saveRDS(all_data_out, paste0("usa/results/", "infectious-individuals-out-",JOBID,".RDS"))
saveRDS(infectiousness_all, paste0('usa/results/infectiousness_all_', JOBID, '.RDS'))
}
|
/usa/code/utils/calculate-infectiousness.r
|
permissive
|
fvalka/covid19model
|
R
| false
| false
| 1,547
|
r
|
calculate_infectiousness = function(states,out, JOBID) {
infectiousness_all <- vector("list", length = length(states))
all_data_out <- data.frame()
for (i in 1:length(states)){
state <- states[[i]]
N <- length(dates[[i]])
print(state)
inf <- colMeans(out$infectiousness[,1:N,i])
inf_li <- colQuantiles(out$infectiousness[,1:N,i],prob=.025)
inf_ui <- colQuantiles(out$infectiousness[,1:N,i],prob=.975)
inf_li2 <- colQuantiles(out$infectiousness[,1:N,i],prob=.25)
inf_ui2 <- colQuantiles(out$infectiousness[,1:N,i],prob=.75)
state_data <- data.frame("date" = dates[[i]],
"state" = rep(state, length(dates[[i]])),
"infectioussness" = inf,
"infectioussness_li" = inf_li,
"infectioussness_ui" = inf_ui,
"infectioussness_li2" = inf_li2,
"infectioussness_ui2" = inf_ui2)
infectiousness_all[[i]] <- state_data
state_data_csv <- state_data[,c("date", "state", "infectioussness", "infectioussness_li", "infectioussness_ui")]
colnames(state_data_csv) <- c("date", "state", "mean_infectious_individuals", "infectious_individuals_lower_CI_95", "infectious_individuals_higher_CI_95")
all_data_out <- rbind(all_data_out, state_data_csv)
}
saveRDS(all_data_out, paste0("usa/results/", "infectious-individuals-out-",JOBID,".RDS"))
saveRDS(infectiousness_all, paste0('usa/results/infectiousness_all_', JOBID, '.RDS'))
}
|
library(shinydashboard)
library(shinyjs)
library(DT)
# Dashboard header: application title shown in the top bar.
header <- dashboardHeader(
title = "Trabajo con imágenes",
titleWidth = 250
)
# Dashboard sidebar: one menu entry per tab; tabName values must match
# the tabItem() ids declared in the body below.
sidebar <- dashboardSidebar(
# Menu with the tabs
sidebarMenu(
menuItem("Información de las mamografías", tabName = "info", icon = icon("table")),
menuItem("Limpieza del fondo", tabName = "clean_imgs", icon = icon("image")),
menuItem("Limpieza del músculo pectoral", tabName = "clean_imgs_2", icon = icon("image"))
),
width = 250
)
# Dashboard body: three tabs (overview table, background cleaning,
# pectoral-muscle cleaning). Output ids (info_table, image, clean_img*,
# ...) are rendered by the matching server code elsewhere in the app.
body <- dashboardBody(
useShinyjs(),
tabItems(
# First tab: detailed information about the images
tabItem(
tabName = "info",
# Main table with the detailed information
h2("Información de todas las mamografías"),
fluidRow(
box(
dataTableOutput("info_table", height = 620),
width = 9
),
box(
# Menu with the filters for the table
h3(strong("Filtros"), style = "text-align: center;"),
radioButtons("bg_tissue",
label = "Por tipo de tejido",
choices = c("Todos" = "", levels(info$bg_tissue))
),
radioButtons("abnorm",
label = "Por tipo de anormalidad",
choices = c("Todos" = "", levels(info$abnorm))
),
radioButtons("severity",
label = "Por diagnóstico",
choices = c("Todos" = "", levels(info$severity))
),
width = 3
)
),
# Individual visualization of each image
h2("Información por mamografía"),
fluidRow(
box(
div(
# Image selection
sliderInput("img_num", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid with each image's information and the image itself
div(
div(
# Per-image information
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("img_title", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("img_bg_tissue", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("img_abnorm", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("img_abnorm_details")
),
# Image display
imageOutput("image", height = "100%"),
style = "display: grid; grid-template-columns: 1fr 1fr; grid-gap: 20px;"
),
width = 12
),
)
),
# Second tab: cleaning the image background
tabItem(
tabName = "clean_imgs",
h2("Resultado de la limpieza replicando el artículo"),
fluidRow(
box(
div(
# Image selection
sliderInput("clean_img_num", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid with each image's information and the cleaned images
div(
div(
# Per-image information
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("clean_img_title", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("clean_img_bg_tissue", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("clean_img_abnorm", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("clean_img_abnorm_details"),
style = "grid-row: 1 / span 2"
),
# Display of the images
div(
div(h4(strong("Imagen original:")), style = "text-align: center;"),
imageOutput("orig_img", height = "100%")
),
div(
div(h4(strong("Imagen limpia:")), style = "text-align: center;"),
imageOutput("clean_img", height = "100%"),
),
div(
div(h4(strong("Imagen binarizada resultante:")), style = "text-align: center;"),
imageOutput("clean_img_bin", height = "100%"),
style = "grid-column: 2 / span 2"
),
style = "display: grid; grid-template: 1fr 1fr / 1fr 1fr 1fr ; grid-gap: 20px;"
),
width = 12
),
)
),
# Third tab: cleaning the pectoral muscle
tabItem(
tabName = "clean_imgs_2",
h2("Resultado de la limpieza replicando el artículo"),
fluidRow(
box(
div(
# Image selection
sliderInput("clean_img_num_2", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid with each image's information and the cleaned images
div(
div(
# Per-image information
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("clean_img_title_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("clean_img_bg_tissue_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("clean_img_abnorm_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("clean_img_abnorm_details_2"),
style = "grid-row: 1 / span 2"
),
# Display of the images
div(
div(h4(strong("Imagen original:")), style = "text-align: center;"),
imageOutput("orig_img_2", height = "100%")
),
div(
div(h4(strong("Imagen limpia:")), style = "text-align: center;"),
imageOutput("clean_img_2", height = "100%"),
),
div(
div(h4(strong("Imagen binarizada resultante:")), style = "text-align: center;"),
imageOutput("clean_img_bin_2", height = "100%"),
style = "grid-column: 2 / span 2"
),
style = "display: grid; grid-template: 1fr 1fr / 1fr 1fr 1fr ; grid-gap: 20px;"
),
width = 12
),
)
)
)
)
# Assemble the full UI from the three pieces defined above.
dashboardPage(
header, sidebar, body,
title = "Trabajo con imágenes",
skin = "purple"
)
|
/app/ui.R
|
permissive
|
data-and-code/trabajo_con_imagenes
|
R
| false
| false
| 7,524
|
r
|
library(shinydashboard)
library(shinyjs)
library(DT)
header <- dashboardHeader(
title = "Trabajo con imágenes",
titleWidth = 250
)
sidebar <- dashboardSidebar(
# Menú con las pestañas
sidebarMenu(
menuItem("Información de las mamografías", tabName = "info", icon = icon("table")),
menuItem("Limpieza del fondo", tabName = "clean_imgs", icon = icon("image")),
menuItem("Limpieza del músculo pectoral", tabName = "clean_imgs_2", icon = icon("image"))
),
width = 250
)
body <- dashboardBody(
useShinyjs(),
tabItems(
# Primera pestaña: Información detallada de las imágenes
tabItem(
tabName = "info",
# Tabla principal con la información detallada
h2("Información de todas las mamografías"),
fluidRow(
box(
dataTableOutput("info_table", height = 620),
width = 9
),
box(
# Menú con los filtros para la tabla
h3(strong("Filtros"), style = "text-align: center;"),
radioButtons("bg_tissue",
label = "Por tipo de tejido",
choices = c("Todos" = "", levels(info$bg_tissue))
),
radioButtons("abnorm",
label = "Por tipo de anormalidad",
choices = c("Todos" = "", levels(info$abnorm))
),
radioButtons("severity",
label = "Por diagnóstico",
choices = c("Todos" = "", levels(info$severity))
),
width = 3
)
),
# Visualización de cada imagen de forma individual
h2("Información por mamografía"),
fluidRow(
box(
div(
# Selección de la imagen
sliderInput("img_num", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid con la información de cada imagen y la visualización de la propia imagen
div(
div(
# Información de cada imagen
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("img_title", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("img_bg_tissue", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("img_abnorm", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("img_abnorm_details")
),
# Visualización de la imagen
imageOutput("image", height = "100%"),
style = "display: grid; grid-template-columns: 1fr 1fr; grid-gap: 20px;"
),
width = 12
),
)
),
# Segunda pestaña: Limpieza del fondo de las imágenes
tabItem(
tabName = "clean_imgs",
h2("Resultado de la limpieza replicando el artículo"),
fluidRow(
box(
div(
# Selección de la imagen
sliderInput("clean_img_num", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid con la información de cada imagen y la visualización de las imágenes limpias
div(
div(
# Información de cada imagen
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("clean_img_title", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("clean_img_bg_tissue", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("clean_img_abnorm", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("clean_img_abnorm_details"),
style = "grid-row: 1 / span 2"
),
# Visualización de las imágenes
div(
div(h4(strong("Imagen original:")), style = "text-align: center;"),
imageOutput("orig_img", height = "100%")
),
div(
div(h4(strong("Imagen limpia:")), style = "text-align: center;"),
imageOutput("clean_img", height = "100%"),
),
div(
div(h4(strong("Imagen binarizada resultante:")), style = "text-align: center;"),
imageOutput("clean_img_bin", height = "100%"),
style = "grid-column: 2 / span 2"
),
style = "display: grid; grid-template: 1fr 1fr / 1fr 1fr 1fr ; grid-gap: 20px;"
),
width = 12
),
)
),
# Tercera pestaña: Limpieza del músculo pectoral
tabItem(
tabName = "clean_imgs_2",
h2("Resultado de la limpieza replicando el artículo"),
fluidRow(
box(
div(
# Selección de la imagen
sliderInput("clean_img_num_2", label = "Número de imagen",
min = min(img_nums), max = max(img_nums), value = 1, step = 1,
animate = animationOptions(interval = 2000), pre = "Imagen "),
style = "text-align: center;"
),
# Grid con la información de cada imagen y la visualización de las imágenes limpias
div(
div(
# Información de cada imagen
div(h4(strong("Detalles:")), style = "text-align: center;"),
hr(),
h5(strong("Imagen: ")),
p(textOutput("clean_img_title_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de tejido: ")),
p(textOutput("clean_img_bg_tissue_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
h5(strong("Tipo de anormalidad: ")),
p(textOutput("clean_img_abnorm_2", inline = TRUE), style = "font-size: 16px; text-align: center;"),
dataTableOutput("clean_img_abnorm_details_2"),
style = "grid-row: 1 / span 2"
),
# Visualización de las imágenes
div(
div(h4(strong("Imagen original:")), style = "text-align: center;"),
imageOutput("orig_img_2", height = "100%")
),
div(
div(h4(strong("Imagen limpia:")), style = "text-align: center;"),
imageOutput("clean_img_2", height = "100%"),
),
div(
div(h4(strong("Imagen binarizada resultante:")), style = "text-align: center;"),
imageOutput("clean_img_bin_2", height = "100%"),
style = "grid-column: 2 / span 2"
),
style = "display: grid; grid-template: 1fr 1fr / 1fr 1fr 1fr ; grid-gap: 20px;"
),
width = 12
),
)
)
)
)
dashboardPage(
header, sidebar, body,
title = "Trabajo con imágenes",
skin = "purple"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timing_general.R
\name{CreateTimeunits}
\alias{CreateTimeunits}
\title{Creates time unit}
\usage{
CreateTimeunits(starttime)
}
\description{
Creates time unit
}
|
/man/CreateTimeunits.Rd
|
no_license
|
winggy/FluxnetLSM
|
R
| false
| true
| 240
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Timing_general.R
\name{CreateTimeunits}
\alias{CreateTimeunits}
\title{Creates time unit}
\usage{
CreateTimeunits(starttime)
}
\description{
Creates time unit
}
|
# {targets} pipeline: read the Excel workbook sheet-by-sheet, compute
# Euclidean and chi-square distance graphs (per subject and averaged),
# render the analysis notebook, and export the distance tables to xlsx.
library(targets)
library(tarchetypes)
# run targets in parallel background R sessions
future::plan(future::multisession)
# source all helper functions from R/ (read_data, calc_distances, ...)
purrr::walk(fs::dir_ls("R"), source)
tar_option_set(packages = c("tidyverse", "tidygraph", "ggraph"))
list(
# input workbook, tracked as a file target so edits invalidate downstream
tar_file(file_data, "data/data.xlsx"),
tar_target(sheet_names, readxl::excel_sheets(file_data)),
# dynamic branching: one branch per sheet
tar_target(
data,
read_data(file_data, sheet_names),
pattern = map(sheet_names)
),
tar_target(data_clean, wrangle_data(data)),
# Euclidean-distance branch: per-subject and mean graphs
tar_target(dist_euc, calc_distances(data_clean, "euclidean")),
tar_target(mean_dist_euc, mean_distances(dist_euc)),
tar_target(graphs_euc, create_graph(dist_euc, type, SID, Time)),
tar_target(mean_graphs_euc, create_graph(mean_dist_euc, type, Time)),
# chi-square-distance branch: same structure as above
tar_target(dist_chi, calc_distances(data_clean, "chisq")),
tar_target(mean_dist_chi, mean_distances(dist_chi)),
tar_target(graphs_chi, create_graph(dist_chi, type, SID, Time)),
tar_target(mean_graphs_chi, create_graph(mean_dist_chi, type, Time)),
# child documents included by the notebook below
tar_file(file_child_viz_graph, "archetypes/child_vis_graph.Rmd"),
tar_file(file_child_statistics, "archetypes/child_statistics.Rmd"),
tar_render(
output_analysis_notes,
"analysis/notes.Rmd",
output_dir = "output"
),
# xlsx exports of the distance tables (per-subject files split by SID)
tar_file(
file_dist_mean_euc,
output_xlsx(mean_dist_euc, "output/data_mean_euc.xlsx")
),
tar_file(
file_dist_each_euc,
output_xlsx(dist_euc, "output/data_euc.xlsx", by = "SID")
),
tar_file(
file_dist_mean_chi,
output_xlsx(mean_dist_chi, "output/data_mean_chi.xlsx")
),
tar_file(
file_dist_each_chi,
output_xlsx(dist_chi, "output/data_chi.xlsx", by = "SID")
)
)
|
/_targets.R
|
no_license
|
psychelzh/linguistic
|
R
| false
| false
| 1,586
|
r
|
library(targets)
library(tarchetypes)
future::plan(future::multisession)
purrr::walk(fs::dir_ls("R"), source)
tar_option_set(packages = c("tidyverse", "tidygraph", "ggraph"))
list(
tar_file(file_data, "data/data.xlsx"),
tar_target(sheet_names, readxl::excel_sheets(file_data)),
tar_target(
data,
read_data(file_data, sheet_names),
pattern = map(sheet_names)
),
tar_target(data_clean, wrangle_data(data)),
tar_target(dist_euc, calc_distances(data_clean, "euclidean")),
tar_target(mean_dist_euc, mean_distances(dist_euc)),
tar_target(graphs_euc, create_graph(dist_euc, type, SID, Time)),
tar_target(mean_graphs_euc, create_graph(mean_dist_euc, type, Time)),
tar_target(dist_chi, calc_distances(data_clean, "chisq")),
tar_target(mean_dist_chi, mean_distances(dist_chi)),
tar_target(graphs_chi, create_graph(dist_chi, type, SID, Time)),
tar_target(mean_graphs_chi, create_graph(mean_dist_chi, type, Time)),
tar_file(file_child_viz_graph, "archetypes/child_vis_graph.Rmd"),
tar_file(file_child_statistics, "archetypes/child_statistics.Rmd"),
tar_render(
output_analysis_notes,
"analysis/notes.Rmd",
output_dir = "output"
),
tar_file(
file_dist_mean_euc,
output_xlsx(mean_dist_euc, "output/data_mean_euc.xlsx")
),
tar_file(
file_dist_each_euc,
output_xlsx(dist_euc, "output/data_euc.xlsx", by = "SID")
),
tar_file(
file_dist_mean_chi,
output_xlsx(mean_dist_chi, "output/data_mean_chi.xlsx")
),
tar_file(
file_dist_each_chi,
output_xlsx(dist_chi, "output/data_chi.xlsx", by = "SID")
)
)
|
#' @title The WeStCOMS mesh around a sample of elements in the Oban area
#' @description An unstructured grid surrounding elements (i.e. based on nodes) for a subset of the WeStCOMS mesh in an area around Oban (see \code{\link[fvcom.tbx]{dat_area_boundaries}}).
#'
#' @format A SpatialPolygonsDataFrame (see \code{\link[sp]{SpatialPolygonsDataFrame-class}}) with 1307 features (i.e. cells).
#'
#' @source The WeStCOMS mesh was designed by Dmitry Aleynik.
"dat_mesh_around_elements"
|
/R/dat_mesh_around_elements.R
|
no_license
|
han-tun/fvcom.tbx
|
R
| false
| false
| 482
|
r
|
#' @title The WeStCOMS mesh around a sample of elements in the Oban area
#' @description An unstructured grid surrounding elements (i.e. based on nodes) for a subset of the WeStCOMS mesh in an area around Oban (see \code{\link[fvcom.tbx]{dat_area_boundaries}}).
#'
#' @format A SpatialPolygonsDataFrame (see \code{\link[sp]{SpatialPolygonsDataFrame-class}}) with 1307 features (i.e. cells).
#'
#' @source The WeStCOMS mesh was designed by Dmitry Aleynik.
"dat_mesh_around_elements"
|
# fastmetrics: Performance metrics ported from scikit-learn
# Copyright (C) 2013 Sean Whalen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
# Trapezoidal-rule integration of y over x (port of sklearn's auc helper).
#
# Args:
#   x, y: numeric vectors of equal length giving the curve coordinates.
#   reorder: if TRUE, sort the points by (x, y) before integrating.
# Returns:
#   The area under the curve; when x is entirely non-increasing the sign
#   is flipped so a descending sweep still yields a positive area.
trapz <- function(x, y, reorder = FALSE) {
  if (reorder) {
    sorted_indices <- order(x, y)
    x <- x[sorted_indices]
    y <- y[sorted_indices]
  }
  widths <- diff(x)
  heights <- head(y, -1) + tail(y, -1)
  # ifelse() is vectorized and wrong-shaped for a scalar condition;
  # a plain if/else is the idiomatic (and faster) form here.
  direction <- if (all(widths <= 0)) -1 else 1
  direction * as.numeric(widths %*% heights) / 2
}
# True/false positive counts at each distinct score threshold
# (port of sklearn's private _binary_clf_curve).
#
# Args:
#   y_true: vector of class labels containing exactly two distinct values.
#   y_score: numeric prediction scores, higher = more positive.
#   pos_label: label treated as positive; defaults to 1.
# Returns:
#   list(fps, tps, thresholds), each ordered by decreasing score, with
#   one entry per distinct score value.
binary_clf_curve <- function(y_true, y_score, pos_label = NULL) {
  # binary classification only
  stopifnot(length(unique(y_true)) == 2)
  if (is.null(pos_label)) pos_label <- 1
  # boolean positive-class indicator
  is_pos <- y_true == pos_label
  # arrange by decreasing score (rev(order()) keeps the original's tie
  # ordering; cumulative counts at distinct thresholds are unaffected)
  by_score_desc <- rev(order(y_score))
  scores_desc <- y_score[by_score_desc]
  is_pos <- is_pos[by_score_desc]
  # keep only the last index of each run of tied scores, plus the end
  # of the vector, so each threshold appears once
  last_of_each_score <- c(which(diff(scores_desc) != 0), length(is_pos))
  true_pos <- cumsum(is_pos)[last_of_each_score]
  false_pos <- last_of_each_score - true_pos
  list(false_pos, true_pos, scores_desc[last_of_each_score])
}
# ROC curve points (port of sklearn's roc_curve).
#
# Returns list(fpr, tpr, thresholds), ordered by decreasing threshold;
# an artificial leading point is prepended so the curve starts at fpr = 0.
roc_curve <- function(y_true, y_score, pos_label = NULL) {
  counts <- binary_clf_curve(y_true, y_score, pos_label)
  fps <- counts[[1]]
  tps <- counts[[2]]
  thresholds <- counts[[3]]
  # ensure the curve has a starting point: prepend zero counts with a
  # threshold just above the maximum observed score when needed
  if (length(tps) == 0 || fps[1] != 0) {
    fps <- c(0, fps)
    tps <- c(0, tps)
    thresholds <- c(head(thresholds, 1) + 1, thresholds)
  }
  # normalise counts by their totals to get rates
  list(fps / tail(fps, 1), tps / tail(tps, 1), thresholds)
}
# Precision/recall pairs at each distinct score threshold (port of
# sklearn's precision_recall_curve). Returns list(precision, recall,
# thresholds), ordered by decreasing threshold.
# NOTE(review): sklearn reverses the outputs and appends the
# (precision = 1, recall = 0) end point; that code is disabled below
# (see the TODO), so areas computed from these points can be wrong and
# the corresponding unit test is expected to fail.
precision_recall_curve <- function(y_true, y_score, pos_label = NULL) {
points <- binary_clf_curve(y_true, y_score, pos_label)
fps <- points[[1]]
tps <- points[[2]]
thresholds <- points[[3]]
# precision = TP / predicted positives; recall = TP / total positives
precision <- tps / (tps + fps)
recall <- tps / tail(tps, 1)
# TODO: this code leads to incorrect aupr, commented out for now, unit test will fail in the meantime
# stop when full recall attained
# and reverse the outputs so recall is decreasing
# last_ind <- which(tps == tail(tps, 1))[1]
# sl <- last_ind:1
# precision <- c(precision[sl], 1)
# recall <- c(recall[sl], 0)
# thresholds <- thresholds[sl]
list(precision, recall, thresholds)
}
# Area under the precision-recall curve (average precision).
# NOTE(review): depends on precision_recall_curve(), whose end-point
# handling is currently disabled (see the TODO there), so this value can
# be incorrect for some inputs until that code is restored.
average_precision_score <- function(y_true, y_score, pos_label = NULL) {
# points[[2]] = recall (x axis), points[[1]] = precision (y axis)
points <- precision_recall_curve(y_true, y_score, pos_label)
trapz(points[[2]], points[[1]], reorder = F)
}
# Area under the ROC curve: trapezoidal integration of the (fpr, tpr)
# points; reorder = T sorts the points by increasing fpr first.
roc_auc_score <- function(y_true, y_score, pos_label = NULL) {
  curve <- roc_curve(y_true, y_score, pos_label)
  trapz(curve[[1]], curve[[2]], reorder = T)
}
# F-max: the maximum F-beta score over all score thresholds, computed
# from the precision-recall curve (beta = 1 gives the classic F1).
# NaN entries (0/0 when precision and recall are both zero) are dropped
# via complete.cases() before taking the max. Note `**` parses as `^` in R.
fmax_score <- function(y_true, y_score, beta = 1.0, pos_label = NULL) {
# Radivojac, P. et al. (2013). A Large-Scale Evaluation of Computational Protein Function Prediction. Nature Methods, 10(3), 221–227.
# Manning, C. D. et al. (2008). Evaluation in Information Retrieval. In Introduction to Information Retrieval. Cambridge University Press.
points <- precision_recall_curve(y_true, y_score, pos_label)
precision <- points[[1]]
recall <- points[[2]]
f1 <- (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
max(f1[complete.cases(f1)])
}
# Unit checks for trapz(): simple triangles/rectangles, descending x,
# and reorder = T cases with duplicated x values (ties must not change
# the computed area).
test_trapz <- function() {
stopifnot(
trapz(c(0, 1), c(0, 1)) == 0.5
)
stopifnot(
trapz(c(1, 0), c(0, 1)) == 0.5
)
stopifnot(
trapz(c(1, 0, 0), c(0, 1, 1)) == 0.5
)
stopifnot(
trapz(c(0, 1), c(1, 1)) == 1.0
)
stopifnot(
trapz(c(0, 0.5, 1), c(0, 0.5, 1)) == 0.5
)
stopifnot( # test duplicate values
trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 0.0, 0.5, 1.0, 1.0), reorder = T) == 3
)
stopifnot( # test duplicate values
trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 1.0, 0.0, 0.5, 1.0), reorder = T) == 3
)
stopifnot( # test duplicate values
trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 1.0, 0.5, 0.0, 1.0), reorder = T) == 3
)
}
# Unit check for precision_recall_curve() against sklearn's reference
# values (which include the appended end point).
# NOTE(review): the end-point code in precision_recall_curve() is
# currently disabled (see its TODO), so the expected 5-element vectors
# no longer match the 4-element outputs and this test is expected to fail.
test_precision_recall_curve <- function() {
expected_precision <- c(1/2., 1/3., 1/2., 1., 1.)
expected_recall <- c(1., 1/2., 1/2., 1/2., 0.)
expected_thresholds <- c(1, 2, 3, 4)
points <- precision_recall_curve(c(1, 0, 0, 1), c(1, 2, 3, 4))
precision <- points[[1]]
recall <- points[[2]]
thresholds <- points[[3]]
stopifnot(
sum(precision - expected_precision) < 1e-7
)
stopifnot(
sum(recall - expected_recall) < 1e-7
)
stopifnot(
sum(thresholds - expected_thresholds) < 1e-7
)
}
# Structural checks for roc_curve() on random scores: the fpr vector
# must start at 0 and end at 1, and all three outputs must be aligned.
test_roc_curve_end_points <- function() {
set.seed(0)
y_true <- c(rep(0, 50), rep(1, 50))
y_score <- sample(0:2, 100, replace = T)
points <- roc_curve(y_true, y_score)
fpr <- points[[1]]
tpr <- points[[2]]
thresholds <- points[[3]]
stopifnot(
head(fpr, 1) == 0
)
stopifnot(
tail(fpr, 1) == 1
)
stopifnot(
length(fpr) == length(tpr)
)
stopifnot(
length(fpr) == length(thresholds)
)
}
# Unit checks for average_precision_score() against sklearn's values.
# NOTE(review): precision_recall_curve()'s end-point handling is
# disabled (see its TODO), so some of these assertions are expected to
# fail until that code is restored.
test_average_precision_score <- function() {
stopifnot( # test best
average_precision_score(c(0, 1), c(0.0, 1.0)) == 1.0
)
stopifnot( # test worst
average_precision_score(c(1, 0), c(0.0, 1.0)) == 0.25
)
stopifnot( # test alternate labels
average_precision_score(c('z', 'z', 'a', 'a'), c(0.1, 0.4, 0.35, 0.8), pos_label = 'a') - 0.7916667 < 1e-7
)
stopifnot( # test random
average_precision_score(c(1, 1, 1, 1, 0), c(0.025, 0.469, 0.418, 0.288, 0.032)) - 0.94374 < 1e-5
)
stopifnot( # test duplicate values
average_precision_score(c(0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1), c(0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1)) == 1
)
stopifnot( # test ties
average_precision_score(c(0, 1, 1), c(.5, .5, .6)) != 1
)
}
# Unit checks for roc_auc_score(): perfect/inverted rankings, a
# non-default positive label, and a random case against the known AUC.
test_roc_auc_score <- function() {
stopifnot( # test best
roc_auc_score(c(0, 1), c(0.0, 1.0)) == 1.0
)
stopifnot( # test worst
roc_auc_score(c(1, 0), c(0.0, 1.0)) == 0.0
)
stopifnot( # test alternate labels
roc_auc_score(c('z', 'z', 'a', 'a'), c(0.1, 0.4, 0.35, 0.8), pos_label = 'a') == 0.75
)
stopifnot( # test random
roc_auc_score(c(0, 1, 0, 1, 1), c(0.025, 0.469, 0.418, 0.288, 0.032)) - 2/3. < 1e-7
)
}
|
/R/fastmetrics.R
|
no_license
|
Web5design/fastmetrics
|
R
| false
| false
| 7,459
|
r
|
# fastmetrics: Performance metrics ported from scikit-learn
# Copyright (C) 2013 Sean Whalen
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
# Integrate the curve (x, y) with the trapezoidal rule.
#
# Args:
#   x: numeric vector of x coordinates.
#   y: numeric vector of y coordinates, same length as x.
#   reorder: if TRUE, sort the points by x (ties broken by y) before
#     integrating; needed when the curve is not already ordered in x.
#
# Returns:
#   Scalar trapezoidal estimate of the area. When every step in x is
#   non-positive (curve traversed right-to-left) the sign is flipped so the
#   area is reported as positive.
trapz <- function(x, y, reorder = FALSE) {  # FALSE, not the reassignable F
  if (reorder) {
    # Stable sort on x with y as the tie-breaker.
    sorted_indices <- order(x, y)
    x <- x[sorted_indices]
    y <- y[sorted_indices]
  }
  widths <- diff(x)
  # Sum of adjacent y pairs; widths %*% heights / 2 is the trapezoid sum.
  heights <- head(y, -1) + tail(y, -1)
  # Scalar condition: plain if/else instead of vectorized ifelse().
  direction <- if (all(widths <= 0)) -1 else 1
  direction * as.numeric(widths %*% heights) / 2
}
# Accumulate false/true positive counts while sweeping the decision
# threshold from the highest score down to the lowest.
#
# Args:
#   y_true: ground-truth labels with exactly two distinct values.
#   y_score: classifier scores; higher means more confidently positive.
#   pos_label: which label counts as positive (defaults to 1).
#
# Returns:
#   list(fps, tps, thresholds) with one entry per distinct score value.
binary_clf_curve <- function(y_true, y_score, pos_label = NULL) {
  # Only binary problems are supported.
  stopifnot(length(unique(y_true)) == 2)
  if (is.null(pos_label)) pos_label <- 1
  positives <- y_true == pos_label
  # Walk the scores from highest to lowest (kept as rev(order(...)) to
  # preserve the exact tie ordering of the original implementation).
  by_score_desc <- rev(order(y_score))
  scores <- y_score[by_score_desc]
  positives <- positives[by_score_desc]
  # Keep the last index of each run of equal scores, plus the final
  # position so the curve always reaches the end of the data.
  last_of_run <- which(diff(scores) != 0)
  cut_points <- c(last_of_run, length(positives))
  true_pos <- cumsum(positives)[cut_points]
  # Everything seen so far that is not a true positive is a false positive.
  false_pos <- cut_points - true_pos
  list(false_pos, true_pos, scores[cut_points])
}
# Compute the ROC curve (false-positive rate vs true-positive rate).
#
# Args:
#   y_true: ground-truth labels with exactly two distinct values.
#   y_score: classifier scores; higher means more confidently positive.
#   pos_label: which label counts as positive (defaults to 1).
#
# Returns:
#   list(fpr, tpr, thresholds).
roc_curve <- function(y_true, y_score, pos_label = NULL) {
  curve <- binary_clf_curve(y_true, y_score, pos_label)
  fps <- curve[[1]]
  tps <- curve[[2]]
  thresholds <- curve[[3]]
  # Prepend an artificial point so the curve starts at the origin when the
  # raw counts do not already begin with zero false positives.
  if (length(tps) == 0 || fps[1] != 0) {
    fps <- c(0, fps)
    tps <- c(0, tps)
    thresholds <- c(head(thresholds, 1) + 1, thresholds)
  }
  # Normalise cumulative counts into rates using the final totals.
  list(fps / tail(fps, 1), tps / tail(tps, 1), thresholds)
}
# Compute precision-recall pairs for decreasing decision thresholds.
#
# Args:
#   y_true: ground-truth labels with exactly two distinct values.
#   y_score: classifier scores; higher means more confidently positive.
#   pos_label: which label counts as positive (defaults to 1 inside
#     binary_clf_curve).
#
# Returns:
#   list(precision, recall, thresholds), one entry per distinct score value.
precision_recall_curve <- function(y_true, y_score, pos_label = NULL) {
  points <- binary_clf_curve(y_true, y_score, pos_label)
  fps <- points[[1]]
  tps <- points[[2]]
  thresholds <- points[[3]]
  # precision = TP / (TP + FP); recall = TP / total positives.
  precision <- tps / (tps + fps)
  recall <- tps / tail(tps, 1)
  # TODO: this code leads to incorrect aupr, commented out for now, unit test will fail in the meantime
  # stop when full recall attained
  # and reverse the outputs so recall is decreasing
  # last_ind <- which(tps == tail(tps, 1))[1]
  # sl <- last_ind:1
  # precision <- c(precision[sl], 1)
  # recall <- c(recall[sl], 0)
  # thresholds <- thresholds[sl]
  list(precision, recall, thresholds)
}
# Average precision: area under the precision-recall curve, obtained by
# integrating precision over recall with the trapezoidal rule.
average_precision_score <- function(y_true, y_score, pos_label = NULL) {
  pr <- precision_recall_curve(y_true, y_score, pos_label)
  precision <- pr[[1]]
  recall <- pr[[2]]
  # Points arrive in threshold order, so no reordering is applied here.
  trapz(recall, precision, reorder = FALSE)
}
# Area under the ROC curve; points are reordered so fpr is ascending
# before integration.
roc_auc_score <- function(y_true, y_score, pos_label = NULL) {
  roc <- roc_curve(y_true, y_score, pos_label)
  trapz(roc[[1]], roc[[2]], reorder = TRUE)
}
# F-max score: the maximum F-beta value attained over all thresholds of the
# precision-recall curve.
#
# Args:
#   y_true: ground-truth labels with exactly two distinct values.
#   y_score: classifier scores; higher means more confidently positive.
#   beta: weight of recall relative to precision in the F-beta formula.
#   pos_label: which label counts as positive (defaults to 1 downstream).
#
# Returns:
#   Scalar maximum F-beta over the curve's thresholds.
fmax_score <- function(y_true, y_score, beta = 1.0, pos_label = NULL) {
  # Radivojac, P. et al. (2013). A Large-Scale Evaluation of Computational Protein Function Prediction. Nature Methods, 10(3), 221–227.
  # Manning, C. D. et al. (2008). Evaluation in Information Retrieval. In Introduction to Information Retrieval. Cambridge University Press.
  points <- precision_recall_curve(y_true, y_score, pos_label)
  precision <- points[[1]]
  recall <- points[[2]]
  # Thresholds where precision and recall are both zero produce NaN here.
  f1 <- (1 + beta**2) * (precision * recall) / ((beta**2 * precision) + recall)
  # complete.cases() drops NA/NaN entries before taking the maximum.
  max(f1[complete.cases(f1)])
}
# Unit tests for trapz(); stopifnot() aborts on the first failing check.
# Uses TRUE instead of the reassignable shorthand T.
test_trapz <- function() {
  stopifnot(
    trapz(c(0, 1), c(0, 1)) == 0.5
  )
  stopifnot( # descending x: sign flip keeps the area positive
    trapz(c(1, 0), c(0, 1)) == 0.5
  )
  stopifnot(
    trapz(c(1, 0, 0), c(0, 1, 1)) == 0.5
  )
  stopifnot(
    trapz(c(0, 1), c(1, 1)) == 1.0
  )
  stopifnot(
    trapz(c(0, 0.5, 1), c(0, 0.5, 1)) == 0.5
  )
  stopifnot( # test duplicate values
    trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 0.0, 0.5, 1.0, 1.0), reorder = TRUE) == 3
  )
  stopifnot( # test duplicate values
    trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 1.0, 0.0, 0.5, 1.0), reorder = TRUE) == 3
  )
  stopifnot( # test duplicate values
    trapz(c(-2.0, 0.0, 0.0, 0.0, 1.0), c(2.0, 1.0, 0.5, 0.0, 1.0), reorder = TRUE) == 3
  )
}
# Unit test for precision_recall_curve() against hand-computed values.
# NOTE: per the TODO inside precision_recall_curve(), the truncation to full
# recall is currently disabled, so this test is expected to fail until fixed.
test_precision_recall_curve <- function() {
  expected_precision <- c(1/2., 1/3., 1/2., 1., 1.)
  expected_recall <- c(1., 1/2., 1/2., 1/2., 0.)
  expected_thresholds <- c(1, 2, 3, 4)
  points <- precision_recall_curve(c(1, 0, 0, 1), c(1, 2, 3, 4))
  precision <- points[[1]]
  recall <- points[[2]]
  thresholds <- points[[3]]
  # Compare with the sum of ABSOLUTE errors: the previous form summed signed
  # differences, which lets positive and negative deviations cancel out.
  stopifnot(
    sum(abs(precision - expected_precision)) < 1e-7
  )
  stopifnot(
    sum(abs(recall - expected_recall)) < 1e-7
  )
  stopifnot(
    sum(abs(thresholds - expected_thresholds)) < 1e-7
  )
}
# Sanity-checks roc_curve() on random scores: the FPR axis must start at 0
# and end at 1, and the three returned vectors must all have equal length.
test_roc_curve_end_points <- function() {
  set.seed(0)  # make the sample() draw below deterministic
  y_true <- c(rep(0, 50), rep(1, 50))
  y_score <- sample(0:2, 100, replace = T)
  points <- roc_curve(y_true, y_score)
  fpr <- points[[1]]
  tpr <- points[[2]]
  thresholds <- points[[3]]
  stopifnot(
    head(fpr, 1) == 0
  )
  stopifnot(
    tail(fpr, 1) == 1
  )
  stopifnot(
    length(fpr) == length(tpr)
  )
  stopifnot(
    length(fpr) == length(thresholds)
  )
}
# Unit tests for average_precision_score(). Tolerance comparisons now use
# abs(): the previous one-sided form `x - expected < tol` also passes when x
# is arbitrarily far BELOW the expected value, masking regressions.
test_average_precision_score <- function() {
  stopifnot( # test best
    average_precision_score(c(0, 1), c(0.0, 1.0)) == 1.0
  )
  stopifnot( # test worst
    average_precision_score(c(1, 0), c(0.0, 1.0)) == 0.25
  )
  stopifnot( # test alternate labels
    abs(average_precision_score(c('z', 'z', 'a', 'a'), c(0.1, 0.4, 0.35, 0.8), pos_label = 'a') - 0.7916667) < 1e-7
  )
  stopifnot( # test random
    abs(average_precision_score(c(1, 1, 1, 1, 0), c(0.025, 0.469, 0.418, 0.288, 0.032)) - 0.94374) < 1e-5
  )
  stopifnot( # test duplicate values
    average_precision_score(c(0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1), c(0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1)) == 1
  )
  stopifnot( # test ties
    average_precision_score(c(0, 1, 1), c(.5, .5, .6)) != 1
  )
}
# Unit tests for roc_auc_score(). The tolerance check is made two-sided with
# abs(); the previous `x - expected < tol` form passed for any x below the
# expected value.
test_roc_auc_score <- function() {
  stopifnot( # test best
    roc_auc_score(c(0, 1), c(0.0, 1.0)) == 1.0
  )
  stopifnot( # test worst
    roc_auc_score(c(1, 0), c(0.0, 1.0)) == 0.0
  )
  stopifnot( # test alternate labels
    roc_auc_score(c('z', 'z', 'a', 'a'), c(0.1, 0.4, 0.35, 0.8), pos_label = 'a') == 0.75
  )
  stopifnot( # test random
    abs(roc_auc_score(c(0, 1, 0, 1, 1), c(0.025, 0.469, 0.418, 0.288, 0.032)) - 2/3.) < 1e-7
  )
}
|
# Auto-generated valgrind/fuzz test case: builds a 1x7 matrix (with extreme
# double values) and a 1x1 matrix, then calls the internal
# multivariance:::match_rows on them and prints the result's structure.
testlist <- list(A = structure(c(2.31584178474648e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112171-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 257
|
r
|
# Auto-generated valgrind/fuzz test case: builds a 1x7 matrix (with extreme
# double values) and a 1x1 matrix, then calls the internal
# multivariance:::match_rows on them and prints the result's structure.
testlist <- list(A = structure(c(2.31584178474648e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
library(SimSpin)
library(shinycssloaders)
library(plot3D)
# Allow uploaded simulation files up to 100 MB (Shiny's default is 5 MB).
options(shiny.maxRequestSize = 100*1024^2)
# Single-page UI: a navbar with four tabs (Analyse, Cube, Observe, Contact),
# each pairing a parameter sidebar with spinner-wrapped output panels.
shinyUI(
  fluidPage(
    titlePanel('', windowTitle = 'SimSpin'),
    tags$head(tags$link(rel="shortcut icon", href="favicon.ico"),
              tags$link(rel="apple-touch-icon", href="apple_icon.png", sizes="158x158")
    ),
    tags$style(type="text/css", "body { overflow-y: scroll; }"),
    div(class = " titleContainer", img(src="simspin.gif",class="headerImage",style="width:100%")),
    div(class="row marginRow"),
    div (id="navAlign",
         navbarPage(" ", theme = shinytheme("simplex"), fluid = TRUE, collapsible = TRUE,
                    # --- Tab 1: kinematic analysis of an uploaded simulation ---
                    tabPanel("Analyse",
                             # Sidebar with a file input for simulation to be analysed
                             sidebarLayout(
                               sidebarPanel(
                                 checkboxInput("example_file", label = "Use SimSpin example file?", value=TRUE),
                                 fileInput("sim_file", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
                                           placeholder = "No file selected"),
                                 checkboxGroupInput("ptype", label = "Particle types:",
                                                    choiceNames = c("Dark Matter", "Disc", "Bulge"),
                                                    choiceValues = c(1, 2, 3), selected = c(2,3)),
                                 selectInput("DM_profile", label = "Dark Matter Profile:", choices = c("Hernquist", "NFW", "None")),
                                 # only show this panel if DM_profile == Hernquist
                                 conditionalPanel(
                                   condition = "input.DM_profile == 'Hernquist'",
                                   numericInput("DM_mass", label = "Mass:", value = 185.966),
                                   numericInput("DM_a", label = "Scale radius:", value = 34.5)
                                 ),
                                 # only show this panel if DM_profile == NFW
                                 conditionalPanel(
                                   condition = "input.DM_profile == 'NFW'",
                                   numericInput("DM_vm", label = "Virial mass:", value = 185.966),
                                   numericInput("DM_a", label = "Scale radius:", value = 34.5),
                                   numericInput("DM_rhof", label = "Density at flattening radius:", value = 0.00035)
                                 ),
                                 selectInput("bin_type", label = "Segementation:", choices = c("r", "cr", "z"), selected = "r",
                                             multiple = FALSE),
                                 sliderInput("rmax", label = "Maximum radius:", min = 1, max = 500, value = 200, step = 1),
                                 sliderInput("rbin", label = "Number of segments:", min = 1, max = 1000, value = 200, step = 1),
                                 actionButton("submit_1", label = "Go!")
                               ),
                               mainPanel(
                                 tabsetPanel(
                                   tabPanel("Segments",
                                            h3(htmlOutput("segments_title")),
                                            withSpinner(plotOutput("simulation_segments"), type = 5, color="#581845"),
                                            htmlOutput("user_specs")
                                   ),
                                   tabPanel("Kinematics",
                                            h3(htmlOutput("logp_title")),
                                            withSpinner(plotOutput("logp_dist"), type = 5, color="#FF5733"),
                                            h3(htmlOutput("vrot_title")),
                                            withSpinner(plotOutput("vrot_dist"), type = 5, color="#FFC300"),
                                            h5(htmlOutput("vrot_beware"))
                                   ),
                                   tabPanel("Summary",
                                            h3(htmlOutput("kin_title")),
                                            withSpinner(tableOutput("kinematics"), type = 5, color="#C70039"),
                                            h5(htmlOutput("kin_beware")))
                                 ))
                             )
                    ),
                    # --- Tab 2: build a mock IFU data cube from the simulation ---
                    tabPanel("Cube",
                             sidebarLayout(
                               sidebarPanel(
                                 # JS keeps input$innerWidth in sync with the browser width
                                 tags$head(tags$script('$(document).on("shiny:connected", function(e) {
                                                       Shiny.onInputChange("innerWidth", window.innerWidth);
                                                       });
                                                       $(window).resize(function(e) {
                                                       Shiny.onInputChange("innerWidth", window.innerWidth);
                                                       });
                                                       ')),
                                 checkboxInput("example_file_2", label = "Use SimSpin example file?", value=TRUE),
                                 fileInput("sim_file_2", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
                                           placeholder = "No file selected"),
                                 checkboxGroupInput("ptype_2", label = "Particle types:",
                                                    choiceNames = c("Disc", "Bulge"),
                                                    choiceValues = c(2, 3), selected = c(2,3)),
                                 #numericInput("r200", label = "Virial radius:", value = 200),
                                 sliderInput("z", label = "Redshift:", min = 0.01, max = 0.1, value = 0.06),
                                 selectInput("survey", label = "Survey:", choices = c("SAMI", "MaNGA", "CALIFA", "Hector", "Specified")),
                                 # only show this panel if survey == "Specified"
                                 conditionalPanel(
                                   condition = "input.survey == 'Specified'",
                                   numericInput("fov", label = "IFU field of view:", value = 15),
                                   selectInput("ap_shape", label = "Aperture shape:", choices = c("circular", "square", "hexagonal")),
                                   numericInput("central_wvl", label = HTML("Central filter wavelength / &Aring; :"), value = 4800),
                                   numericInput("lsf_fwhm", label = "Line spread function:", value = 2.65),
                                   sliderInput("pixel_sscale", label = "Spaxel scale / '' :", min = 0.25, max = 2, value = 0.5, step = 0.01),
                                   sliderInput("pixel_vscale", label = HTML("Voxel scale / &Aring; :"), min = 0.5, max = 2, value = 1.04, step = 0.01),
                                   numericInput("threshold", label = "Magnitude limit:", value = 25)
                                 ),
                                 sliderInput("inc_deg", label = "Inclination:", min = 0, max = 90, value = 90, step = 1),
                                 numericInput("m2l_disc", label = "Disc mass-to-light ratio:", value = 2),
                                 numericInput("m2l_bulge", label = "Bulge mass-to-light ratio:", value = 1),
                                 checkboxInput("blur", label = "Blur?", value = FALSE),
                                 # only show this panel if DM_profile == NFW
                                 conditionalPanel(
                                   condition = "input.blur == true",
                                   selectInput("psf", label = "PSF shape:", choices = c("Moffat", "Gaussian")),
                                   sliderInput("fwhm", label = "FWHM:", min = 0, max = 5, value = 0.5, step = 0.5)
                                 ),
                                 actionButton("submit_2", label = "Go!")
                               ),
                               mainPanel(
                                 h3(htmlOutput("bc_title")),
                                 withSpinner(plotOutput("datacube", height = "100%"), type = 5, color="#581845"),
                                 h3(htmlOutput("build_sum_title")),
                                 withSpinner(tableOutput("build_summary"), type = 5, color="#FF5733")
                               )
                             )
                    ),
                    # --- Tab 3: observe the cube and measure kinematic maps ---
                    tabPanel("Observe",
                             sidebarLayout(
                               sidebarPanel(
                                 # JS keeps input$innerWidth in sync with the browser width
                                 tags$head(tags$script('$(document).on("shiny:connected", function(e) {
                                                       Shiny.onInputChange("innerWidth", window.innerWidth);
                                                       });
                                                       $(window).resize(function(e) {
                                                       Shiny.onInputChange("innerWidth", window.innerWidth);
                                                       });
                                                       ')),
                                 checkboxInput("example_file_3", label = "Use SimSpin example file?", value=TRUE),
                                 fileInput("sim_file_3", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
                                           placeholder = "No file selected"),
                                 checkboxGroupInput("ptype_3", label = "Particle types:",
                                                    choiceNames = c("Disc", "Bulge"),
                                                    choiceValues = c(2, 3), selected = c(2,3)),
                                 #numericInput("r200_2", label = "Virial radius:", value = 200),
                                 sliderInput("z_2", label = "Redshift:", min = 0.01, max = 0.1, value = 0.06),
                                 selectInput("survey_2", label = "Survey:", choices = c("SAMI", "MaNGA", "CALIFA", "Hector", "Specified")),
                                 # only show this panel if survey == "Specified"
                                 conditionalPanel(
                                   condition = "input.survey_2 == 'Specified'",
                                   numericInput("fov_2", label = "IFU field of view:", value = 15),
                                   selectInput("ap_shape_2", label = "Aperture shape:", choices = c("circular", "square", "hexagonal")),
                                   numericInput("central_wvl_2", label = HTML("Central filter wavelength / &Aring; :"), value = 4800),
                                   numericInput("lsf_fwhm_2", label = "Line spread function:", value = 2.65),
                                   sliderInput("pixel_sscale_2", label = "Spaxel scale / '' :", min = 0.25, max = 2, value = 0.5, step = 0.01),
                                   sliderInput("pixel_vscale_2", label = HTML("Voxel scale / &Aring; :"), min = 0.5, max = 2, value = 1.04, step = 0.01),
                                   numericInput("threshold_2", label = "Magnitude limit:", value = 25)
                                 ),
                                 sliderInput("inc_deg_2", label = "Inclination:", min = 0, max = 90, value = 90, step = 1),
                                 numericInput("m2l_disc_2", label = "Disc mass-to-light ratio:", value = 2),
                                 numericInput("m2l_bulge_2", label = "Bulge mass-to-light ratio:", value = 1),
                                 checkboxInput("blur_2", label = "Blur?", value = FALSE),
                                 # only show this panel if DM_profile == NFW
                                 conditionalPanel(
                                   condition = "input.blur_2 == true",
                                   selectInput("psf_2", label = "PSF shape:", choices = c("Moffat", "Gaussian")),
                                   sliderInput("fwhm_2", label = "FWHM:", min = 0, max = 5, value = 0.5, step = 0.5)
                                 ),
                                 selectInput("measure_type", label = "Measurment radius details:", choices = c("Fit", "Specified", "Fixed")),
                                 conditionalPanel(
                                   condition = "input.measure_type == 'Fit'",
                                   sliderInput("fac", label = HTML("Factor / R<sub>eff</sub>:"), min = 0, max = 5, value = 1, step = 0.1)
                                 ),
                                 conditionalPanel(
                                   condition = "input.measure_type == 'Specified'",
                                   sliderInput("fract", label = HTML("Fraction of mass included:"), min = 0, max = 1, value = 0.5, step = 0.1),
                                   numericInput("ar_a", label = "Semi-major, a / kpc", value = 2),
                                   numericInput("ar_b", label = "Semi-minor, b / kpc", value = 1)
                                 ),
                                 conditionalPanel(
                                   condition = "input.measure_type == 'Fixed'",
                                   sliderInput("fac", label = HTML("Factor / R<sub>eff</sub>:"), min = 0, max = 5, value = 1, step = 0.1),
                                   numericInput("ar_a", label = "Semi-major, a / kpc", value = 2),
                                   numericInput("ar_b", label = "Semi-minor, b / kpc", value = 1)
                                 ),
                                 actionButton("submit_3", label = "Go!")
                               ),
                               mainPanel(
                                 tabsetPanel(
                                   tabPanel("Flux",
                                            h3(htmlOutput("fl_flux_title")),
                                            withSpinner(plotOutput("fl_flux_plot", height = "100%"), type = 5, color="#FFC300")
                                   ),
                                   tabPanel("Velocity",
                                            h3(htmlOutput("fl_vel_title")),
                                            withSpinner(plotOutput("fl_vel_plot", height = "100%"), type = 5, color="#C70039")
                                   ),
                                   tabPanel("Dispersion",
                                            h3(htmlOutput("fl_dis_title")),
                                            withSpinner(plotOutput("fl_dis_plot", height = "100%"), type = 5, color="#581845")
                                   ),
                                   tabPanel("Summary",
                                            h3(htmlOutput("find_sum_title")),
                                            withSpinner(tableOutput("find_summary"), type = 5, color="#FF5733")
                                   )
                                 )
                               )
                             )
                    ),
                    # --- Tab 4: contact/admin information ---
                    tabPanel("Contact",
                             h3(htmlOutput("contact_info_header")),
                             htmlOutput("contact_info_1"),
                             a(actionButton(inputId = "email1", label = "Contact Admin",
                                            icon = icon("envelope", lib = "font-awesome")),
                               href="mailto:katherine.harborne@icrar.org?subject=SimSpin Web-app Issue"),
                             htmlOutput("contact_info_2"))
         )
    ))
)
|
/ui.R
|
no_license
|
kateharborne/SimSpin_app
|
R
| false
| false
| 12,834
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
library(SimSpin)
library(shinycssloaders)
library(plot3D)
options(shiny.maxRequestSize = 100*1024^2)
shinyUI(
fluidPage(
titlePanel('', windowTitle = 'SimSpin'),
tags$head(tags$link(rel="shortcut icon", href="favicon.ico"),
tags$link(rel="apple-touch-icon", href="apple_icon.png", sizes="158x158")
),
tags$style(type="text/css", "body { overflow-y: scroll; }"),
div(class = " titleContainer", img(src="simspin.gif",class="headerImage",style="width:100%")),
div(class="row marginRow"),
div (id="navAlign",
navbarPage(" ", theme = shinytheme("simplex"), fluid = TRUE, collapsible = TRUE,
tabPanel("Analyse",
# Sidebar with a file input for simulation to be analysed
sidebarLayout(
sidebarPanel(
checkboxInput("example_file", label = "Use SimSpin example file?", value=TRUE),
fileInput("sim_file", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
placeholder = "No file selected"),
checkboxGroupInput("ptype", label = "Particle types:",
choiceNames = c("Dark Matter", "Disc", "Bulge"),
choiceValues = c(1, 2, 3), selected = c(2,3)),
selectInput("DM_profile", label = "Dark Matter Profile:", choices = c("Hernquist", "NFW", "None")),
# only show this panel if DM_profile == Hernquist
conditionalPanel(
condition = "input.DM_profile == 'Hernquist'",
numericInput("DM_mass", label = "Mass:", value = 185.966),
numericInput("DM_a", label = "Scale radius:", value = 34.5)
),
# only show this panel if DM_profile == NFW
conditionalPanel(
condition = "input.DM_profile == 'NFW'",
numericInput("DM_vm", label = "Virial mass:", value = 185.966),
numericInput("DM_a", label = "Scale radius:", value = 34.5),
numericInput("DM_rhof", label = "Density at flattening radius:", value = 0.00035)
),
selectInput("bin_type", label = "Segementation:", choices = c("r", "cr", "z"), selected = "r",
multiple = FALSE),
sliderInput("rmax", label = "Maximum radius:", min = 1, max = 500, value = 200, step = 1),
sliderInput("rbin", label = "Number of segments:", min = 1, max = 1000, value = 200, step = 1),
actionButton("submit_1", label = "Go!")
),
mainPanel(
tabsetPanel(
tabPanel("Segments",
h3(htmlOutput("segments_title")),
withSpinner(plotOutput("simulation_segments"), type = 5, color="#581845"),
htmlOutput("user_specs")
),
tabPanel("Kinematics",
h3(htmlOutput("logp_title")),
withSpinner(plotOutput("logp_dist"), type = 5, color="#FF5733"),
h3(htmlOutput("vrot_title")),
withSpinner(plotOutput("vrot_dist"), type = 5, color="#FFC300"),
h5(htmlOutput("vrot_beware"))
),
tabPanel("Summary",
h3(htmlOutput("kin_title")),
withSpinner(tableOutput("kinematics"), type = 5, color="#C70039"),
h5(htmlOutput("kin_beware")))
))
)
),
tabPanel("Cube",
sidebarLayout(
sidebarPanel(
tags$head(tags$script('$(document).on("shiny:connected", function(e) {
Shiny.onInputChange("innerWidth", window.innerWidth);
});
$(window).resize(function(e) {
Shiny.onInputChange("innerWidth", window.innerWidth);
});
')),
checkboxInput("example_file_2", label = "Use SimSpin example file?", value=TRUE),
fileInput("sim_file_2", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
placeholder = "No file selected"),
checkboxGroupInput("ptype_2", label = "Particle types:",
choiceNames = c("Disc", "Bulge"),
choiceValues = c(2, 3), selected = c(2,3)),
#numericInput("r200", label = "Virial radius:", value = 200),
sliderInput("z", label = "Redshift:", min = 0.01, max = 0.1, value = 0.06),
selectInput("survey", label = "Survey:", choices = c("SAMI", "MaNGA", "CALIFA", "Hector", "Specified")),
# only show this panel if survey == "Specified"
conditionalPanel(
condition = "input.survey == 'Specified'",
numericInput("fov", label = "IFU field of view:", value = 15),
selectInput("ap_shape", label = "Aperture shape:", choices = c("circular", "square", "hexagonal")),
numericInput("central_wvl", label = HTML("Central filter wavelength / Å :"), value = 4800),
numericInput("lsf_fwhm", label = "Line spread function:", value = 2.65),
sliderInput("pixel_sscale", label = "Spaxel scale / '' :", min = 0.25, max = 2, value = 0.5, step = 0.01),
sliderInput("pixel_vscale", label = HTML("Voxel scale / Å :"), min = 0.5, max = 2, value = 1.04, step = 0.01),
numericInput("threshold", label = "Magnitude limit:", value = 25)
),
sliderInput("inc_deg", label = "Inclination:", min = 0, max = 90, value = 90, step = 1),
numericInput("m2l_disc", label = "Disc mass-to-light ratio:", value = 2),
numericInput("m2l_bulge", label = "Bulge mass-to-light ratio:", value = 1),
checkboxInput("blur", label = "Blur?", value = FALSE),
# only show this panel if DM_profile == NFW
conditionalPanel(
condition = "input.blur == true",
selectInput("psf", label = "PSF shape:", choices = c("Moffat", "Gaussian")),
sliderInput("fwhm", label = "FWHM:", min = 0, max = 5, value = 0.5, step = 0.5)
),
actionButton("submit_2", label = "Go!")
),
mainPanel(
h3(htmlOutput("bc_title")),
withSpinner(plotOutput("datacube", height = "100%"), type = 5, color="#581845"),
h3(htmlOutput("build_sum_title")),
withSpinner(tableOutput("build_summary"), type = 5, color="#FF5733")
)
)
),
tabPanel("Observe",
sidebarLayout(
sidebarPanel(
tags$head(tags$script('$(document).on("shiny:connected", function(e) {
Shiny.onInputChange("innerWidth", window.innerWidth);
});
$(window).resize(function(e) {
Shiny.onInputChange("innerWidth", window.innerWidth);
});
')),
checkboxInput("example_file_3", label = "Use SimSpin example file?", value=TRUE),
fileInput("sim_file_3", label = "Or upload simulation file:", multiple = FALSE, buttonLabel = "Browse...",
placeholder = "No file selected"),
checkboxGroupInput("ptype_3", label = "Particle types:",
choiceNames = c("Disc", "Bulge"),
choiceValues = c(2, 3), selected = c(2,3)),
#numericInput("r200_2", label = "Virial radius:", value = 200),
sliderInput("z_2", label = "Redshift:", min = 0.01, max = 0.1, value = 0.06),
selectInput("survey_2", label = "Survey:", choices = c("SAMI", "MaNGA", "CALIFA", "Hector", "Specified")),
# only show this panel if survey == "Specified"
conditionalPanel(
condition = "input.survey_2 == 'Specified'",
numericInput("fov_2", label = "IFU field of view:", value = 15),
selectInput("ap_shape_2", label = "Aperture shape:", choices = c("circular", "square", "hexagonal")),
numericInput("central_wvl_2", label = HTML("Central filter wavelength / Å :"), value = 4800),
numericInput("lsf_fwhm_2", label = "Line spread function:", value = 2.65),
sliderInput("pixel_sscale_2", label = "Spaxel scale / '' :", min = 0.25, max = 2, value = 0.5, step = 0.01),
sliderInput("pixel_vscale_2", label = HTML("Voxel scale / Å :"), min = 0.5, max = 2, value = 1.04, step = 0.01),
numericInput("threshold_2", label = "Magnitude limit:", value = 25)
),
sliderInput("inc_deg_2", label = "Inclination:", min = 0, max = 90, value = 90, step = 1),
numericInput("m2l_disc_2", label = "Disc mass-to-light ratio:", value = 2),
numericInput("m2l_bulge_2", label = "Bulge mass-to-light ratio:", value = 1),
checkboxInput("blur_2", label = "Blur?", value = FALSE),
# only show this panel if DM_profile == NFW
conditionalPanel(
condition = "input.blur_2 == true",
selectInput("psf_2", label = "PSF shape:", choices = c("Moffat", "Gaussian")),
sliderInput("fwhm_2", label = "FWHM:", min = 0, max = 5, value = 0.5, step = 0.5)
),
selectInput("measure_type", label = "Measurment radius details:", choices = c("Fit", "Specified", "Fixed")),
conditionalPanel(
condition = "input.measure_type == 'Fit'",
sliderInput("fac", label = HTML("Factor / R<sub>eff</sub>:"), min = 0, max = 5, value = 1, step = 0.1)
),
conditionalPanel(
condition = "input.measure_type == 'Specified'",
sliderInput("fract", label = HTML("Fraction of mass included:"), min = 0, max = 1, value = 0.5, step = 0.1),
numericInput("ar_a", label = "Semi-major, a / kpc", value = 2),
numericInput("ar_b", label = "Semi-minor, b / kpc", value = 1)
),
conditionalPanel(
condition = "input.measure_type == 'Fixed'",
sliderInput("fac", label = HTML("Factor / R<sub>eff</sub>:"), min = 0, max = 5, value = 1, step = 0.1),
numericInput("ar_a", label = "Semi-major, a / kpc", value = 2),
numericInput("ar_b", label = "Semi-minor, b / kpc", value = 1)
),
actionButton("submit_3", label = "Go!")
),
mainPanel(
tabsetPanel(
tabPanel("Flux",
h3(htmlOutput("fl_flux_title")),
withSpinner(plotOutput("fl_flux_plot", height = "100%"), type = 5, color="#FFC300")
),
tabPanel("Velocity",
h3(htmlOutput("fl_vel_title")),
withSpinner(plotOutput("fl_vel_plot", height = "100%"), type = 5, color="#C70039")
),
tabPanel("Dispersion",
h3(htmlOutput("fl_dis_title")),
withSpinner(plotOutput("fl_dis_plot", height = "100%"), type = 5, color="#581845")
),
tabPanel("Summary",
h3(htmlOutput("find_sum_title")),
withSpinner(tableOutput("find_summary"), type = 5, color="#FF5733")
)
)
)
)
),
tabPanel("Contact",
h3(htmlOutput("contact_info_header")),
htmlOutput("contact_info_1"),
a(actionButton(inputId = "email1", label = "Contact Admin",
icon = icon("envelope", lib = "font-awesome")),
href="mailto:katherine.harborne@icrar.org?subject=SimSpin Web-app Issue"),
htmlOutput("contact_info_2"))
)
))
)
|
# Learning snippet exploring data.table / rnorm.
# NOTE(review): an unconditional install.packages() in a script re-installs on
# every run; consider guarding with if (!requireNamespace("data.table")).
install.packages("data.table")
library(data.table)
# Toy data frame with 9 standard-normal draws.
DF = data.frame(x=rnorm(9))
help(rnorm)
|
/longgb/R/Learning/learning_01.R
|
no_license
|
longgb246/pythonstudy
|
R
| false
| false
| 94
|
r
|
# Learning snippet exploring data.table / rnorm.
# NOTE(review): an unconditional install.packages() in a script re-installs on
# every run; consider guarding with if (!requireNamespace("data.table")).
install.packages("data.table")
library(data.table)
# Toy data frame with 9 standard-normal draws.
DF = data.frame(x=rnorm(9))
help(rnorm)
|
# Saved interactive R console history (written by savehistory() on the last
# line). It is a verbatim transcript, NOT a runnable script: several lines
# are mistyped pastes that do not parse and some regexes use invalid escapes.
df = read.csv('data/repository_names_count_events_top1000.csv')
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv.csv')
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv')
head(dfs)
sum(dfs$aggregate_counts)
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', asis=TRUE)
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
sum(dfs$aggregate_counts)
dfs$aggregate_counts
as.integer(dfs$aggregate_counts)
sum(dfs$aggregate_counts)
dfs$aggregate_counts = as.integer(dfs$aggregate_counts)
sum(dfs$aggregate_counts)
sum(dfs$aggregate_counts,na.rm=TRUE)
unique(dfs$aggregate_counts)
dfs$aggregate_counts
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
dfs$aggregate_counts
# Trial-and-error: coercing blank aggregate_counts strings to numbers.
sub(dfs$aggregate_counts, '', '0')
sub(dfs$aggregate_counts, pattern='', '0')
sub(x=dfs$aggregate_counts, pattern='', replacement='0')
as.integer(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
?as.integer
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
# The next line is a mangled paste from the console; it does not parse.
as.integer(sub(x=dfs$aggregate_counts, pattern='', replacement='0'dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)))
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
sub(x=dfs$aggregate_counts, pattern='', replacement='0')
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
# Another mangled paste; does not parse.
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))sum(dfs$aggregate_counts)
dfs$aggregate_counts = as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
sum(dfs$aggregate_counts)
table(dfs$aggregate_counts)
sum(dfs$aggregate_counts, na.rm=TRUE)
sum(dfs$count)
proportion = sum(dfs$aggregate_counts, na.rm=TRUE)/sum(dfs$count)
proportion
# BigQuery pull of the 1000 most active repositories (lower-cased names).
query = "SELECT repository_name, lower(repository_name) as repository_name_lower, count(lower(repository_name_lower)) as count FROM [githubarchive:github.timeline] group by repository_name_lower, repository_name order by count desc LIMIT 1000"
df = query_exec(query, 'metacommunities')
library(bigrquery)
df = query_exec(query, 'metacommunities')
query = "SELECT repository_name, lower(repository_name) as repository_name_lower, count(lower(repository_name)) as count FROM [githubarchive:github.timeline] group by repository_name_lower, repository_name order by count desc LIMIT 1000"
df = query_exec(query, 'metacommunities')
head(df)
write.csv(df, 'data/repository_names_count_events_top1000.csv')
event_total = 289000000
sum(df$count)/event_total * 100
# Experiments finding the right regex anchor to strip a leading dot; note
# that '$\.' style patterns use invalid escapes and would not parse.
sub(x=df$repository_name_lower, '$\.', '')
sub(x=df$repository_name_lower, '$\\.', '')
repo_names = sub(x=df$repository_name_lower, '$\\.', '')
df$repo_names_clean = repo_names
tail(df$repo_names_clean)
df = df[order(df$repo_names_clean),]
head(df)
tail(df)
View(df)
sbu(x='.dit', '$\\.', '')
sub(x='.dit', '$\\.', '')
sub(x='.dit', '$.', '')
sub(x='.dit', '$//.', '')
sub(x='.dit', '.', '')
sub(x='.dit', '^.', '')
sub(x='.dit', '^.', '')
sub(x='.dit', '^//.', '')
sub(x='.dit', '^\.', '')
sub(x='.dit', '^\\.', '')
# Final working version: '^\\.' anchors the literal dot at the start.
df$repo_names_clean = sub(x=df$repository_name_lower, '^\\.', '')
df = df[order(df$repo_names_clean),]
View(df)
savehistory(file='clean_repo_names.r')
|
/analysis/repo_name_imitation/clean_repo_names.r
|
no_license
|
metacommunities/metacommunities
|
R
| false
| false
| 3,343
|
r
|
df = read.csv('data/repository_names_count_events_top1000.csv')
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv.csv')
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv')
head(dfs)
sum(dfs$aggregate_counts)
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', asis=TRUE)
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
sum(dfs$aggregate_counts)
dfs$aggregate_counts
as.integer(dfs$aggregate_counts)
sum(dfs$aggregate_counts)
dfs$aggregate_counts = as.integer(dfs$aggregate_counts)
sum(dfs$aggregate_counts)
sum(dfs$aggregate_counts,na.rm=TRUE)
unique(dfs$aggregate_counts)
dfs$aggregate_counts
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
dfs$aggregate_counts
sub(dfs$aggregate_counts, '', '0')
sub(dfs$aggregate_counts, pattern='', '0')
sub(x=dfs$aggregate_counts, pattern='', replacement='0')
as.integer(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
?as.integer
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
as.integer(sub(x=dfs$aggregate_counts, pattern='', replacement='0'dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)))
dfs = read.csv('data/repository_names_count_events_top1000_sorted_by_name.csv', stringsAsFactors=FALSE)
sub(x=dfs$aggregate_counts, pattern='', replacement='0')
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))sum(dfs$aggregate_counts)
dfs$aggregate_counts = as.numeric(sub(x=dfs$aggregate_counts, pattern='', replacement='0'))
sum(dfs$aggregate_counts)
table(dfs$aggregate_counts)
sum(dfs$aggregate_counts, na.rm=TRUE)
sum(dfs$count)
proportion = sum(dfs$aggregate_counts, na.rm=TRUE)/sum(dfs$count)
proportion
query = "SELECT repository_name, lower(repository_name) as repository_name_lower, count(lower(repository_name_lower)) as count FROM [githubarchive:github.timeline] group by repository_name_lower, repository_name order by count desc LIMIT 1000"
df = query_exec(query, 'metacommunities')
library(bigrquery)
df = query_exec(query, 'metacommunities')
query = "SELECT repository_name, lower(repository_name) as repository_name_lower, count(lower(repository_name)) as count FROM [githubarchive:github.timeline] group by repository_name_lower, repository_name order by count desc LIMIT 1000"
df = query_exec(query, 'metacommunities')
head(df)
write.csv(df, 'data/repository_names_count_events_top1000.csv')
event_total = 289000000
sum(df$count)/event_total * 100
sub(x=df$repository_name_lower, '$\.', '')
sub(x=df$repository_name_lower, '$\\.', '')
repo_names = sub(x=df$repository_name_lower, '$\\.', '')
df$repo_names_clean = repo_names
tail(df$repo_names_clean)
df = df[order(df$repo_names_clean),]
head(df)
tail(df)
View(df)
sbu(x='.dit', '$\\.', '')
sub(x='.dit', '$\\.', '')
sub(x='.dit', '$.', '')
sub(x='.dit', '$//.', '')
sub(x='.dit', '.', '')
sub(x='.dit', '^.', '')
sub(x='.dit', '^.', '')
sub(x='.dit', '^//.', '')
sub(x='.dit', '^\.', '')
sub(x='.dit', '^\\.', '')
df$repo_names_clean = sub(x=df$repository_name_lower, '^\\.', '')
df = df[order(df$repo_names_clean),]
View(df)
savehistory(file='clean_repo_names.r')
|
library(leaflet)
dir()
ct<- read.csv("data/cafe.csv")
# Be sure to first set the working directory in R to where the file is listed
address_waw <- c(52.2330251,20.9803086)
m <- leaflet(ct) %>% addProviderTiles("CartoDB.Positron") %>%
setView(address_waw[2], address_waw[1], zoom = 11) %>%
addCircles(~lon, ~lat, popup=ct$type, weight = 3, radius=40,
color="#ffa500", stroke = TRUE, fillOpacity = 0.8)
m
|
/R/visualize_sinlge_category.R
|
no_license
|
WawCode16/mdw-data-retrieval
|
R
| false
| false
| 427
|
r
|
library(leaflet)
dir()
ct<- read.csv("data/cafe.csv")
# Be sure to first set the working directory in R to where the file is listed
address_waw <- c(52.2330251,20.9803086)
m <- leaflet(ct) %>% addProviderTiles("CartoDB.Positron") %>%
setView(address_waw[2], address_waw[1], zoom = 11) %>%
addCircles(~lon, ~lat, popup=ct$type, weight = 3, radius=40,
color="#ffa500", stroke = TRUE, fillOpacity = 0.8)
m
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EvoTrees.R
\name{best_iter.JuliaObject}
\alias{best_iter.JuliaObject}
\title{Get model best iter and eval metric}
\usage{
best_iter.JuliaObject(model)
}
\description{
Get model best iter and eval metric
}
|
/man/best_iter.JuliaObject.Rd
|
permissive
|
StatMixedML/EvoTrees
|
R
| false
| true
| 284
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EvoTrees.R
\name{best_iter.JuliaObject}
\alias{best_iter.JuliaObject}
\title{Get model best iter and eval metric}
\usage{
best_iter.JuliaObject(model)
}
\description{
Get model best iter and eval metric
}
|
shinyServer(function(input, output) {
output$provincia.uisel<-renderUI({
provincia.list<-df %>% filter(CCAA==input$CCAA.sel) %>% select(Provincia) %>% distinct()
provincia.list<-as.character(provincia.list[,1])
selectInput("provincia.sel",
label = h4("Provincia"),
choices = provincia.list,
selected = 'Total',
multiple = FALSE)
})
output$plot <- renderPlotly({
norm.Sel<-input$norm.sel
CCAA.Sel<-input$CCAA.sel
if(is.null(input$provincia.sel)){
Provincia.Sel<-'Total'
}else{
Provincia.Sel<-input$provincia.sel
}
prod.Sel<-input$prod.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas.sel[2],"-01-01")))
if(fechas.rango[2] == input$fechas.sel[2]){xlim.Sel[2]<-df$fecha[nrow(df)]}
iprod.Sel<-which(names(df) %in% prod.Sel, arr.ind=T)
if(length(iprod.Sel)>0){
z<-df %>% filter(CCAA==CCAA.Sel & Provincia==Provincia.Sel) %>% select(1, iprod.Sel)
if(NROW(z)>0){
z<-zoo(x=z[,2:NCOL(z)], order.by = as.Date(z[,1]))
names(z)[1]<-prod.Sel[1]
ytit<-"kt/mes"
if(norm.Sel){
nor<-sapply(window(z, start = as.Date(xlim.Sel[1]), end = as.Date(xlim.Sel[1]+11/12)),function(x) 100/mean(x))
if(NCOL(z)>1) nor<-matrix(rep(nor,each=NROW(z)),ncol=NCOL(z))
z<-z * nor
ytit<-"%"
}
zdf <- fortify.zoo(z) %>% filter(Index >= as.Date(xlim.Sel[1]) & Index <= as.Date(xlim.Sel[2]))
names(zdf)[2]<-prod.Sel[1]
if(input$onefacet2.sel){
p <- plot_ly()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, "per")$time.series[, 2]
p <- p %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
showlegend=FALSE, hoverinfo = "none", line=list(color=jBrewColors[i-1], width=2.0))
}
p<-p %>% layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.25, gridcolor = toRGB("red", alpha=0.25)),
title=paste0("<b>",CCAA.Sel, "-", Provincia.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
p
}else{
l<-list()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, s.window = 15)$time.series[, 2]
p1 <- plot_ly() %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
name=paste("trend",names(zdf[i])), showlegend=FALSE, hoverinfo = "none",
line=list(color=jBrewColors[i-1], width=2.0)) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
p2 <- plot_ly() %>%
add_bars(x=zdf[,1], y=c(NA, diff(trend,1)), name=paste("Var",names(zdf[i])),
marker=list(color=jBrewColors[i-1])) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
l[[i-1]]<-subplot(p1, p2, nrows = 2, shareX = TRUE, heights = c(0.75,0.25))
}
subplot(l, nrows = ncol(zdf)-1, shareX = TRUE) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
title=paste0("<b>",CCAA.Sel, "-", Provincia.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
}
}
}
})
output$provincia1.uisel<-renderUI({
provincia1.list<-df %>% filter(CCAA %in% input$CCAA1.sel) %>% select(Provincia) %>% distinct()
provincia1.list<-as.character(provincia1.list[,1])
selectInput("provincia1.sel",
label = h4("Provincia"),
choices = provincia1.list,
selected = 'Total',
multiple = TRUE)
})
output$plot1 <- renderPlotly({
norm.Sel<-input$norm1.sel
CCAA.Sel<-input$CCAA1.sel
onefacet.Sel<-input$onefacet.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas1.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas1.sel[2],"-01-01")))
if(fechas.rango[2] == input$fechas1.sel[2]){xlim.Sel[2]<-df$fecha[nrow(df)]}
if(is.null(input$provincia1.sel)){
Provincia.Sel<-'Total'
}else{
Provincia.Sel<-input$provincia1.sel
}
prod.Sel<-input$prod1.sel
iprod.Sel<-which(names(df) == prod.Sel)
z<-df %>%
filter(CCAA %in% CCAA.Sel) %>%
filter(Provincia %in% Provincia.Sel) %>%
select(1:3, psel=iprod.Sel) %>%
mutate(CCAA.Prov=paste0(CCAA,'.',Provincia)) %>%
select(-CCAA, -Provincia) %>%
spread(key=CCAA.Prov, value=psel)
if(NROW(z)>0) {
z<-zoo(x=z[,2:NCOL(z)], order.by = as.Date(z[,1]))
ytit<-"kt/mes"
if(norm.Sel){
nor<-sapply(window(z, start = as.Date(xlim.Sel[1]), end = as.Date(xlim.Sel[1]+11/12)),function(x) 100/mean(x))
if(NCOL(z)>1) nor<-matrix(rep(nor,each=NROW(z)),ncol=NCOL(z))
z<-z * nor
ytit<-"%"
}
zdf <- fortify.zoo(z) %>% filter(Index >= as.Date(xlim.Sel[1]) & Index <= as.Date(xlim.Sel[2]))
if(ncol(zdf) == 2) names(zdf)[2]<-paste0(CCAA.Sel[1],'.',Provincia.Sel[1])
if(onefacet.Sel){
p <- plot_ly()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, "per")$time.series[, 2]
p <- p %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
showlegend=FALSE, hoverinfo = "none", line=list(color=jBrewColors[i-1], width=2.0))
}
p<-p %>% layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.25, gridcolor = toRGB("red", alpha=0.25)),
title=paste0("<b>",prod.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
p
}else{
l<-list()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, s.window = 15)$time.series[, 2]
p1 <- plot_ly() %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
name=paste("trend",names(zdf[i])), showlegend=FALSE, hoverinfo = "none",
line=list(color=jBrewColors[i-1], width=2.0)) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
p2 <- plot_ly() %>%
add_bars(x=zdf[,1], y=c(NA, diff(trend,1)), name=paste("Var",names(zdf[i])),
marker=list(color=jBrewColors[i-1])) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
l[[i-1]]<-subplot(p1, p2, nrows = 2, shareX = TRUE, heights = c(0.75,0.25))
}
subplot(l, nrows = ncol(zdf)-1, shareX = TRUE) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
title=paste0("<b>",prod.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
}
}
}) #, height = 750, width = 'auto')
output$producto.uisel<-renderUI({
product1.list<-NULL
for(f in input$familia.sel){
product1.list<-c(product1.list, products.pp[grep(paste0("^",f), products.pp)])
}
selectInput("product1.sel",
label = h4("Producto"),
choices = product1.list,
selected = c('GOIL.total', 'GSNA.total'),
multiple = TRUE)
})
output$plot2 <- renderPlotly({
if(!is.null(input$product1.sel)){
norm.Sel<-input$norm2.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas2.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas2.sel[2],"-01-01")))
if(fechas1.rango[2] == input$fechas2.sel[2]){xlim.Sel[2]<-pp.df$fecha[nrow(pp.df)]}
zpp<-pp.df %>% select(fecha, -anyo, -mes, one_of(input$product1.sel))
zpp<-zoo(x = select(zpp, -fecha), order.by=zpp$fecha)
if(norm.Sel){
nor<-sapply(window(zpp, start = xlim.Sel[1], end = xlim.Sel[1]+11/12),
function(x) ifelse(is.na(100/mean(x)), 1, 100/mean(x)))
if(NCOL(zpp)>1) nor<-matrix(rep(nor,each=NROW(zpp)),ncol=NCOL(zpp))
zpp<-zpp * nor
}
#breaks.zpp = xlim.Sel[1]+seq.int(0,(xlim.Sel[2]-xlim.Sel[1])*12, length.out = 12)/12
if(input$onefacet1.sel){
g<-autoplot(zpp, na.rm = TRUE, facets = NULL)
}else{
g<-autoplot(zpp, na.rm = TRUE)
if(NCOL(zpp)>1) g<-g + facet_free()
}
g<-g + scale_x_yearmon(limits=xlim.Sel, format = "%b %Y") #,breaks=breaks.zpp
g<-g + geom_line(size=0.5, na.rm = TRUE)
g<-g + geom_smooth(se=F, size=1, na.rm = TRUE)
g<-g + xlab("Fecha")+ggtitle("Consumo mensual")
if(norm.Sel) {
g<-g + ylab("%")
}else{
g<-g + ylab("kt/mes")
}
g<-g + theme(axis.text = element_text(size = 12),
plot.title = element_text(size = 16, face='bold'),
strip.text = element_text(size = 16, face='bold'),
axis.title.x = element_text(size = 14),
axis.title.y = element_text(size = 14),
panel.border = element_rect(linetype = 'solid', color = 'red', fill = NA),
strip.background = element_rect(linetype = 'solid', color = 'darkred', fill = 'gray'),
panel.grid.major= element_line(size = 0.25, colour = "red", linetype = "dotted"),
panel.grid.minor = element_blank(),
legend.position = 'bottom',
legend.text = element_text(size = 14),
legend.title=element_blank())
ggplotly(g)
}
}) #, height = 750, width = 'auto')
output$producto1.uisel<-renderUI({
product2.list<-NULL
for(f in input$familia1.sel){
product2.list<-c(product2.list, products.pp[grep(paste0("^",f), products.pp)])
}
selectInput("product2.sel",
label = h4("Producto"),
choices = product2.list,
selected = 'GOIL.total',
multiple = FALSE)
})
output$plot3 <- renderPlot({
if(!is.null(input$product2.sel)){
xlim.Sel<-c(as.yearmon(paste0(input$fechas3.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas3.sel[2],"-01-01")))
if(fechas1.rango[2] == input$fechas3.sel[2]){xlim.Sel[2]<-pp.df$fecha[nrow(pp.df)]}
z<-pp.df %>% select(fecha, -anyo, -mes, one_of(input$product2.sel))
z<-zoo(x = select(z, -fecha), order.by=z$fecha)
z<-na.locf(z)
fit<-seas(as.ts(z), forecast.save = "fct", forecast.probability = 0.95)
zz0<-data.frame(fecha=as.character(as.yearmon(time(original(fit)))),
original=drop(coredata(original(fit))),
stringsAsFactors = FALSE)
zz<-data.frame(fecha=as.character(as.yearmon(time(final(fit)))),
outlier=coredata(outlier(fit)),
final=coredata(final(fit)),
trend=coredata(trend(fit)),
stringsAsFactors = FALSE)
zz<- left_join(zz, zz0, by='fecha')
zzf<-data.frame(fecha=as.character(as.yearmon(time(series(fit, 'forecast.forecasts')))),
series(fit, 'forecast.forecasts'),
stringsAsFactors = FALSE)
zz<- full_join(zz, zzf, by='fecha') %>% mutate(fecha=as.yearmon(fecha))
g<-ggplot(zz, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=original, color='original'), size=0.5, na.rm = TRUE)
g<-g + geom_text(aes(x=fecha, y=original, label=outlier), na.rm = TRUE)
g<-g + geom_label(aes(x=fecha, y=original, label=outlier, color='outlier'), na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=final, color='final'), size=1, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=trend, color='trend'), size=1.5, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=forecast, color='forecast'), size=1, na.rm = TRUE)
g<-g + geom_ribbon(aes(x=fecha, ymin = lowerci, ymax = upperci), fill = "grey70", alpha=0.5)
g<-g + scale_color_manual(values=c('original'='chartreuse4', 'final'='black', 'trend'='blue', 'forecast'='red', 'outlier'='orange1'),
breaks=c('original', 'final', 'trend', 'forecast', 'outlier'))
g<-g + ylab("kt")+ggtitle("Consumo mensual")
g<-g + scale_x_yearmon(limits=xlim.Sel)
g<-g + theme(panel.background = element_rect(colour = "red"),
plot.title = element_text(size = 16, face='bold', color='blue'),
panel.grid.major= element_line(size = 0.25, colour = "red", linetype = "dotted"),
axis.text.x = element_text(size = 14),
axis.title.x = element_blank(),
axis.title.y = element_text(size = 14),
legend.position = 'bottom',
legend.text = element_text(size = 14),
legend.title=element_blank())
g
}
}, height = 750, width = 'auto')
})
|
/server.R
|
no_license
|
BaltiBoix/shinyCORES
|
R
| false
| false
| 23,067
|
r
|
shinyServer(function(input, output) {
output$provincia.uisel<-renderUI({
provincia.list<-df %>% filter(CCAA==input$CCAA.sel) %>% select(Provincia) %>% distinct()
provincia.list<-as.character(provincia.list[,1])
selectInput("provincia.sel",
label = h4("Provincia"),
choices = provincia.list,
selected = 'Total',
multiple = FALSE)
})
output$plot <- renderPlotly({
norm.Sel<-input$norm.sel
CCAA.Sel<-input$CCAA.sel
if(is.null(input$provincia.sel)){
Provincia.Sel<-'Total'
}else{
Provincia.Sel<-input$provincia.sel
}
prod.Sel<-input$prod.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas.sel[2],"-01-01")))
if(fechas.rango[2] == input$fechas.sel[2]){xlim.Sel[2]<-df$fecha[nrow(df)]}
iprod.Sel<-which(names(df) %in% prod.Sel, arr.ind=T)
if(length(iprod.Sel)>0){
z<-df %>% filter(CCAA==CCAA.Sel & Provincia==Provincia.Sel) %>% select(1, iprod.Sel)
if(NROW(z)>0){
z<-zoo(x=z[,2:NCOL(z)], order.by = as.Date(z[,1]))
names(z)[1]<-prod.Sel[1]
ytit<-"kt/mes"
if(norm.Sel){
nor<-sapply(window(z, start = as.Date(xlim.Sel[1]), end = as.Date(xlim.Sel[1]+11/12)),function(x) 100/mean(x))
if(NCOL(z)>1) nor<-matrix(rep(nor,each=NROW(z)),ncol=NCOL(z))
z<-z * nor
ytit<-"%"
}
zdf <- fortify.zoo(z) %>% filter(Index >= as.Date(xlim.Sel[1]) & Index <= as.Date(xlim.Sel[2]))
names(zdf)[2]<-prod.Sel[1]
if(input$onefacet2.sel){
p <- plot_ly()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, "per")$time.series[, 2]
p <- p %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
showlegend=FALSE, hoverinfo = "none", line=list(color=jBrewColors[i-1], width=2.0))
}
p<-p %>% layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.25, gridcolor = toRGB("red", alpha=0.25)),
title=paste0("<b>",CCAA.Sel, "-", Provincia.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
p
}else{
l<-list()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, s.window = 15)$time.series[, 2]
p1 <- plot_ly() %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
name=paste("trend",names(zdf[i])), showlegend=FALSE, hoverinfo = "none",
line=list(color=jBrewColors[i-1], width=2.0)) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
p2 <- plot_ly() %>%
add_bars(x=zdf[,1], y=c(NA, diff(trend,1)), name=paste("Var",names(zdf[i])),
marker=list(color=jBrewColors[i-1])) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
l[[i-1]]<-subplot(p1, p2, nrows = 2, shareX = TRUE, heights = c(0.75,0.25))
}
subplot(l, nrows = ncol(zdf)-1, shareX = TRUE) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
title=paste0("<b>",CCAA.Sel, "-", Provincia.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
}
}
}
})
output$provincia1.uisel<-renderUI({
provincia1.list<-df %>% filter(CCAA %in% input$CCAA1.sel) %>% select(Provincia) %>% distinct()
provincia1.list<-as.character(provincia1.list[,1])
selectInput("provincia1.sel",
label = h4("Provincia"),
choices = provincia1.list,
selected = 'Total',
multiple = TRUE)
})
output$plot1 <- renderPlotly({
norm.Sel<-input$norm1.sel
CCAA.Sel<-input$CCAA1.sel
onefacet.Sel<-input$onefacet.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas1.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas1.sel[2],"-01-01")))
if(fechas.rango[2] == input$fechas1.sel[2]){xlim.Sel[2]<-df$fecha[nrow(df)]}
if(is.null(input$provincia1.sel)){
Provincia.Sel<-'Total'
}else{
Provincia.Sel<-input$provincia1.sel
}
prod.Sel<-input$prod1.sel
iprod.Sel<-which(names(df) == prod.Sel)
z<-df %>%
filter(CCAA %in% CCAA.Sel) %>%
filter(Provincia %in% Provincia.Sel) %>%
select(1:3, psel=iprod.Sel) %>%
mutate(CCAA.Prov=paste0(CCAA,'.',Provincia)) %>%
select(-CCAA, -Provincia) %>%
spread(key=CCAA.Prov, value=psel)
if(NROW(z)>0) {
z<-zoo(x=z[,2:NCOL(z)], order.by = as.Date(z[,1]))
ytit<-"kt/mes"
if(norm.Sel){
nor<-sapply(window(z, start = as.Date(xlim.Sel[1]), end = as.Date(xlim.Sel[1]+11/12)),function(x) 100/mean(x))
if(NCOL(z)>1) nor<-matrix(rep(nor,each=NROW(z)),ncol=NCOL(z))
z<-z * nor
ytit<-"%"
}
zdf <- fortify.zoo(z) %>% filter(Index >= as.Date(xlim.Sel[1]) & Index <= as.Date(xlim.Sel[2]))
if(ncol(zdf) == 2) names(zdf)[2]<-paste0(CCAA.Sel[1],'.',Provincia.Sel[1])
if(onefacet.Sel){
p <- plot_ly()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, "per")$time.series[, 2]
p <- p %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
showlegend=FALSE, hoverinfo = "none", line=list(color=jBrewColors[i-1], width=2.0))
}
p<-p %>% layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.25, gridcolor = toRGB("red", alpha=0.25)),
title=paste0("<b>",prod.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
p
}else{
l<-list()
for(i in 2:length(zdf)){
tsi<-ts(zdf[,i], frequency=12, start =c(as.numeric(format(index(z[1,1]), "%Y")), as.numeric(format(index(z[1,1]), "%m"))))
trend<-stl(tsi, s.window = 15)$time.series[, 2]
p1 <- plot_ly() %>%
add_lines(x=zdf[,1], y=zdf[,i], type="scatter", mode = "lines", name=names(zdf[i]),
line=list(color=jBrewColors[i-1], width=1.0)) %>%
add_lines(x=zdf[,1], y=coredata(trend), type="scatter", mode = "lines",
name=paste("trend",names(zdf[i])), showlegend=FALSE, hoverinfo = "none",
line=list(color=jBrewColors[i-1], width=2.0)) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
p2 <- plot_ly() %>%
add_bars(x=zdf[,1], y=c(NA, diff(trend,1)), name=paste("Var",names(zdf[i])),
marker=list(color=jBrewColors[i-1])) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)))
l[[i-1]]<-subplot(p1, p2, nrows = 2, shareX = TRUE, heights = c(0.75,0.25))
}
subplot(l, nrows = ncol(zdf)-1, shareX = TRUE) %>%
layout(xaxis=list(title="Fecha", type="date", showline=TRUE, showgrid=TRUE,
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
yaxis=list(title=ytit, type="linear", showline=TRUE, showgrid=TRUE,
exponentformat="SI",
tickwidth=1, mirror = TRUE,
gridwidth=0.5, gridcolor = toRGB("red", alpha=0.5)),
title=paste0("<b>",prod.Sel,"</b>"), margin=list(t = 40, b=50),
font = list(size=16))
}
}
}) #, height = 750, width = 'auto')
output$producto.uisel<-renderUI({
product1.list<-NULL
for(f in input$familia.sel){
product1.list<-c(product1.list, products.pp[grep(paste0("^",f), products.pp)])
}
selectInput("product1.sel",
label = h4("Producto"),
choices = product1.list,
selected = c('GOIL.total', 'GSNA.total'),
multiple = TRUE)
})
output$plot2 <- renderPlotly({
if(!is.null(input$product1.sel)){
norm.Sel<-input$norm2.sel
xlim.Sel<-c(as.yearmon(paste0(input$fechas2.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas2.sel[2],"-01-01")))
if(fechas1.rango[2] == input$fechas2.sel[2]){xlim.Sel[2]<-pp.df$fecha[nrow(pp.df)]}
zpp<-pp.df %>% select(fecha, -anyo, -mes, one_of(input$product1.sel))
zpp<-zoo(x = select(zpp, -fecha), order.by=zpp$fecha)
if(norm.Sel){
nor<-sapply(window(zpp, start = xlim.Sel[1], end = xlim.Sel[1]+11/12),
function(x) ifelse(is.na(100/mean(x)), 1, 100/mean(x)))
if(NCOL(zpp)>1) nor<-matrix(rep(nor,each=NROW(zpp)),ncol=NCOL(zpp))
zpp<-zpp * nor
}
#breaks.zpp = xlim.Sel[1]+seq.int(0,(xlim.Sel[2]-xlim.Sel[1])*12, length.out = 12)/12
if(input$onefacet1.sel){
g<-autoplot(zpp, na.rm = TRUE, facets = NULL)
}else{
g<-autoplot(zpp, na.rm = TRUE)
if(NCOL(zpp)>1) g<-g + facet_free()
}
g<-g + scale_x_yearmon(limits=xlim.Sel, format = "%b %Y") #,breaks=breaks.zpp
g<-g + geom_line(size=0.5, na.rm = TRUE)
g<-g + geom_smooth(se=F, size=1, na.rm = TRUE)
g<-g + xlab("Fecha")+ggtitle("Consumo mensual")
if(norm.Sel) {
g<-g + ylab("%")
}else{
g<-g + ylab("kt/mes")
}
g<-g + theme(axis.text = element_text(size = 12),
plot.title = element_text(size = 16, face='bold'),
strip.text = element_text(size = 16, face='bold'),
axis.title.x = element_text(size = 14),
axis.title.y = element_text(size = 14),
panel.border = element_rect(linetype = 'solid', color = 'red', fill = NA),
strip.background = element_rect(linetype = 'solid', color = 'darkred', fill = 'gray'),
panel.grid.major= element_line(size = 0.25, colour = "red", linetype = "dotted"),
panel.grid.minor = element_blank(),
legend.position = 'bottom',
legend.text = element_text(size = 14),
legend.title=element_blank())
ggplotly(g)
}
}) #, height = 750, width = 'auto')
output$producto1.uisel<-renderUI({
product2.list<-NULL
for(f in input$familia1.sel){
product2.list<-c(product2.list, products.pp[grep(paste0("^",f), products.pp)])
}
selectInput("product2.sel",
label = h4("Producto"),
choices = product2.list,
selected = 'GOIL.total',
multiple = FALSE)
})
output$plot3 <- renderPlot({
if(!is.null(input$product2.sel)){
xlim.Sel<-c(as.yearmon(paste0(input$fechas3.sel[1],"-01-01")),
as.yearmon(paste0(input$fechas3.sel[2],"-01-01")))
if(fechas1.rango[2] == input$fechas3.sel[2]){xlim.Sel[2]<-pp.df$fecha[nrow(pp.df)]}
z<-pp.df %>% select(fecha, -anyo, -mes, one_of(input$product2.sel))
z<-zoo(x = select(z, -fecha), order.by=z$fecha)
z<-na.locf(z)
fit<-seas(as.ts(z), forecast.save = "fct", forecast.probability = 0.95)
zz0<-data.frame(fecha=as.character(as.yearmon(time(original(fit)))),
original=drop(coredata(original(fit))),
stringsAsFactors = FALSE)
zz<-data.frame(fecha=as.character(as.yearmon(time(final(fit)))),
outlier=coredata(outlier(fit)),
final=coredata(final(fit)),
trend=coredata(trend(fit)),
stringsAsFactors = FALSE)
zz<- left_join(zz, zz0, by='fecha')
zzf<-data.frame(fecha=as.character(as.yearmon(time(series(fit, 'forecast.forecasts')))),
series(fit, 'forecast.forecasts'),
stringsAsFactors = FALSE)
zz<- full_join(zz, zzf, by='fecha') %>% mutate(fecha=as.yearmon(fecha))
g<-ggplot(zz, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=original, color='original'), size=0.5, na.rm = TRUE)
g<-g + geom_text(aes(x=fecha, y=original, label=outlier), na.rm = TRUE)
g<-g + geom_label(aes(x=fecha, y=original, label=outlier, color='outlier'), na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=final, color='final'), size=1, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=trend, color='trend'), size=1.5, na.rm = TRUE)
g<-g + geom_line(aes(x=fecha, y=forecast, color='forecast'), size=1, na.rm = TRUE)
g<-g + geom_ribbon(aes(x=fecha, ymin = lowerci, ymax = upperci), fill = "grey70", alpha=0.5)
g<-g + scale_color_manual(values=c('original'='chartreuse4', 'final'='black', 'trend'='blue', 'forecast'='red', 'outlier'='orange1'),
breaks=c('original', 'final', 'trend', 'forecast', 'outlier'))
g<-g + ylab("kt")+ggtitle("Consumo mensual")
g<-g + scale_x_yearmon(limits=xlim.Sel)
g<-g + theme(panel.background = element_rect(colour = "red"),
plot.title = element_text(size = 16, face='bold', color='blue'),
panel.grid.major= element_line(size = 0.25, colour = "red", linetype = "dotted"),
axis.text.x = element_text(size = 14),
axis.title.x = element_blank(),
axis.title.y = element_text(size = 14),
legend.position = 'bottom',
legend.text = element_text(size = 14),
legend.title=element_blank())
g
}
}, height = 750, width = 'auto')
})
|
#' Level 1 CAT decision tree generator
#'
#' Generates a list of nodes lists for the first level of the CAT decision tree
#'
#' @param bank matrix of the item bank. Rows represent items, and columns
#' represent parameters. If the model is \code{"GRM"}, the first column
#' represents the \code{alpha} parameters and the next columns represent the
#' \code{beta} parameters. If the model is \code{"NRM"}, odd columns represent
#' the \code{alpha} parameters and even columns represent \code{beta}
#' parameters
#' @param crit item selection criterion. Options: "MEPV" for Minimum
#' Expected Posterior Variance and "MFI" for Maximum Fisher Information
#' @param dens_vec vector of the a priori density function values of the
#' evaluated ability levels
#' @param C vector of item capacities
#' @param nres vector of number of possible responses for every item
#' @param prob_array 3-D array of probability responses. Dim 1 represent items,
#' dim 2 represent evaluated ability levels and dim 3 represent possible
#' responses
#' @return A list of lists. Each of these lists represent a node of the first
#' level of the decision tree
#' @author Javier Rodr?guez-Cuadrado
#'
#' @export
create_level_1 = function(bank, crit, dens_vec, C, nres, prob_array) {
#Create the matrix of "associated values" for the linear programming solver
switch(crit,
MEPV = {
E = create_E_MEPV(bank, dens_vec, nres, prob_array, C)
minmax = F
},
MFI = {
E = create_E_MFI(bank, estimate(dens_vec)[[1]], nres, C)
minmax = T
}
)
#Calculate the item exposure in the nodes
X = item_selector(E, 1, C, minmax)
#Select those with non-zero exposure (the items selected for the first level
#nodes)
item_sel = which(X != 0)
#Initialise the list of node lists
nodes = list()
#Fill the node lists
for (i in 1:length(item_sel)) {
est = estimate(dens_vec) #Calculate the estimation and the SE
nodes[[i]] = create_node(10000+i, dens_vec, item_sel[i], c(),
est[[1]],
est[[2]],
data.frame(matrix(nrow = 0, ncol = 3)),
X[item_sel[i]], E[item_sel[i]])
colnames(nodes[[i]]$ID_sons) = c("ID_son", "Response", "Probability")
}
return(nodes) #Return the list of node lists
}
|
/R/create_level_1.R
|
no_license
|
cran/cat.dt
|
R
| false
| false
| 2,453
|
r
|
#' Level 1 CAT decision tree generator
#'
#' Generates a list of node lists for the first level of the CAT decision tree
#'
#' @param bank matrix of the item bank. Rows represent items, and columns
#' represent parameters. If the model is \code{"GRM"}, the first column
#' represents the \code{alpha} parameters and the next columns represent the
#' \code{beta} parameters. If the model is \code{"NRM"}, odd columns represent
#' the \code{alpha} parameters and even columns represent \code{beta}
#' parameters
#' @param crit item selection criterion. Options: "MEPV" for Minimum
#' Expected Posterior Variance and "MFI" for Maximum Fisher Information
#' @param dens_vec vector of the a priori density function values of the
#' evaluated ability levels
#' @param C vector of item capacities
#' @param nres vector of number of possible responses for every item
#' @param prob_array 3-D array of probability responses. Dim 1 represent items,
#' dim 2 represent evaluated ability levels and dim 3 represent possible
#' responses
#' @return A list of lists. Each of these lists represent a node of the first
#' level of the decision tree
#' @author Javier Rodríguez-Cuadrado
#'
#' @export
create_level_1 = function(bank, crit, dens_vec, C, nres, prob_array) {

  #Create the matrix of "associated values" for the linear programming solver.
  #minmax records whether the solver should maximise (MFI) or minimise (MEPV)
  switch(crit,
         MEPV = {
           E = create_E_MEPV(bank, dens_vec, nres, prob_array, C)
           minmax = FALSE
         },
         MFI = {
           E = create_E_MFI(bank, estimate(dens_vec)[[1]], nres, C)
           minmax = TRUE
         }
  )

  #Calculate the item exposure in the nodes
  X = item_selector(E, 1, C, minmax)

  #Select those with non-zero exposure (the items selected for the first level
  #nodes)
  item_sel = which(X != 0)

  #The estimation and SE depend only on the prior density, so compute them
  #once instead of once per node
  est = estimate(dens_vec)

  #Pre-allocate the list of node lists
  nodes = vector("list", length(item_sel))

  #Fill the node lists
  for (i in seq_along(item_sel)) {

    nodes[[i]] = create_node(10000+i, dens_vec, item_sel[i], c(),
                             est[[1]],
                             est[[2]],
                             data.frame(matrix(nrow = 0, ncol = 3)),
                             X[item_sel[i]], E[item_sel[i]])

    colnames(nodes[[i]]$ID_sons) = c("ID_son", "Response", "Probability")

  }

  return(nodes) #Return the list of node lists

}
|
\name{findVar}
\alias{findVar}
\title{Recursively explore a list}
\usage{
findVar(object, pattern, ...)
}
\arguments{
\item{object}{A list.}
\item{pattern}{a function (must return a logical).}
\item{...}{Optional arguments to be passed to
\code{grepl}.}
}
\value{
A list with the desired variable (\code{$var}) and the
results of the function \code{fun} at the matching depth.
Returns \code{NULL} if no match.
}
\description{
Recursively explores a 'list' object until a variable name
matches the given pattern.
}
\examples{
l <- list(a=1, b=list(c=2))
findVar(l, grepl, pattern="a")
findVar(l, grepl, pattern="b")
findVar(l, grepl, pattern="c")
is.null(findVar(l, grepl, pattern="C")) # TRUE
findVar(l, grepl, pattern="C", ignore.case=TRUE)
}
\seealso{
Other checkUtils: \code{\link{checkVar}};
\code{\link{findDefaultVars}}; \code{\link{findVars}}
}
\keyword{internal}
|
/man/findVar.Rd
|
no_license
|
SESjo/SES
|
R
| false
| false
| 884
|
rd
|
\name{findVar}
\alias{findVar}
\title{Recursively explore a list}
\usage{
findVar(object, pattern, ...)
}
\arguments{
\item{object}{A list.}
\item{pattern}{a function (must return a logical).}
\item{...}{Optional arguments to be passed to
\code{grepl}.}
}
\value{
A list with the desired variable (\code{$var}) and the
results of the function \code{fun} at the matching depth.
Returns \code{NULL} if no match.
}
\description{
Recursively explores a 'list' object until a variable name
matches the given pattern.
}
\examples{
l <- list(a=1, b=list(c=2))
findVar(l, grepl, pattern="a")
findVar(l, grepl, pattern="b")
findVar(l, grepl, pattern="c")
is.null(findVar(l, grepl, pattern="C")) # TRUE
findVar(l, grepl, pattern="C", ignore.case=TRUE)
}
\seealso{
Other checkUtils: \code{\link{checkVar}};
\code{\link{findDefaultVars}}; \code{\link{findVars}}
}
\keyword{internal}
|
# UI for the "Interactive Normal Distribution" Shiny app: the user chooses the
# sample size, mean, SD and plot colour; the server renders a histogram, a
# density line, a summary and a table, plus a download of the generated data.
library(shiny)
shinyUI(
  fluidPage(
    titlePanel("Interactive Normal Distribution"),
    sidebarLayout(
      sidebarPanel(
        # `value` spelled out in full (was `val`, which only worked through
        # partial argument matching)
        sliderInput("obs", "Select the number of observations", min = 1, max = 1000, value = 100),
        br(),
        sliderInput("mean", "Select the mean for the Normal Distribution", min = -10, max = 10, value = 0),
        br(),
        # "Disribution" typo fixed in the label below
        sliderInput("sd", "Select the standard deviation of the Normal Distribution", min = 0, max = 10, value = 1),
        br(),
        radioButtons("colour", "Select the colour of the histogram or line drawing",
                     choices=c("Blue" = "cadetblue3",
                               "Purple" = "violet",
                               "Green" = "darkseagreen")),
        downloadButton("down", "Download"),
        br(),
        helpText("Click the download button to download the data currently being shown")
      ),
      mainPanel(
        tabsetPanel(type = "tabs",
                    tabPanel("Histogram", plotOutput("plot")),
                    tabPanel("Line Drawing", plotOutput("line"), helpText("Note that the number of observations
                                                       no longer matters; this is simply displaying the
                                                       distribution with the specified mean and SD.")),
                    tabPanel("Summary", verbatimTextOutput("summary")),
                    tabPanel("Table of Generated Values", tableOutput("table")))
      )
    )
  )
)
|
/InteractiveNormalDistribution/ui.r
|
permissive
|
melissavanbussel/shiny-apps
|
R
| false
| false
| 1,525
|
r
|
# UI for the "Interactive Normal Distribution" Shiny app: the user chooses the
# sample size, mean, SD and plot colour; the server renders a histogram, a
# density line, a summary and a table, plus a download of the generated data.
library(shiny)
shinyUI(
  fluidPage(
    titlePanel("Interactive Normal Distribution"),
    sidebarLayout(
      sidebarPanel(
        # `value` spelled out in full (was `val`, which only worked through
        # partial argument matching)
        sliderInput("obs", "Select the number of observations", min = 1, max = 1000, value = 100),
        br(),
        sliderInput("mean", "Select the mean for the Normal Distribution", min = -10, max = 10, value = 0),
        br(),
        # "Disribution" typo fixed in the label below
        sliderInput("sd", "Select the standard deviation of the Normal Distribution", min = 0, max = 10, value = 1),
        br(),
        radioButtons("colour", "Select the colour of the histogram or line drawing",
                     choices=c("Blue" = "cadetblue3",
                               "Purple" = "violet",
                               "Green" = "darkseagreen")),
        downloadButton("down", "Download"),
        br(),
        helpText("Click the download button to download the data currently being shown")
      ),
      mainPanel(
        tabsetPanel(type = "tabs",
                    tabPanel("Histogram", plotOutput("plot")),
                    tabPanel("Line Drawing", plotOutput("line"), helpText("Note that the number of observations
                                                       no longer matters; this is simply displaying the
                                                       distribution with the specified mean and SD.")),
                    tabPanel("Summary", verbatimTextOutput("summary")),
                    tabPanel("Table of Generated Values", tableOutput("table")))
      )
    )
  )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_parameter_estimation_functions.R
\name{load_trial_data}
\alias{load_trial_data}
\title{Function to load the file containing trial data and return it}
\usage{
load_trial_data(file = NULL, sheet = NULL)
}
\arguments{
\item{file}{name of the file in full}
\item{sheet}{name of the sheet if an Excel workbook is given}
}
\value{
trial data if success, else -1
}
\description{
Function to load the file containing trial data and return it
}
\examples{
load_trial_data(system.file("extdata", "trial_data.csv",
package = "packDAMipd"
))
}
|
/man/load_trial_data.Rd
|
no_license
|
sheejamk/packDAMipd
|
R
| false
| true
| 618
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/help_parameter_estimation_functions.R
\name{load_trial_data}
\alias{load_trial_data}
\title{Function to load the file containing trial data and return it}
\usage{
load_trial_data(file = NULL, sheet = NULL)
}
\arguments{
\item{file}{name of the file in full}
\item{sheet}{name of the sheet if an Excel workbook is given}
}
\value{
trial data if success, else -1
}
\description{
Function to load the file containing trial data and return it
}
\examples{
load_trial_data(system.file("extdata", "trial_data.csv",
package = "packDAMipd"
))
}
|
# Fuzzer-generated regression test (AFL/valgrind harness): replays a fixed,
# previously-problematic input against the internal C++ routine
# myTAI:::cpp_bootMatrix and prints the result's structure.
# - AgeVector: numeric vector of extreme/denormal values
# - ExpressionMatrix: 5 x 7 numeric matrix (see .Dim) of extreme values
# - permutations: fuzzer-chosen permutation count
# Do not edit the literals: they encode the exact crashing input.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58574658665472e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Invoke the function under test with the fuzzer-provided argument list
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
# Force inspection of the returned object
str(result)
|
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615768303-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 1,803
|
r
|
# Fuzzer-generated regression test (AFL/valgrind harness): replays a fixed,
# previously-problematic input against the internal C++ routine
# myTAI:::cpp_bootMatrix and prints the result's structure.
# - AgeVector: numeric vector of extreme/denormal values
# - ExpressionMatrix: 5 x 7 numeric matrix (see .Dim) of extreme values
# - permutations: fuzzer-chosen permutation count
# Do not edit the literals: they encode the exact crashing input.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58574658665472e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Invoke the function under test with the fuzzer-provided argument list
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
# Force inspection of the returned object
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wtd.rowMeans.R
\name{wtd.rowMeans}
\alias{wtd.rowMeans}
\title{Weighted Mean of each Row - WORK IN PROGRESS}
\usage{
wtd.rowMeans(x, wts = 1, na.rm = FALSE, dims = 1)
}
\arguments{
\item{x}{Data.frame or matrix, required.}
\item{wts}{Weights, optional, defaults to 1 which is unweighted, numeric vector of length equal to number of columns}
\item{na.rm}{Logical value, optional, FALSE by default (see Usage). Defines whether NA values should be removed before the result is found. Otherwise the result will be NA when any NA is in a vector.}
\item{dims}{dims=1 is default. Not used. integer: Which dimensions are regarded as 'rows' or 'columns' to sum over. For row*, the sum or mean is over dimensions dims+1, ...; for col* it is over dimensions 1:dims.}
}
\value{
Returns a vector of numbers of length equal to number of rows in df.
}
\description{
Returns weighted mean of each row of a data.frame or matrix, based on specified weights, one weight per column.
}
\examples{
x=data.frame(a=c(NA, 2:10), b=rep(100,10), c=rep(3,10))
w=c(1.1, 2, NA)
cbind(x, wtd.rowMeans(x, w) )
cbind(x, wtd.rowSums(x, w) )
x=data.frame(a=c(NA, 2:4), b=rep(100,4), c=rep(3,4))
w=c(1.1, 2, NA, 0)
print(cbind(x,w, wtd=w*x))
print(wtd.colMeans(x, w, na.rm=TRUE))
#rbind(cbind(x,w,wtd=w*x), c(wtd.colMeans(x,w,na.rm=TRUE), 'wtd.colMeans', rep(NA,length(w))))
x=data.frame(a=c(NA, 2:10), b=rep(100,10), c=rep(3,10))
w=c(1.1, 2, NA, rep(1, 7))
print(cbind(x,w, wtd=w*x))
rbind(cbind(x, w), cbind(wtd.colMeans(x, w, na.rm=TRUE), w='wtd.colMeans') )
print(w*cbind(x,w))
}
\seealso{
\code{\link{wtd.colMeans}} \code{\link{wtd.rowMeans}} \code{\link{wtd.rowSums}} \code{\link{rowMaxs}} \code{\link{rowMins}} \code{\link{colMins}}
}
|
/man/wtd.rowMeans.Rd
|
no_license
|
ejanalysis/analyze.stuff
|
R
| false
| true
| 1,767
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wtd.rowMeans.R
\name{wtd.rowMeans}
\alias{wtd.rowMeans}
\title{Weighted Mean of each Row - WORK IN PROGRESS}
\usage{
wtd.rowMeans(x, wts = 1, na.rm = FALSE, dims = 1)
}
\arguments{
\item{x}{Data.frame or matrix, required.}
\item{wts}{Weights, optional, defaults to 1 which is unweighted, numeric vector of length equal to number of columns}
\item{na.rm}{Logical value, optional, FALSE by default (see Usage). Defines whether NA values should be removed before the result is found. Otherwise the result will be NA when any NA is in a vector.}
\item{dims}{dims=1 is default. Not used. integer: Which dimensions are regarded as 'rows' or 'columns' to sum over. For row*, the sum or mean is over dimensions dims+1, ...; for col* it is over dimensions 1:dims.}
}
\value{
Returns a vector of numbers of length equal to number of rows in df.
}
\description{
Returns weighted mean of each row of a data.frame or matrix, based on specified weights, one weight per column.
}
\examples{
x=data.frame(a=c(NA, 2:10), b=rep(100,10), c=rep(3,10))
w=c(1.1, 2, NA)
cbind(x, wtd.rowMeans(x, w) )
cbind(x, wtd.rowSums(x, w) )
x=data.frame(a=c(NA, 2:4), b=rep(100,4), c=rep(3,4))
w=c(1.1, 2, NA, 0)
print(cbind(x,w, wtd=w*x))
print(wtd.colMeans(x, w, na.rm=TRUE))
#rbind(cbind(x,w,wtd=w*x), c(wtd.colMeans(x,w,na.rm=TRUE), 'wtd.colMeans', rep(NA,length(w))))
x=data.frame(a=c(NA, 2:10), b=rep(100,10), c=rep(3,10))
w=c(1.1, 2, NA, rep(1, 7))
print(cbind(x,w, wtd=w*x))
rbind(cbind(x, w), cbind(wtd.colMeans(x, w, na.rm=TRUE), w='wtd.colMeans') )
print(w*cbind(x,w))
}
\seealso{
\code{\link{wtd.colMeans}} \code{\link{wtd.rowMeans}} \code{\link{wtd.rowSums}} \code{\link{rowMaxs}} \code{\link{rowMins}} \code{\link{colMins}}
}
|
# Generate all panels of paper figure 2.
#
# Creates figs/paper_figs/fig2 if needed, then renders panels 2c-2f by
# delegating to the panel-specific functions below. No value is returned;
# output is written to disk.
generate_figure2_plots <- function() {
  fig_dir <- "figs/paper_figs/fig2"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }

  # fig 2c - atlas projection of the chimera cells
  atlas_projection_tko_chimera()
  # fig 2d - inferred developmental time of KO vs host/control cells
  host_vs_ko_age(TRUE)
  # fig 2e - per-embryo cell-type composition barplots (KO and host/control)
  tko_barplot_ct_frequency(mat_nm = "tko_chim_wt10", ko_type = "KO", plot_pdf = TRUE)
  tko_barplot_ct_frequency(mat_nm = "tko_chim_wt10", ko_type = c("control", "host"), plot_pdf = TRUE, tag = "control_host")
  # fig 2f - per-cell-type frequency dot plots (chimera and tetraploid)
  plot_chimera_dotplots()
  plot_tetraploid_dotplots()
}
# Project TKO chimera cells onto the WT10 atlas 2-D map (fig 2c).
#
# Writes two projection images under figs/paper_figs/fig2: one for the KO
# fraction and one for the host fraction of the chimera cells.
#
# plot_pdf - if TRUE write PDFs (sizes in inches), otherwise PNGs (pixels).
atlas_projection_tko_chimera <- function(plot_pdf = FALSE) {
  mat_chim <- scdb_mat("tko_chim_wt10")
  gset <- scdb_gset("tko_chim_wt10")
  feat_genes <- names(gset@gene_set)

  # Split the chimera cells by their genotype annotation.
  # NOTE(review): despite the "host_control" output file name, only cells
  # annotated "host" are selected here - confirm that "control" cells are
  # deliberately excluded.
  cell_types <- mat_chim@cell_metadata[colnames(mat_chim@mat), "cell_type"]
  ko_cls <- colnames(mat_chim@mat)[cell_types == "KO"]
  host_cls <- colnames(mat_chim@mat)[cell_types == "host"]

  fig_dir <- "figs/paper_figs/fig2"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }

  mat_query <- mat_chim@mat[, ko_cls]
  fn <- "figs/paper_figs/fig2/atlas_projection_tko_chim_ko_cls_new_12.png"
  w <- 1000
  h <- 1000
  if (plot_pdf) {
    # pdf() expects inches whereas png() expects pixels (assume 72 dpi);
    # fixed = TRUE avoids treating "." as a regex wildcard
    fn <- gsub(pattern = ".png", replacement = ".pdf", x = fn, fixed = TRUE)
    w <- 1000 / 72
    h <- 1000 / 72
  }
  atlas_proj_on_wt10(mat_query = mat_query, feat_genes = feat_genes, fn = fn, cex_points = 1.2, w = w, h = h, plot_pdf = plot_pdf, plot_gray_background = FALSE)

  # Same projection for the host cells; convert the extension for PDF output
  # consistently with the KO panel above (previously the .png name was kept)
  mat_query <- mat_chim@mat[, host_cls]
  fn <- "figs/paper_figs/fig2/atlas_projection_tko_chim_host_control_cls_new_12.png"
  if (plot_pdf) {
    fn <- gsub(pattern = ".png", replacement = ".pdf", x = fn, fixed = TRUE)
  }
  atlas_proj_on_wt10(mat_query = mat_query, feat_genes = feat_genes, fn = fn, cex_points = 1.2, w = w, h = h, plot_pdf = plot_pdf, plot_gray_background = FALSE)
}
# Scatter plot of inferred developmental time: KO cells vs host/control cells,
# one point per chimera embryo (fig 2d). The dashed diagonal marks equal time.
#
# plot_pdf - if TRUE write a PDF, otherwise a PNG.
host_vs_ko_age <- function(plot_pdf = FALSE) {
  # Minimum number of cells per fraction for an embryo to be shown
  n_cls_min <- 20
  rank_to_time <- read.table(file = "data/wt10_transcriptional_rank_developmental_time.txt", stringsAsFactors = FALSE, header = TRUE, sep = "\t")

  fig_dir <- "figs/paper_figs/fig2"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }

  chimera_age <- read.table("data/tko_chim_wt10/time_match/time_match_summary.txt", sep = "\t", stringsAsFactors = FALSE, header = TRUE)
  # Keep embryos with enough cells in both the host/control and KO fractions
  f <- (chimera_age$control + chimera_age$host >= n_cls_min) & (chimera_age$KO >= n_cls_min)

  # Axis limits in embryonic time (Et)
  time_min <- 6.9
  time_max <- 8.2

  if (plot_pdf) {
    pdf(sprintf("%s/best_time_ko_vs_host_control.pdf", fig_dir), useDingbats = FALSE)
  } else {
    png(sprintf("%s/best_time_ko_vs_host_control.png", fig_dir))
  }
  # Translate the best-matching WT transcriptional ranks to developmental time
  plot(rank_to_time$developmental_time[chimera_age$best_rank_ko[f]],
    rank_to_time$developmental_time[chimera_age$best_rank_host_control[f]],
    pch = 19,
    xlim = c(time_min, time_max), ylim = c(time_min, time_max), main = "KO vs host/control",
    xlab = "Time KO cells", ylab = "Time host/control cells", cex = 4, cex.lab = 1
  )
  abline(a = 0, b = 1, lty = "dashed")
  dev.off()
}
# Stacked barplot of per-embryo cell-type frequencies for a chimera matrix
# (fig 2e).
#
# mat_nm   - name of the scdb matrix (e.g. "tko_chim_wt10")
# ko_type  - which cell_type annotations to include ("KO" or c("control", "host"))
# plot_pdf - if TRUE write a PDF, otherwise a PNG
# tag      - suffix used in the output file name
#
# Embryos are ordered by their inferred developmental rank. Artificial
# "spaceN" pseudo cell types (plotted white) are interleaved between
# cell-type groups purely to create visual gaps between bar segments.
tko_barplot_ct_frequency <- function(mat_nm, ko_type = "KO", plot_pdf = F, tag = "KO") {
  # Minimum number of cells per fraction for an embryo to be plotted
  n_cls_min <- 19

  fig_dir <- "figs/paper_figs/fig2"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }

  df_chim <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", stringsAsFactors = F, h = T)
  rownames(df_chim) <- df_chim$embryo

  mat <- scdb_mat(mat_nm)
  mc_wt <- scdb_mc("sing_emb_wt10_recolored")
  # Lookup tables: cell-type colour -> rank in the colour key / group name
  col_to_rank <- c(1:nrow(mc_wt@color_key))
  names(col_to_rank) <- mc_wt@color_key$color
  col_to_ct <- mc_wt@color_key$group
  names(col_to_ct) <- mc_wt@color_key$color

  # These two colours are excluded from the bars
  # (presumably visceral/extraembryonic endoderm - see the sibling
  # frequency functions; TODO confirm)
  excluded_colors <- c("#F6BFCB", "#7F6874")
  included_colors <- setdiff(unique(mc_wt@color_key$color), excluded_colors)

  # Keep embryos with enough cells in both fractions, ordered by rank
  chim_embryos <- df_chim$embryo[(df_chim$KO > n_cls_min) & (df_chim$control + df_chim$host > n_cls_min)]
  chim_embryos <- chim_embryos[order(df_chim[chim_embryos, "best_rank_host_control"])]

  # NOTE(review): `tmp` is initialised but never used afterwards
  tmp <- matrix(0, nrow = length(chim_embryos), ncol = length(included_colors))
  rownames(tmp) <- chim_embryos
  colnames(tmp) <- included_colors

  # Loads `cmp_annot` (per-cell colour annotation) into the function frame
  load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))
  query_cls_col <- cmp_annot$query_cls_col
  query_cls <- names(query_cls_col)[!(query_cls_col %in% excluded_colors)]
  query_cls <- query_cls[mat@cell_metadata[query_cls, "embryo"] %in% chim_embryos]

  # Cell-type levels with "spaceN" spacers inserted between colour groups
  modified_cell_type_levels <- c(
    mc_wt@color_key$group[1:7], c("space1"),
    mc_wt@color_key$group[c(9, 10, 11)], c("space2"),
    mc_wt@color_key$group[c(8, 12, 13, 14, 15, 16)], c("space3"),
    mc_wt@color_key$group[c(17, 18, 19)], c("space4"),
    mc_wt@color_key$group[c(20:23)], c("space5"),
    mc_wt@color_key$group[c(24:27)]
  )
  # Matching colours; spacers are white so they read as gaps
  modified_colors <- c(
    mc_wt@color_key$color[1:7], c("white"),
    mc_wt@color_key$color[c(9, 10, 11)], c("white"),
    mc_wt@color_key$color[c(8, 12, 13, 14, 15, 16)], c("white"),
    mc_wt@color_key$color[c(17, 18, 19)], c("white"),
    mc_wt@color_key$color[c(20:23)], c("white"),
    mc_wt@color_key$color[c(24:27)]
  )

  # Restrict to the requested genotype fraction and cross-tabulate
  # embryo x cell type
  filtered_cls <- query_cls[mat@cell_metadata[query_cls, "cell_type"] %in% ko_type]
  filtered_vs_ct <- table(factor(x = mat@cell_metadata[filtered_cls, "embryo"], levels = chim_embryos), factor(x = col_to_ct[query_cls_col[filtered_cls]], levels = modified_cell_type_levels))

  # Row-normalise to frequencies; embryos without cells give NA -> set to 0
  filtered_vs_ct_n <- filtered_vs_ct / rowSums(filtered_vs_ct)
  filtered_vs_ct_n[is.na(filtered_vs_ct_n)] <- 0
  # Give the spacer columns a small fixed weight so the white gaps are
  # visible (larger in the first two rows), then renormalise
  filtered_vs_ct_n[1:2, c("space1", "space5")] <- 0.04
  filtered_vs_ct_n[3:nrow(filtered_vs_ct_n), c("space1", "space3", "space4", "space5")] <- 0.02
  filtered_vs_ct_n <- filtered_vs_ct_n / rowSums(filtered_vs_ct_n)

  if (plot_pdf) {
    pdf(sprintf("%s/barplot_ct_freq_%s.pdf", fig_dir, tag), w = 12, h = 7.5, useDingbats = F)
    barplot(t(filtered_vs_ct_n), col = modified_colors, las = 2, axes = F, axisnames = F, border = NA)
    dev.off()
  } else {
    png(sprintf("%s/barplot_ct_freq_%s.png", fig_dir, tag), w = 1250, h = 750)
    barplot(t(filtered_vs_ct_n), col = modified_colors, las = 2, axes = F, axisnames = F, border = NA)
    dev.off()
  }
}
# Dot plots of per-embryo cell-type frequencies in TKO chimeras (fig 2f):
# TKO vs host/control vs WT, one panel per highlighted cell type, plus a
# combined 4x4 grid.
#
# plot_pdf                            - write PDFs (TRUE) or PNGs (FALSE)
# included_transcriptional_ranks      - WT ranks defining the time window;
#                                       defaults to 125:153
# highlighted_colors                  - cell-type colours to plot; defaults to
#                                       16 selected types from the WT10 key
# minimal_number_of_cells_for_p_value - embryos are downsampled to this count
#                                       before the significance tests
#
# Group differences per cell type are assessed with Wilcoxon rank-sum tests on
# downsampled frequencies, corrected across cell types via qvalue(pi0 = 1).
# NOTE(review): relies on ggplot2/ggpubr/qvalue/gridExtra being attached by
# the surrounding project - confirm before reuse.
plot_chimera_dotplots <- function(plot_pdf = T, included_transcriptional_ranks = NULL, highlighted_colors = NULL, minimal_number_of_cells_for_p_value = 100) {
  ko_color <- "indianred3"
  host_color <- "gray30"
  mat_nm <- "tko_chim_wt10"
  mc_wt <- scdb_mc("sing_emb_wt10_recolored")
  if (is.null(included_transcriptional_ranks)) {
    included_transcriptional_ranks <- c(125:153)
  }
  if (is.null(highlighted_colors)) {
    highlighted_colors <- mc_wt@color_key$color[c(2, 3, 5, 6, 8, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24, 27)]
  }

  # Frequencies used for plotting (lenient 20-cell threshold, no downsampling)
  chim_freq <- chimera_dotplot_frequencies(mat_nm = mat_nm, minimal_number_of_cells = 20, included_transcriptional_ranks = included_transcriptional_ranks)

  # NOTE(review): the assignments below repeat the ones at the top of the
  # function; kept as-is
  mc_wt <- scdb_mc("sing_emb_wt10_recolored")
  col_to_ct <- mc_wt@color_key$group
  col_to_ct[22] <- "Blood"
  names(col_to_ct) <- mc_wt@color_key$color

  ko_color <- "indianred3"
  host_color <- "gray30"
  mat_nm <- "tko_chim_wt10"

  fig_dir <- "figs/paper_figs/fig2"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }
  fig_dir <- "figs/paper_figs/fig2/cell_type_dot_plots"
  if (!dir.exists(fig_dir)) {
    dir.create(fig_dir)
  }

  tko_freq_n <- chim_freq$tko
  host_freq_n <- chim_freq$host
  wt_freq_n <- chim_freq$wt

  # Frequencies used for the statistics: every embryo/rank is downsampled to
  # the same cell number so the Wilcoxon tests compare equal sampling depths
  chim_freq_for_p_value_calculation <- chimera_dotplot_frequencies(mat_nm = mat_nm, minimal_number_of_cells = minimal_number_of_cells_for_p_value, downsample_number_of_cells = minimal_number_of_cells_for_p_value)

  tko_freq_ds <- chim_freq_for_p_value_calculation$tko
  host_freq_ds <- chim_freq_for_p_value_calculation$host
  wt_freq_ds <- chim_freq_for_p_value_calculation$wt

  # Per-cell-type Wilcoxon rank-sum tests for the three pairwise comparisons
  tko_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
    p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
    return(p_val$p.value)
  })

  tko_to_host_p_values <- sapply(highlighted_colors, function(ct_col) {
    p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = host_freq_ds[, ct_col])
    return(p_val$p.value)
  })

  host_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
    p_val <- wilcox.test(x = host_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
    return(p_val$p.value)
  })

  # Multiple-testing correction across cell types; qvalue() needs more than
  # one p-value, so fall back to the raw p-values for a single cell type
  if (length(highlighted_colors) > 1) {
    q_val_tko <- qvalue(p = tko_to_wt_p_values, pi0 = 1)
    q_val_host <- qvalue(p = host_to_wt_p_values, pi0 = 1)
    q_val_tko_to_host <- qvalue(p = tko_to_host_p_values, pi0 = 1)
  } else {
    q_val_tko <- list(qvalues = tko_to_wt_p_values)
    q_val_host <- list(qvalues = host_to_wt_p_values)
    q_val_tko_to_host <- list(qvalues = tko_to_host_p_values)
  }

  # Map q-values to the significance labels shown in the plots
  q_to_signif <- function(v) {
    v_signif <- sapply(v, function(x) {
      if (x >= 0.05) {
        a <- "ns"
      } else {
        a <- "*"
      }
      return(a)
    })
    return(v_signif)
  }

  genotype_color <- c("TKO" = ko_color, "Host/Control" = host_color, "WT" = "gray70")

  # Long-format table of all pairwise comparisons, consumed per cell type by
  # ggpubr::stat_pvalue_manual below
  stat_comparison <- data.frame(
    group1 = c(rep("TKO", length(highlighted_colors)), rep("Host/Control", length(highlighted_colors)), rep("TKO", length(highlighted_colors))),
    group2 = c(rep("Host/Control", length(highlighted_colors)), rep("WT", length(highlighted_colors)), rep("WT", length(highlighted_colors))),
    cell_type = c(col_to_ct[names(q_val_tko_to_host$qvalues)], col_to_ct[names(q_val_host$qvalues)], col_to_ct[names(q_val_tko$qvalues)]),
    cell_type_color = c(names(q_val_tko_to_host$qvalues), names(q_val_host$qvalues), names(q_val_tko$qvalues)),
    q.val = c(q_val_tko_to_host$qvalues, q_val_host$qvalues, q_val_tko$qvalues),
    q.signif = c(q_to_signif(q_val_tko_to_host$qvalues), q_to_signif(q_val_host$qvalues), q_to_signif(q_val_tko$qvalues)), stringsAsFactors = F
  )

  plot_list <- list()
  # Shorter display names for two long cell-type labels
  col_to_ct[20] <- "Haematoendothelial prog."
  col_to_ct[14] <- "Later. & interm. mesoderm"
  # One dot plot per highlighted cell type, saved individually
  for (ct_col in highlighted_colors) {
    main_tag <- gsub("/", "_", col_to_ct[ct_col])

    df_plot_points <- data.frame(
      genotype = factor(x = c(rep("TKO", nrow(tko_freq_n)), rep("Host/Control", nrow(host_freq_n)), rep("WT", nrow(wt_freq_n))), levels = c("TKO", "Host/Control", "WT")),
      freq = c(tko_freq_n[, ct_col], host_freq_n[, ct_col], wt_freq_n[, ct_col])
    )

    my_comparisons <- list(c("TKO", "Host/Control"), c("TKO", "WT"), c("Host/Control", "WT"))

    stat.test <- compare_means(data = df_plot_points, formula = freq ~ genotype)

    stat_f <- stat_comparison[stat_comparison$cell_type_color == ct_col, ]

    p <- ggplot(data = df_plot_points, aes(x = genotype, y = freq)) +
      geom_dotplot(aes(fill = genotype), dotsize = 1.3, binaxis = "y", stackdir = "center", show.legend = F) +
      stat_pvalue_manual(stat_f, y.position = max(df_plot_points$freq) * 1.1, step.increase = 0.1, label = "q.signif") +
      scale_fill_manual(values = genotype_color) +
      ggtitle(label = main_tag) +
      theme(plot.title = element_text(hjust = 0.5, size = 10)) +
      ylab("") +
      ylim(0, max(df_plot_points$freq) * 1.4) +
      xlab("")
    # theme(axis.text.x = element_text(size=14))
    # stat_compare_means(label = "p.signif",comparisons = my_comparisons) +

    plot_list[[ct_col]] <- p

    if (plot_pdf) {
      ggsave(filename = sprintf("%s/2N_%s.pdf", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
    } else {
      ggsave(filename = sprintf("%s/2N_%s.png", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
    }
  }

  # Combined grid of all panels (sized for the default 16 cell types)
  p_all <- grid.arrange(grobs = plot_list, ncol = 4, nrow = 4)

  if (plot_pdf) {
    ggsave(filename = sprintf("%s/2N_all_cell_types.pdf", fig_dir), width = 8.5, height = 6.5, plot = p_all)
  } else {
    ggsave(filename = sprintf("%s/2N_all_cell_types.png", fig_dir), width = 8.5, height = 6.5, plot = p_all)
  }
}
# Collapse the blood sub-lineage colours onto a single one: Blood progenitors
# ("#c9a997") and Erythroid 2 ("#EF4E22") are both recoded as the Erythroid 1
# colour ("#C72228"). Names of the input vector are preserved; the result is
# always a plain character vector.
aggregate_blood_subtypes_into_one_type <- function(color_vector) {
  saved_names <- names(color_vector)
  recoded <- as.character(color_vector)
  recoded[recoded == "#c9a997" | recoded == "#EF4E22"] <- "#C72228"
  names(recoded) <- saved_names
  return(recoded)
}
# Downsample cells to a fixed number per metadata level.
#
# cells          - vector of cell identifiers
# cells_metadata - parallel vector of grouping labels (e.g. embryo)
# n_downsample   - number of cells to keep from every group; groups with
#                  fewer cells are dropped entirely
# seed           - if non-NULL, the RNG is re-seeded before sampling each
#                  group, making the selection reproducible
#
# Returns the sampled cell identifiers (names carry the tapply group labels).
downsample_cells_indexed_by_metadata <- function(cells, cells_metadata, n_downsample, seed = NULL) {
  group_sizes <- table(cells_metadata)
  big_enough <- names(group_sizes)[group_sizes >= n_downsample]
  keep <- cells_metadata %in% big_enough
  kept_cells <- cells[keep]
  kept_meta <- cells_metadata[keep]
  per_group <- tapply(kept_cells, kept_meta, function(group_cells) {
    if (!is.null(seed)) {
      set.seed(seed)
    }
    sample(group_cells, size = n_downsample)
  })
  return(unlist(per_group))
}
# Compute per-embryo cell-type frequency tables for a chimera experiment.
#
# Returns a list of three embryo-by-cell-type frequency matrices:
#   $wt   - wild-type atlas embryos (rows indexed by transcriptional rank)
#   $tko  - the KO fraction of each chimera embryo
#   $host - the host/control fraction of each chimera embryo
#
# mat_nm                         - scdb matrix name of the chimera experiment
# minimal_number_of_cells        - embryos need at least this many cells in
#                                  both fractions to be included
# downsample_number_of_cells     - if non-NULL, each embryo/rank is downsampled
#                                  to exactly this many cells (must be <=
#                                  minimal_number_of_cells)
# included_transcriptional_ranks - WT ranks defining the time window
chimera_dotplot_frequencies <- function(mat_nm, minimal_number_of_cells = 20, downsample_number_of_cells = NULL, included_transcriptional_ranks = c(125:153)) {
  mat_chim <- scdb_mat(mat_nm)

  # time window Et7.75 - Et8.1
  df_chim <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", h = T, stringsAsFactors = F)
  rownames(df_chim) <- df_chim$embryo
  # The TKO chimera summary splits the non-KO fraction into "control" and
  # "host" columns; other experiments have a single "host" column (their
  # first column holds the KO-type count)
  if (mat_nm == "tko_chim_wt10") {
    f <- (df_chim$control + df_chim$host >= minimal_number_of_cells) & (df_chim$KO >= minimal_number_of_cells)
  } else {
    f <- (df_chim$host >= minimal_number_of_cells) & (df_chim[, 1] >= minimal_number_of_cells)
  }
  df_chim <- df_chim[f, ]
  # Keep only embryos whose best-matching WT rank falls in the time window
  if (mat_nm == "tko_chim_wt10") {
    f <- df_chim[, "best_rank_host_control"] %in% included_transcriptional_ranks
  } else {
    f <- df_chim[, "best_rank_host"] %in% included_transcriptional_ranks
  }
  df_chim <- df_chim[f, ]
  included_chimeras <- df_chim$embryo

  mc_wt <- scdb_mc("sing_emb_wt10_recolored")
  # visceral and extraembryonic endoderm are excluded
  included_colors <- mc_wt@color_key$color[1:27]

  # Loads `cmp_annot` (per-cell colour annotation) into the function frame
  load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))

  # Modify blood subtypes color to one color
  query_cells_color <- aggregate_blood_subtypes_into_one_type(cmp_annot$query_cls_col)
  wt_cells_color <- mc_wt@colors[mc_wt@mc]
  names(wt_cells_color) <- names(mc_wt@mc)
  wt_cells_color <- aggregate_blood_subtypes_into_one_type(wt_cells_color)
  # remove Blood progenitors and Erythroid 2 from color levels
  included_colors <- included_colors[-c(21, 23)]

  query_cells_color <- query_cells_color[query_cells_color %in% included_colors]
  wt_cells_color <- wt_cells_color[wt_cells_color %in% included_colors]

  # Split query cells into the KO fraction (any of the KO genotypes) and the
  # host/control fraction; WT cells come from the atlas metacell cover
  ko_cells <- names(query_cells_color)[mat_chim@cell_metadata[names(query_cells_color), "cell_type"] %in% c("KO", "DKO12", "DKO13", "DKO23")]
  host_cells <- names(query_cells_color)[mat_chim@cell_metadata[names(query_cells_color), "cell_type"] %in% c("control", "host")]
  wt_cells <- names(wt_cells_color)

  # downsample cells to common number per embryo
  if (!is.null(downsample_number_of_cells)) {
    if (minimal_number_of_cells < downsample_number_of_cells) {
      stop("minimal_number_of_cells smaller than downsample_number_of_cells")
    }
    ko_cells <- downsample_cells_indexed_by_metadata(cells = ko_cells, cells_metadata = mat_chim@cell_metadata[ko_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
    host_cells <- downsample_cells_indexed_by_metadata(cells = host_cells, cells_metadata = mat_chim@cell_metadata[host_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
    wt_cells <- downsample_cells_indexed_by_metadata(cells = wt_cells, cells_metadata = mat_chim@cell_metadata[wt_cells, "transcriptional_rank"], n_downsample = downsample_number_of_cells, seed = 123)
  }

  # compute two way tables (embryo x cell-type colour)
  ko_emb_vs_ct <- compute_two_way_table(
    values_row = mat_chim@cell_metadata[ko_cells, "embryo"],
    values_col = query_cells_color[ko_cells],
    included_levels_row = included_chimeras,
    included_levels_col = included_colors, normalize_rows = T
  )
  host_emb_vs_ct <- compute_two_way_table(
    values_row = mat_chim@cell_metadata[host_cells, "embryo"],
    values_col = query_cells_color[host_cells],
    included_levels_row = included_chimeras,
    included_levels_col = included_colors, normalize_rows = T
  )
  wt10_emb_vs_ct <- compute_two_way_table(
    values_row = mat_chim@cell_metadata[wt_cells, "transcriptional_rank"],
    values_col = wt_cells_color[wt_cells],
    included_levels_row = included_transcriptional_ranks,
    included_levels_col = included_colors, normalize_rows = F
  )
  # Drop WT ranks with no cells before row-normalising (avoids 0/0 rows)
  f_wt <- rowSums(wt10_emb_vs_ct) > 0
  wt10_emb_vs_ct <- wt10_emb_vs_ct[f_wt, ]
  wt10_emb_vs_ct <- wt10_emb_vs_ct / rowSums(wt10_emb_vs_ct)

  return(list(wt = wt10_emb_vs_ct, tko = ko_emb_vs_ct, host = host_emb_vs_ct))
}
# Compute per-embryo cell-type frequency tables for a tetraploid-complemented
# experiment.
#
# Returns a list of two embryo-by-cell-type frequency matrices:
#   $wt    - wild-type atlas embryos (rows indexed by transcriptional rank)
#   $query - the injected (KO/control) cells of each tetraploid embryo
#
# mat_nm                         - scdb matrix name of the experiment
# minimal_number_of_cells        - embryos need at least this many query cells
# downsample_number_of_cells     - if non-NULL, each embryo/rank is downsampled
#                                  to exactly this many cells (must be <=
#                                  minimal_number_of_cells)
# included_transcriptional_ranks - WT ranks defining the time window
tetraploid_dotplot_frequencies <- function(mat_nm, minimal_number_of_cells = 20, downsample_number_of_cells = NULL, included_transcriptional_ranks = c(125:153)) {
  mat <- scdb_mat(mat_nm)

  # time window Et7.75 - Et8.1: keep embryos with enough query cells and a
  # best-matching WT rank inside the window
  df_tetra <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", header = TRUE, stringsAsFactors = FALSE)
  rownames(df_tetra) <- df_tetra$embryo
  f <- (df_tetra[, 1] >= minimal_number_of_cells)
  df_tetra <- df_tetra[f, ]
  f <- df_tetra[, "best_query"] %in% included_transcriptional_ranks
  df_tetra <- df_tetra[f, ]
  included_chimeras <- df_tetra$embryo

  mc_wt <- scdb_mc("sing_emb_wt10_recolored")
  # visceral and extraembryonic endoderm are excluded
  included_colors <- mc_wt@color_key$color[1:27]

  # Loads `cmp_annot` (per-cell colour annotation) into the function frame
  load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))

  # Collapse the blood subtypes onto a single colour
  query_cells_color <- aggregate_blood_subtypes_into_one_type(cmp_annot$query_cls_col)
  wt_cells_color <- mc_wt@colors[mc_wt@mc]
  names(wt_cells_color) <- names(mc_wt@mc)
  wt_cells_color <- aggregate_blood_subtypes_into_one_type(wt_cells_color)
  # Remove Blood progenitors and Erythroid 2 from the colour levels
  included_colors <- included_colors[-c(21, 23)]

  query_cells_color <- query_cells_color[query_cells_color %in% included_colors]
  wt_cells_color <- wt_cells_color[wt_cells_color %in% included_colors]

  query_cells <- names(query_cells_color)[mat@cell_metadata[names(query_cells_color), "cell_type"] %in% c("KO", "control")]
  wt_cells <- names(wt_cells_color)

  # Downsample cells to a common number per embryo / rank
  if (!is.null(downsample_number_of_cells)) {
    if (minimal_number_of_cells < downsample_number_of_cells) {
      stop("minimal_number_of_cells smaller than downsample_number_of_cells")
    }
    # BUGFIX: previously downsampled to minimal_number_of_cells, silently
    # ignoring the requested downsample_number_of_cells (the chimera
    # counterpart, chimera_dotplot_frequencies, uses the parameter)
    query_cells <- downsample_cells_indexed_by_metadata(cells = query_cells, cells_metadata = mat@cell_metadata[query_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
    wt_cells <- downsample_cells_indexed_by_metadata(cells = wt_cells, cells_metadata = mat@cell_metadata[wt_cells, "transcriptional_rank"], n_downsample = downsample_number_of_cells, seed = 123)
  }

  # Compute two-way tables (embryo x cell-type colour)
  query_emb_vs_ct <- compute_two_way_table(
    values_row = mat@cell_metadata[query_cells, "embryo"],
    values_col = query_cells_color[query_cells],
    included_levels_row = included_chimeras,
    included_levels_col = included_colors, normalize_rows = TRUE
  )
  wt10_emb_vs_ct <- compute_two_way_table(
    values_row = mat@cell_metadata[wt_cells, "transcriptional_rank"],
    values_col = wt_cells_color[wt_cells],
    included_levels_row = included_transcriptional_ranks,
    included_levels_col = included_colors, normalize_rows = FALSE
  )
  # Drop WT ranks with no cells before row-normalising (avoids 0/0 rows)
  f_wt <- rowSums(wt10_emb_vs_ct) > 0
  wt10_emb_vs_ct <- wt10_emb_vs_ct[f_wt, ]
  wt10_emb_vs_ct <- wt10_emb_vs_ct / rowSums(wt10_emb_vs_ct)

  return(list(wt = wt10_emb_vs_ct, query = query_emb_vs_ct))
}
compute_two_way_table <- function(values_row, values_col, included_levels_row = NULL, included_levels_col = NULL, normalize_rows = FALSE) {
    # Cross-tabulates two parallel per-cell annotation vectors (e.g. embryo id
    # vs cell-type color) into a contingency table.
    #
    # values_row, values_col: vectors of equal length; element i of each
    #   describes the same cell.
    # included_levels_row / included_levels_col: optional level sets. Entries
    #   outside the set are dropped (the filter on one axis also removes the
    #   paired entry on the other axis), and the surviving values are turned
    #   into factors so the output contains a row/column for every requested
    #   level, including empty ones.
    # normalize_rows: if TRUE, each row is divided by its sum so rows become
    #   frequencies. A row with zero counts yields NaN; callers are expected
    #   to filter empty rows beforehand (see the wt-table handling upstream).
    #
    # Returns a `table` object (counts, or row frequencies if normalized).
    # Note: defaults use TRUE/FALSE instead of the original T/F, which are
    # reassignable identifiers; behavior is unchanged.
    if (length(values_row) != length(values_col)) {
        stop("values_row and values_col don't have the same length")
    }
    if (!is.null(included_levels_row)) {
        f <- values_row %in% included_levels_row
        values_row <- factor(x = values_row[f], levels = included_levels_row)
        values_col <- values_col[f]
    }
    if (!is.null(included_levels_col)) {
        f <- values_col %in% included_levels_col
        values_row <- values_row[f]
        values_col <- factor(x = values_col[f], levels = included_levels_col)
    }
    row_vs_col_freq <- table(values_row, values_col)
    if (normalize_rows) {
        row_vs_col_freq <- row_vs_col_freq / rowSums(row_vs_col_freq)
    }
    return(row_vs_col_freq)
}
# Fig 2f (4N panels): per-cell-type frequency dot plots for tetraploid-
# complemented embryos, comparing TKO embryos ("TKO"), tetraploid control
# embryos ("Ctrl") and the WT10 atlas ("WT"). For each highlighted cell type,
# group differences are tested with Wilcoxon rank-sum tests on frequencies
# recomputed from embryos downsampled to a common cell number (250), then
# corrected with qvalue(); q < 0.05 is drawn as "*", otherwise "ns".
# Writes one panel per cell type plus a combined 4x4 grid figure.
#
# plot_pdf: write PDF panels (TRUE) or PNG panels.
# included_transcriptional_ranks: WT atlas ranks defining the reference time
#   window (default 125:153).
# highlighted_colors: cell-type colors to plot; defaults to 16 selected types
#   from the WT10 color key.
plot_tetraploid_dotplots <- function(plot_pdf = T, included_transcriptional_ranks = NULL, highlighted_colors = NULL) {
    # embryos used for the statistical tests must have >= 250 cells
    minimal_number_of_cells <- 250
    ko_color <- "indianred3"
    host_color <- "gray30"
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    if (is.null(included_transcriptional_ranks)) {
        included_transcriptional_ranks <- c(125:153)
    }
    if (is.null(highlighted_colors)) {
        highlighted_colors <- mc_wt@color_key$color[c(2, 3, 5, 6, 8, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24, 27)]
    }
    # frequencies for plotting: lenient threshold (>= 20 cells per embryo)
    tko_tetra_freq <- tetraploid_dotplot_frequencies(mat_nm = "tko_tetra_wt10", minimal_number_of_cells = 20, included_transcriptional_ranks = included_transcriptional_ranks)
    control_tetra_freq <- tetraploid_dotplot_frequencies(mat_nm = "control_tetra_all_wt10", minimal_number_of_cells = 20)
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    col_to_ct <- mc_wt@color_key$group
    # blood subtypes are merged upstream; relabel the merged type "Blood"
    col_to_ct[22] <- "Blood"
    names(col_to_ct) <- mc_wt@color_key$color
    ko_color <- "indianred3"
    host_color <- "gray30"
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    fig_dir <- "figs/paper_figs/fig2/cell_type_dot_plots_4N"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    tko_freq_n <- tko_tetra_freq$query
    ctrl_freq_n <- control_tetra_freq$query
    wt_freq_n <- tko_tetra_freq$wt
    # frequencies for testing: strict threshold plus downsampling so every
    # embryo contributes the same number of cells
    tko_tetra_freq_ds <- tetraploid_dotplot_frequencies(mat_nm = "tko_tetra_wt10", minimal_number_of_cells = minimal_number_of_cells, downsample_number_of_cells = minimal_number_of_cells)
    control_tetra_freq_ds <- tetraploid_dotplot_frequencies(mat_nm = "control_tetra_all_wt10", minimal_number_of_cells = minimal_number_of_cells, downsample_number_of_cells = minimal_number_of_cells)
    tko_freq_ds <- tko_tetra_freq_ds$query
    ctrl_freq_ds <- control_tetra_freq_ds$query
    wt_freq_ds <- tko_tetra_freq_ds$wt
    # pairwise Wilcoxon rank-sum tests per cell type
    tko_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    tko_to_ctrl_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = ctrl_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    ctrl_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = ctrl_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    if (length(highlighted_colors) > 1) {
        # multiple-testing correction across cell types (pi0 fixed at 1)
        q_val_tko <- qvalue(p = tko_to_wt_p_values, pi0 = 1)
        q_val_ctrl <- qvalue(p = ctrl_to_wt_p_values, pi0 = 1)
        q_val_tko_to_ctrl <- qvalue(p = tko_to_ctrl_p_values, pi0 = 1)
    } else {
        # a single p-value cannot be FDR-corrected; wrap it so the
        # downstream $qvalues access stays uniform
        q_val_tko <- list(qvalues = tko_to_wt_p_values)
        q_val_ctrl <- list(qvalues = ctrl_to_wt_p_values)
        q_val_tko_to_ctrl <- list(qvalues = tko_to_ctrl_p_values)
    }
    # map q-values to plot labels: "*" if q < 0.05, otherwise "ns"
    q_to_signif <- function(v) {
        v_signif <- sapply(v, function(x) {
            if (x >= 0.05) {
                a <- "ns"
            } else {
                a <- "*"
            }
            return(a)
        })
        return(v_signif)
    }
    genotype_color <- c("TKO" = ko_color, "Ctrl" = host_color, "WT" = "gray70")
    # long-format table of all pairwise comparisons, consumed by
    # stat_pvalue_manual() below
    stat_comparison <- data.frame(
        group1 = c(rep("TKO", length(highlighted_colors)), rep("Ctrl", length(highlighted_colors)), rep("TKO", length(highlighted_colors))),
        group2 = c(rep("Ctrl", length(highlighted_colors)), rep("WT", length(highlighted_colors)), rep("WT", length(highlighted_colors))),
        cell_type = c(col_to_ct[names(q_val_tko_to_ctrl$qvalues)], col_to_ct[names(q_val_ctrl$qvalues)], col_to_ct[names(q_val_tko$qvalues)]),
        cell_type_color = c(names(q_val_tko_to_ctrl$qvalues), names(q_val_ctrl$qvalues), names(q_val_tko$qvalues)),
        q.val = c(q_val_tko_to_ctrl$qvalues, q_val_ctrl$qvalues, q_val_tko$qvalues),
        q.signif = c(q_to_signif(q_val_tko_to_ctrl$qvalues), q_to_signif(q_val_ctrl$qvalues), q_to_signif(q_val_tko$qvalues)), stringsAsFactors = F
    )
    plot_list <- list()
    # shorter display names so panel titles fit
    col_to_ct[20] <- "Haematoendothelial prog."
    col_to_ct[14] <- "Later. & interm. mesoderm"
    for (ct_col in highlighted_colors) {
        main_tag <- gsub("/", "_", col_to_ct[ct_col])
        df_plot_points <- data.frame(
            genotype = factor(x = c(rep("TKO", nrow(tko_freq_n)), rep("Ctrl", nrow(ctrl_freq_n)), rep("WT", nrow(wt_freq_n))), levels = c("TKO", "Ctrl", "WT")),
            freq = c(tko_freq_n[, ct_col], ctrl_freq_n[, ct_col], wt_freq_n[, ct_col])
        )
        my_comparisons <- list(c("TKO", "Ctrl"), c("TKO", "WT"), c("Ctrl", "WT"))
        # NOTE(review): stat.test and my_comparisons are computed but unused;
        # the significance labels actually drawn come from stat_f below
        stat.test <- compare_means(data = df_plot_points, formula = freq ~ genotype)
        stat_f <- stat_comparison[stat_comparison$cell_type_color == ct_col, ]
        p <- ggplot(data = df_plot_points, aes(x = genotype, y = freq)) +
            geom_dotplot(aes(fill = genotype), dotsize = 1.3, binaxis = "y", stackdir = "center", show.legend = F) +
            stat_pvalue_manual(stat_f, y.position = max(df_plot_points$freq) * 1.1, step.increase = 0.1, label = "q.signif") +
            scale_fill_manual(values = genotype_color) +
            ggtitle(label = main_tag) +
            theme(plot.title = element_text(hjust = 0.5, size = 10)) +
            ylab("") +
            ylim(0, max(df_plot_points$freq) * 1.4) +
            xlab("")
        # theme(axis.text.x = element_text(size=14))
        # stat_compare_means(label = "p.signif",comparisons = my_comparisons) +
        plot_list[[ct_col]] <- p
        if (plot_pdf) {
            ggsave(filename = sprintf("%s/4N_%s.pdf", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        } else {
            ggsave(filename = sprintf("%s/4N_%s.png", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        }
    }
    # combined 4x4 grid of all per-cell-type panels
    p_all <- grid.arrange(grobs = plot_list, ncol = 4, nrow = 4)
    if (plot_pdf) {
        ggsave(filename = sprintf("%s/4N_all_cell_types.pdf", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    } else {
        ggsave(filename = sprintf("%s/4N_all_cell_types.png", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    }
}
|
/scripts/paper_figures/fig2.R
|
no_license
|
tanaylab/tet-gastrulation
|
R
| false
| false
| 27,669
|
r
|
# Top-level driver for Figure 2: creates the output directory and renders
# every panel (2c-2f) by calling the dedicated plotting helpers below.
# All output is written under figs/paper_figs/fig2/. No return value; the
# function is called purely for its file-writing side effects.
generate_figure2_plots <- function() {
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    # fig 2c
    atlas_projection_tko_chimera()
    # fig 2d
    host_vs_ko_age(T)
    # fig 2e
    tko_barplot_ct_frequency(mat_nm = "tko_chim_wt10", ko_type = "KO", plot_pdf = T)
    tko_barplot_ct_frequency(mat_nm = "tko_chim_wt10", ko_type = c("control", "host"), plot_pdf = T, tag = "control_host")
    # fig 2f
    plot_chimera_dotplots()
    plot_tetraploid_dotplots()
}
# Fig 2c: projects TKO chimera cells onto the WT10 atlas 2D map.
# Renders one panel for the KO cells and one for the host cells, using the
# chimera dataset's feature gene set for the projection.
# plot_pdf: if TRUE writes PDFs instead of PNGs (dimensions converted from
#   pixels to inches at 72 dpi).
atlas_projection_tko_chimera <- function(plot_pdf = F) {
    mat_chim <- scdb_mat("tko_chim_wt10")
    gset <- scdb_gset("tko_chim_wt10")
    feat_genes <- names(gset@gene_set)
    # split chimera cells by genotype annotation
    ko_cls <- colnames(mat_chim@mat)[mat_chim@cell_metadata[colnames(mat_chim@mat), "cell_type"] == "KO"]
    host_cls <- colnames(mat_chim@mat)[mat_chim@cell_metadata[colnames(mat_chim@mat), "cell_type"] == "host"]
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    mat_query <- mat_chim@mat[, ko_cls]
    fn <- "figs/paper_figs/fig2/atlas_projection_tko_chim_ko_cls_new_12.png"
    w <- 1000
    h <- 1000
    if (plot_pdf) {
        fn <- gsub(pattern = ".png", replacement = ".pdf", x = fn)
        w <- 1000 / 72
        h <- 1000 / 72
    }
    atlas_proj_on_wt10(mat_query = mat_query, feat_genes = feat_genes, fn = fn, cex_points = 1.2, w = w, h = h, plot_pdf = plot_pdf, plot_gray_background = F)
    # NOTE(review): despite the "host_control" file name, only cells annotated
    # "host" are plotted here (host_cls); "control" cells are not included.
    # Also, in PDF mode this second fn still ends in ".png" because the gsub
    # above only ran on the first filename — likely an oversight; confirm
    # against atlas_proj_on_wt10's handling of fn vs plot_pdf.
    mat_query <- mat_chim@mat[, host_cls]
    fn <- "figs/paper_figs/fig2/atlas_projection_tko_chim_host_control_cls_new_12.png"
    atlas_proj_on_wt10(mat_query = mat_query, feat_genes = feat_genes, fn = fn, cex_points = 1.2, w = w, h = h, plot_pdf = plot_pdf, plot_gray_background = F)
}
# Fig 2d: scatter plot of the inferred developmental time of KO cells vs
# host/control cells, one point per chimera embryo. Only embryos with at
# least n_cls_min cells in both the KO and the host+control fractions are
# shown; the dashed diagonal marks equal inferred ages.
# plot_pdf: write a PDF (TRUE) or a PNG.
host_vs_ko_age <- function(plot_pdf = F) {
    n_cls_min <- 20
    # lookup table: WT10 transcriptional rank -> developmental time
    rank_to_time <- read.table(file = "data/wt10_transcriptional_rank_developmental_time.txt", stringsAsFactors = F, h = T, sep = "\t")
    # NOTE(review): dev_time is assigned but never used below.
    dev_time <- rank_to_time$developmental_time
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    chimera_age <- read.table("data/tko_chim_wt10/time_match/time_match_summary.txt", sep = "\t", stringsAsFactors = F, h = T)
    # keep embryos with enough cells in both fractions
    f <- (chimera_age$control + chimera_age$host >= n_cls_min) & (chimera_age$KO >= n_cls_min)
    # fixed axis limits (embryonic time window shown)
    time_min <- 6.9
    time_max <- 8.2
    if (plot_pdf) {
        pdf(sprintf("%s/best_time_ko_vs_host_control.pdf", fig_dir), useDingbats = F)
    } else {
        png(sprintf("%s/best_time_ko_vs_host_control.png", fig_dir))
    }
    plot(rank_to_time$developmental_time[chimera_age$best_rank_ko[f]],
        rank_to_time$developmental_time[chimera_age$best_rank_host_control[f]],
        pch = 19,
        xlim = c(time_min, time_max), ylim = c(time_min, time_max), main = "KO vs host/control",
        xlab = "Time KO cells", ylab = "Time host/control cells", cex = 4, cex.lab = 1
    )
    # identity line: points above/below indicate age mismatch between fractions
    abline(a = 0, b = 1, lty = "dashed")
    dev.off()
}
# Fig 2e: stacked per-embryo cell-type composition barplot for a chimera
# dataset. Embryos are ordered by their host/control transcriptional rank.
# White "spacer" pseudo-cell-types are inserted between germ-layer groups so
# the stacked bars show visual gaps between lineage blocks.
#
# mat_nm: scdb id of the chimera count matrix.
# ko_type: cell_type annotation(s) to include in the bars ("KO", or
#   c("control", "host") for the host/control panel).
# plot_pdf: PDF vs PNG output.
# tag: suffix used in the output file name.
tko_barplot_ct_frequency <- function(mat_nm, ko_type = "KO", plot_pdf = F, tag = "KO") {
    # strict > below, so embryos need at least 20 cells per fraction
    n_cls_min <- 19
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    df_chim <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", stringsAsFactors = F, h = T)
    rownames(df_chim) <- df_chim$embryo
    mat <- scdb_mat(mat_nm)
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    col_to_rank <- c(1:nrow(mc_wt@color_key))
    names(col_to_rank) <- mc_wt@color_key$color
    col_to_ct <- mc_wt@color_key$group
    names(col_to_ct) <- mc_wt@color_key$color
    # visceral / extraembryonic endoderm colors are excluded from the bars
    excluded_colors <- c("#F6BFCB", "#7F6874")
    included_colors <- setdiff(unique(mc_wt@color_key$color), excluded_colors)
    # embryos with enough KO and host+control cells, ordered by host rank
    chim_embryos <- df_chim$embryo[(df_chim$KO > n_cls_min) & (df_chim$control + df_chim$host > n_cls_min)]
    chim_embryos <- chim_embryos[order(df_chim[chim_embryos, "best_rank_host_control"])]
    # NOTE(review): tmp is allocated but never used below.
    tmp <- matrix(0, nrow = length(chim_embryos), ncol = length(included_colors))
    rownames(tmp) <- chim_embryos
    colnames(tmp) <- included_colors
    # loads cmp_annot (per-cell atlas color annotation) into scope
    load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))
    query_cls_col <- cmp_annot$query_cls_col
    query_cls <- names(query_cls_col)[!(query_cls_col %in% excluded_colors)]
    query_cls <- query_cls[mat@cell_metadata[query_cls, "embryo"] %in% chim_embryos]
    # cell-type ordering with "spaceN" placeholders between lineage groups;
    # the parallel color vector paints those placeholders white
    modified_cell_type_levels <- c(
        mc_wt@color_key$group[1:7], c("space1"),
        mc_wt@color_key$group[c(9, 10, 11)], c("space2"),
        mc_wt@color_key$group[c(8, 12, 13, 14, 15, 16)], c("space3"),
        mc_wt@color_key$group[c(17, 18, 19)], c("space4"),
        mc_wt@color_key$group[c(20:23)], c("space5"),
        mc_wt@color_key$group[c(24:27)]
    )
    modified_colors <- c(
        mc_wt@color_key$color[1:7], c("white"),
        mc_wt@color_key$color[c(9, 10, 11)], c("white"),
        mc_wt@color_key$color[c(8, 12, 13, 14, 15, 16)], c("white"),
        mc_wt@color_key$color[c(17, 18, 19)], c("white"),
        mc_wt@color_key$color[c(20:23)], c("white"),
        mc_wt@color_key$color[c(24:27)]
    )
    filtered_cls <- query_cls[mat@cell_metadata[query_cls, "cell_type"] %in% ko_type]
    filtered_vs_ct <- table(factor(x = mat@cell_metadata[filtered_cls, "embryo"], levels = chim_embryos), factor(x = col_to_ct[query_cls_col[filtered_cls]], levels = modified_cell_type_levels))
    filtered_vs_ct_n <- filtered_vs_ct / rowSums(filtered_vs_ct)
    filtered_vs_ct_n[is.na(filtered_vs_ct_n)] <- 0
    # give the spacer columns a small fixed weight and renormalize so every
    # bar sums to 1 again.
    # NOTE(review): the first two embryos get only spacers 1 and 5 (wider,
    # 0.04) while the rest get spacers 1, 3, 4, 5 (0.02) — confirm this
    # asymmetry is intended and not a leftover from a specific embryo set.
    filtered_vs_ct_n[1:2, c("space1", "space5")] <- 0.04
    filtered_vs_ct_n[3:nrow(filtered_vs_ct_n), c("space1", "space3", "space4", "space5")] <- 0.02
    filtered_vs_ct_n <- filtered_vs_ct_n / rowSums(filtered_vs_ct_n)
    if (plot_pdf) {
        pdf(sprintf("%s/barplot_ct_freq_%s.pdf", fig_dir, tag), w = 12, h = 7.5, useDingbats = F)
        barplot(t(filtered_vs_ct_n), col = modified_colors, las = 2, axes = F, axisnames = F, border = NA)
        dev.off()
    } else {
        png(sprintf("%s/barplot_ct_freq_%s.png", fig_dir, tag), w = 1250, h = 750)
        barplot(t(filtered_vs_ct_n), col = modified_colors, las = 2, axes = F, axisnames = F, border = NA)
        dev.off()
    }
}
# Fig 2f (2N panels): per-cell-type frequency dot plots for TKO chimera
# embryos, comparing the TKO fraction ("TKO"), the host/control fraction
# ("Host/Control") and the WT10 atlas ("WT"). For each highlighted cell type,
# group differences are tested with Wilcoxon rank-sum tests on frequencies
# recomputed from embryos downsampled to a common cell number, corrected with
# qvalue(); q < 0.05 is drawn as "*", otherwise "ns". Writes one panel per
# cell type plus a combined 4x4 grid figure.
#
# plot_pdf: write PDF panels (TRUE) or PNG panels.
# included_transcriptional_ranks: WT atlas ranks for the reference window
#   (default 125:153).
# highlighted_colors: cell-type colors to plot (default: 16 selected types).
# minimal_number_of_cells_for_p_value: per-embryo cell threshold (and
#   downsampling target) used only for the significance tests.
plot_chimera_dotplots <- function(plot_pdf = T, included_transcriptional_ranks = NULL, highlighted_colors = NULL, minimal_number_of_cells_for_p_value = 100) {
    ko_color <- "indianred3"
    host_color <- "gray30"
    mat_nm <- "tko_chim_wt10"
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    if (is.null(included_transcriptional_ranks)) {
        included_transcriptional_ranks <- c(125:153)
    }
    if (is.null(highlighted_colors)) {
        highlighted_colors <- mc_wt@color_key$color[c(2, 3, 5, 6, 8, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24, 27)]
    }
    # frequencies for plotting: lenient threshold (>= 20 cells per embryo)
    chim_freq <- chimera_dotplot_frequencies(mat_nm = mat_nm, minimal_number_of_cells = 20, included_transcriptional_ranks = included_transcriptional_ranks)
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    col_to_ct <- mc_wt@color_key$group
    # blood subtypes are merged upstream; relabel the merged type "Blood"
    col_to_ct[22] <- "Blood"
    names(col_to_ct) <- mc_wt@color_key$color
    ko_color <- "indianred3"
    host_color <- "gray30"
    mat_nm <- "tko_chim_wt10"
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    fig_dir <- "figs/paper_figs/fig2/cell_type_dot_plots"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    tko_freq_n <- chim_freq$tko
    host_freq_n <- chim_freq$host
    wt_freq_n <- chim_freq$wt
    # frequencies for testing: strict threshold plus downsampling so every
    # embryo contributes the same number of cells
    chim_freq_for_p_value_calculation <- chimera_dotplot_frequencies(mat_nm = mat_nm, minimal_number_of_cells = minimal_number_of_cells_for_p_value, downsample_number_of_cells = minimal_number_of_cells_for_p_value)
    tko_freq_ds <- chim_freq_for_p_value_calculation$tko
    host_freq_ds <- chim_freq_for_p_value_calculation$host
    wt_freq_ds <- chim_freq_for_p_value_calculation$wt
    # pairwise Wilcoxon rank-sum tests per cell type
    tko_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    tko_to_host_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = host_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    host_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = host_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    if (length(highlighted_colors) > 1) {
        # multiple-testing correction across cell types (pi0 fixed at 1)
        q_val_tko <- qvalue(p = tko_to_wt_p_values, pi0 = 1)
        q_val_host <- qvalue(p = host_to_wt_p_values, pi0 = 1)
        q_val_tko_to_host <- qvalue(p = tko_to_host_p_values, pi0 = 1)
    } else {
        # a single p-value cannot be FDR-corrected; wrap it so the
        # downstream $qvalues access stays uniform
        q_val_tko <- list(qvalues = tko_to_wt_p_values)
        q_val_host <- list(qvalues = host_to_wt_p_values)
        q_val_tko_to_host <- list(qvalues = tko_to_host_p_values)
    }
    # map q-values to plot labels: "*" if q < 0.05, otherwise "ns"
    q_to_signif <- function(v) {
        v_signif <- sapply(v, function(x) {
            if (x >= 0.05) {
                a <- "ns"
            } else {
                a <- "*"
            }
            return(a)
        })
        return(v_signif)
    }
    genotype_color <- c("TKO" = ko_color, "Host/Control" = host_color, "WT" = "gray70")
    # long-format table of all pairwise comparisons, consumed by
    # stat_pvalue_manual() below
    stat_comparison <- data.frame(
        group1 = c(rep("TKO", length(highlighted_colors)), rep("Host/Control", length(highlighted_colors)), rep("TKO", length(highlighted_colors))),
        group2 = c(rep("Host/Control", length(highlighted_colors)), rep("WT", length(highlighted_colors)), rep("WT", length(highlighted_colors))),
        cell_type = c(col_to_ct[names(q_val_tko_to_host$qvalues)], col_to_ct[names(q_val_host$qvalues)], col_to_ct[names(q_val_tko$qvalues)]),
        cell_type_color = c(names(q_val_tko_to_host$qvalues), names(q_val_host$qvalues), names(q_val_tko$qvalues)),
        q.val = c(q_val_tko_to_host$qvalues, q_val_host$qvalues, q_val_tko$qvalues),
        q.signif = c(q_to_signif(q_val_tko_to_host$qvalues), q_to_signif(q_val_host$qvalues), q_to_signif(q_val_tko$qvalues)), stringsAsFactors = F
    )
    plot_list <- list()
    # shorter display names so panel titles fit
    col_to_ct[20] <- "Haematoendothelial prog."
    col_to_ct[14] <- "Later. & interm. mesoderm"
    for (ct_col in highlighted_colors) {
        main_tag <- gsub("/", "_", col_to_ct[ct_col])
        df_plot_points <- data.frame(
            genotype = factor(x = c(rep("TKO", nrow(tko_freq_n)), rep("Host/Control", nrow(host_freq_n)), rep("WT", nrow(wt_freq_n))), levels = c("TKO", "Host/Control", "WT")),
            freq = c(tko_freq_n[, ct_col], host_freq_n[, ct_col], wt_freq_n[, ct_col])
        )
        my_comparisons <- list(c("TKO", "Host/Control"), c("TKO", "WT"), c("Host/Control", "WT"))
        # NOTE(review): stat.test and my_comparisons are computed but unused;
        # the significance labels actually drawn come from stat_f below
        stat.test <- compare_means(data = df_plot_points, formula = freq ~ genotype)
        stat_f <- stat_comparison[stat_comparison$cell_type_color == ct_col, ]
        p <- ggplot(data = df_plot_points, aes(x = genotype, y = freq)) +
            geom_dotplot(aes(fill = genotype), dotsize = 1.3, binaxis = "y", stackdir = "center", show.legend = F) +
            stat_pvalue_manual(stat_f, y.position = max(df_plot_points$freq) * 1.1, step.increase = 0.1, label = "q.signif") +
            scale_fill_manual(values = genotype_color) +
            ggtitle(label = main_tag) +
            theme(plot.title = element_text(hjust = 0.5, size = 10)) +
            ylab("") +
            ylim(0, max(df_plot_points$freq) * 1.4) +
            xlab("")
        # theme(axis.text.x = element_text(size=14))
        # stat_compare_means(label = "p.signif",comparisons = my_comparisons) +
        plot_list[[ct_col]] <- p
        if (plot_pdf) {
            ggsave(filename = sprintf("%s/2N_%s.pdf", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        } else {
            ggsave(filename = sprintf("%s/2N_%s.png", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        }
    }
    # combined 4x4 grid of all per-cell-type panels
    p_all <- grid.arrange(grobs = plot_list, ncol = 4, nrow = 4)
    if (plot_pdf) {
        ggsave(filename = sprintf("%s/2N_all_cell_types.pdf", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    } else {
        ggsave(filename = sprintf("%s/2N_all_cell_types.png", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    }
}
aggregate_blood_subtypes_into_one_type <- function(color_vector) {
    # Collapse the blood sub-lineage colors into a single type: both the
    # Blood progenitors color (#c9a997) and the Erythroid 2 color (#EF4E22)
    # are recoded to the Erythroid 1 color (#C72228). Element names (cell
    # ids) are preserved and the result is always a character vector
    # (factor input is coerced via as.character()).
    saved_names <- names(color_vector)
    recoded <- as.character(color_vector)
    is_blood_subtype <- recoded %in% c("#c9a997", "#EF4E22")
    recoded[is_blood_subtype] <- "#C72228"
    names(recoded) <- saved_names
    return(recoded)
}
downsample_cells_indexed_by_metadata <- function(cells, cells_metadata, n_downsample, seed = NULL) {
    # Randomly subsample `cells` to exactly `n_downsample` cells per metadata
    # group (e.g. per embryo or per transcriptional rank). Groups holding
    # fewer than `n_downsample` cells are discarded entirely. When `seed` is
    # given, the RNG is re-seeded before every group's draw, so the result is
    # fully reproducible.
    group_sizes <- table(cells_metadata)
    big_enough <- names(group_sizes)[group_sizes >= n_downsample]
    keep <- cells_metadata %in% big_enough
    cells <- cells[keep]
    cells_metadata <- cells_metadata[keep]
    # tapply visits groups in sorted level order; re-seeding per group keeps
    # each group's draw independent of the others
    sampled_per_group <- tapply(cells, cells_metadata, function(group_cells) {
        if (!is.null(seed)) {
            set.seed(seed)
        }
        sample(group_cells, size = n_downsample)
    })
    return(unlist(sampled_per_group))
}
# Computes per-embryo cell-type frequency tables for a chimera dataset and
# for the matching WT10 atlas embryos.
#
# mat_nm: scdb id of the chimera count matrix. The time-match summary's
#   column names differ between "tko_chim_wt10" and other datasets, hence
#   the branches below.
# minimal_number_of_cells: embryos need at least this many KO cells and
#   host(+control) cells to be included.
# downsample_number_of_cells: if non-NULL, every embryo (and every WT
#   transcriptional rank) is downsampled to this many cells before the
#   frequencies are computed.
# included_transcriptional_ranks: WT atlas ranks defining the reference
#   time window (default 125:153, ~Et7.75-Et8.1).
#
# Returns list(wt = <rank x cell-type>, tko = <embryo x cell-type>,
# host = <embryo x cell-type>), each row-normalized to frequencies.
chimera_dotplot_frequencies <- function(mat_nm, minimal_number_of_cells = 20, downsample_number_of_cells = NULL, included_transcriptional_ranks = c(125:153)) {
    mat_chim <- scdb_mat(mat_nm)
    # time window Et7.75 - Et8.1
    df_chim <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", h = T, stringsAsFactors = F)
    rownames(df_chim) <- df_chim$embryo
    # cell-number filter; column layout depends on the dataset
    if (mat_nm == "tko_chim_wt10") {
        f <- (df_chim$control + df_chim$host >= minimal_number_of_cells) & (df_chim$KO >= minimal_number_of_cells)
    } else {
        f <- (df_chim$host >= minimal_number_of_cells) & (df_chim[, 1] >= minimal_number_of_cells)
    }
    df_chim <- df_chim[f, ]
    # keep only embryos whose host cells time-match the reference window
    if (mat_nm == "tko_chim_wt10") {
        f <- df_chim[, "best_rank_host_control"] %in% included_transcriptional_ranks
    } else {
        f <- df_chim[, "best_rank_host"] %in% included_transcriptional_ranks
    }
    df_chim <- df_chim[f, ]
    included_chimeras <- df_chim$embryo
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    # visceral and extraembryonic endoderm are excluded
    included_colors <- mc_wt@color_key$color[1:27]
    # loads cmp_annot (per-cell atlas color annotation) into scope
    load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))
    # Modify blood subtypes color to one color
    query_cells_color <- aggregate_blood_subtypes_into_one_type(cmp_annot$query_cls_col)
    wt_cells_color <- mc_wt@colors[mc_wt@mc]
    names(wt_cells_color) <- names(mc_wt@mc)
    wt_cells_color <- aggregate_blood_subtypes_into_one_type(wt_cells_color)
    # remove Blood progenitors and Erythroid 2 from color levels
    included_colors <- included_colors[-c(21, 23)]
    query_cells_color <- query_cells_color[query_cells_color %in% included_colors]
    wt_cells_color <- wt_cells_color[wt_cells_color %in% included_colors]
    # split query cells into KO-like and host/control fractions by annotation
    ko_cells <- names(query_cells_color)[mat_chim@cell_metadata[names(query_cells_color), "cell_type"] %in% c("KO", "DKO12", "DKO13", "DKO23")]
    host_cells <- names(query_cells_color)[mat_chim@cell_metadata[names(query_cells_color), "cell_type"] %in% c("control", "host")]
    wt_cells <- names(wt_cells_color)
    # downsample cells to common number per embryo
    if (!is.null(downsample_number_of_cells)) {
        if (minimal_number_of_cells < downsample_number_of_cells) {
            stop("minimal_number_of_cells smaller than downsample_number_of_cells")
        }
        ko_cells <- downsample_cells_indexed_by_metadata(cells = ko_cells, cells_metadata = mat_chim@cell_metadata[ko_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
        host_cells <- downsample_cells_indexed_by_metadata(cells = host_cells, cells_metadata = mat_chim@cell_metadata[host_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
        wt_cells <- downsample_cells_indexed_by_metadata(cells = wt_cells, cells_metadata = mat_chim@cell_metadata[wt_cells, "transcriptional_rank"], n_downsample = downsample_number_of_cells, seed = 123)
    }
    # compute two way tables
    ko_emb_vs_ct <- compute_two_way_table(
        values_row = mat_chim@cell_metadata[ko_cells, "embryo"],
        values_col = query_cells_color[ko_cells],
        included_levels_row = included_chimeras,
        included_levels_col = included_colors, normalize_rows = T
    )
    host_emb_vs_ct <- compute_two_way_table(
        values_row = mat_chim@cell_metadata[host_cells, "embryo"],
        values_col = query_cells_color[host_cells],
        included_levels_row = included_chimeras,
        included_levels_col = included_colors, normalize_rows = T
    )
    # WT table: count first, drop empty ranks, then normalize — this avoids
    # NaN rows that row-normalizing empty ranks would produce
    wt10_emb_vs_ct <- compute_two_way_table(
        values_row = mat_chim@cell_metadata[wt_cells, "transcriptional_rank"],
        values_col = wt_cells_color[wt_cells],
        included_levels_row = included_transcriptional_ranks,
        included_levels_col = included_colors, normalize_rows = F
    )
    f_wt <- rowSums(wt10_emb_vs_ct) > 0
    wt10_emb_vs_ct <- wt10_emb_vs_ct[f_wt, ]
    wt10_emb_vs_ct <- wt10_emb_vs_ct / rowSums(wt10_emb_vs_ct)
    return(list(wt = wt10_emb_vs_ct, tko = ko_emb_vs_ct, host = host_emb_vs_ct))
}
# Computes per-embryo cell-type frequency tables for a tetraploid-
# complementation dataset and for the matching WT10 atlas embryos.
#
# mat_nm: scdb id of the query count matrix ("tko_tetra_wt10" or
#   "control_tetra_all_wt10").
# minimal_number_of_cells: embryos with fewer query cells are excluded.
# downsample_number_of_cells: if non-NULL, each embryo (and each WT
#   transcriptional rank) is downsampled to this many cells before the
#   frequencies are computed.
# included_transcriptional_ranks: WT atlas ranks defining the reference
#   time window (default 125:153, ~Et7.75-Et8.1).
#
# Returns list(wt = <rank x cell-type>, query = <embryo x cell-type>), each
# row-normalized to frequencies.
tetraploid_dotplot_frequencies <- function(mat_nm, minimal_number_of_cells = 20, downsample_number_of_cells = NULL, included_transcriptional_ranks = c(125:153)) {
    mat <- scdb_mat(mat_nm)
    # time window Et7.75 - Et8.1
    df_tetra <- read.table(sprintf("data/%s/time_match/time_match_summary.txt", mat_nm), sep = "\t", h = T, stringsAsFactors = F)
    rownames(df_tetra) <- df_tetra$embryo
    # first column of the summary holds the per-embryo query cell count
    f <- (df_tetra[, 1] >= minimal_number_of_cells)
    df_tetra <- df_tetra[f, ]
    # keep only embryos that time-match the reference window
    f <- df_tetra[, "best_query"] %in% included_transcriptional_ranks
    df_tetra <- df_tetra[f, ]
    included_chimeras <- df_tetra$embryo
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    # visceral and extraembryonic endoderm are excluded
    included_colors <- mc_wt@color_key$color[1:27]
    # loads cmp_annot (per-cell atlas color annotation) into scope
    load(file = sprintf("data/%s/color_annotation/cmp_annot.Rda", mat_nm))
    # Modify blood subtypes color to one color
    query_cells_color <- aggregate_blood_subtypes_into_one_type(cmp_annot$query_cls_col)
    wt_cells_color <- mc_wt@colors[mc_wt@mc]
    names(wt_cells_color) <- names(mc_wt@mc)
    wt_cells_color <- aggregate_blood_subtypes_into_one_type(wt_cells_color)
    # remove Blood progenitors and Erythroid 2 from color levels
    included_colors <- included_colors[-c(21, 23)]
    query_cells_color <- query_cells_color[query_cells_color %in% included_colors]
    wt_cells_color <- wt_cells_color[wt_cells_color %in% included_colors]
    query_cells <- names(query_cells_color)[mat@cell_metadata[names(query_cells_color), "cell_type"] %in% c("KO", "control")]
    wt_cells <- names(wt_cells_color)
    # downsample cells to common number per embryo
    if (!is.null(downsample_number_of_cells)) {
        if (minimal_number_of_cells < downsample_number_of_cells) {
            stop("minimal_number_of_cells smaller than downsample_number_of_cells")
        }
        # BUGFIX: downsample to downsample_number_of_cells (the requested
        # target), not minimal_number_of_cells as before. The two values only
        # coincided because all current callers pass them equal; this now
        # matches chimera_dotplot_frequencies().
        query_cells <- downsample_cells_indexed_by_metadata(cells = query_cells, cells_metadata = mat@cell_metadata[query_cells, "embryo"], n_downsample = downsample_number_of_cells, seed = 123)
        wt_cells <- downsample_cells_indexed_by_metadata(cells = wt_cells, cells_metadata = mat@cell_metadata[wt_cells, "transcriptional_rank"], n_downsample = downsample_number_of_cells, seed = 123)
    }
    # compute two way tables
    query_emb_vs_ct <- compute_two_way_table(
        values_row = mat@cell_metadata[query_cells, "embryo"],
        values_col = query_cells_color[query_cells],
        included_levels_row = included_chimeras,
        included_levels_col = included_colors, normalize_rows = T
    )
    # WT table: count first, drop empty ranks, then normalize — this avoids
    # NaN rows that row-normalizing empty ranks would produce
    wt10_emb_vs_ct <- compute_two_way_table(
        values_row = mat@cell_metadata[wt_cells, "transcriptional_rank"],
        values_col = wt_cells_color[wt_cells],
        included_levels_row = included_transcriptional_ranks,
        included_levels_col = included_colors, normalize_rows = F
    )
    f_wt <- rowSums(wt10_emb_vs_ct) > 0
    wt10_emb_vs_ct <- wt10_emb_vs_ct[f_wt, ]
    wt10_emb_vs_ct <- wt10_emb_vs_ct / rowSums(wt10_emb_vs_ct)
    return(list(wt = wt10_emb_vs_ct, query = query_emb_vs_ct))
}
compute_two_way_table <- function(values_row, values_col, included_levels_row = NULL, included_levels_col = NULL, normalize_rows = F) {
    # Cross-tabulate two parallel annotation vectors into a contingency
    # table. Optional level sets restrict each axis: entries outside the set
    # are dropped together with their partner on the other axis, and the
    # surviving values become factors so every requested level appears in the
    # output, even when empty. With normalize_rows = TRUE each row is scaled
    # by its sum (a row of zero counts yields NaN; callers filter those).
    if (length(values_col) != length(values_row)) {
        stop("values_row and values_col don't have the same length")
    }
    if (!is.null(included_levels_row)) {
        keep <- values_row %in% included_levels_row
        values_col <- values_col[keep]
        values_row <- factor(values_row[keep], levels = included_levels_row)
    }
    if (!is.null(included_levels_col)) {
        keep <- values_col %in% included_levels_col
        values_row <- values_row[keep]
        values_col <- factor(values_col[keep], levels = included_levels_col)
    }
    freq_table <- table(values_row, values_col)
    if (normalize_rows) {
        freq_table <- freq_table / rowSums(freq_table)
    }
    return(freq_table)
}
# Fig 2f (4N panels): per-cell-type frequency dot plots for tetraploid-
# complemented embryos, comparing TKO embryos ("TKO"), tetraploid control
# embryos ("Ctrl") and the WT10 atlas ("WT"). For each highlighted cell type,
# group differences are tested with Wilcoxon rank-sum tests on frequencies
# recomputed from embryos downsampled to a common cell number (250), then
# corrected with qvalue(); q < 0.05 is drawn as "*", otherwise "ns".
# Writes one panel per cell type plus a combined 4x4 grid figure.
#
# plot_pdf: write PDF panels (TRUE) or PNG panels.
# included_transcriptional_ranks: WT atlas ranks defining the reference time
#   window (default 125:153).
# highlighted_colors: cell-type colors to plot; defaults to 16 selected types
#   from the WT10 color key.
plot_tetraploid_dotplots <- function(plot_pdf = T, included_transcriptional_ranks = NULL, highlighted_colors = NULL) {
    # embryos used for the statistical tests must have >= 250 cells
    minimal_number_of_cells <- 250
    ko_color <- "indianred3"
    host_color <- "gray30"
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    if (is.null(included_transcriptional_ranks)) {
        included_transcriptional_ranks <- c(125:153)
    }
    if (is.null(highlighted_colors)) {
        highlighted_colors <- mc_wt@color_key$color[c(2, 3, 5, 6, 8, 12, 13, 14, 15, 17, 18, 19, 20, 22, 24, 27)]
    }
    # frequencies for plotting: lenient threshold (>= 20 cells per embryo)
    tko_tetra_freq <- tetraploid_dotplot_frequencies(mat_nm = "tko_tetra_wt10", minimal_number_of_cells = 20, included_transcriptional_ranks = included_transcriptional_ranks)
    control_tetra_freq <- tetraploid_dotplot_frequencies(mat_nm = "control_tetra_all_wt10", minimal_number_of_cells = 20)
    mc_wt <- scdb_mc("sing_emb_wt10_recolored")
    col_to_ct <- mc_wt@color_key$group
    # blood subtypes are merged upstream; relabel the merged type "Blood"
    col_to_ct[22] <- "Blood"
    names(col_to_ct) <- mc_wt@color_key$color
    ko_color <- "indianred3"
    host_color <- "gray30"
    fig_dir <- "figs/paper_figs/fig2"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    fig_dir <- "figs/paper_figs/fig2/cell_type_dot_plots_4N"
    if (!dir.exists(fig_dir)) {
        dir.create(fig_dir)
    }
    tko_freq_n <- tko_tetra_freq$query
    ctrl_freq_n <- control_tetra_freq$query
    wt_freq_n <- tko_tetra_freq$wt
    # frequencies for testing: strict threshold plus downsampling so every
    # embryo contributes the same number of cells
    tko_tetra_freq_ds <- tetraploid_dotplot_frequencies(mat_nm = "tko_tetra_wt10", minimal_number_of_cells = minimal_number_of_cells, downsample_number_of_cells = minimal_number_of_cells)
    control_tetra_freq_ds <- tetraploid_dotplot_frequencies(mat_nm = "control_tetra_all_wt10", minimal_number_of_cells = minimal_number_of_cells, downsample_number_of_cells = minimal_number_of_cells)
    tko_freq_ds <- tko_tetra_freq_ds$query
    ctrl_freq_ds <- control_tetra_freq_ds$query
    wt_freq_ds <- tko_tetra_freq_ds$wt
    # pairwise Wilcoxon rank-sum tests per cell type
    tko_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    tko_to_ctrl_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = tko_freq_ds[, ct_col], y = ctrl_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    ctrl_to_wt_p_values <- sapply(highlighted_colors, function(ct_col) {
        p_val <- wilcox.test(x = ctrl_freq_ds[, ct_col], y = wt_freq_ds[, ct_col])
        return(p_val$p.value)
    })
    if (length(highlighted_colors) > 1) {
        # multiple-testing correction across cell types (pi0 fixed at 1)
        q_val_tko <- qvalue(p = tko_to_wt_p_values, pi0 = 1)
        q_val_ctrl <- qvalue(p = ctrl_to_wt_p_values, pi0 = 1)
        q_val_tko_to_ctrl <- qvalue(p = tko_to_ctrl_p_values, pi0 = 1)
    } else {
        # a single p-value cannot be FDR-corrected; wrap it so the
        # downstream $qvalues access stays uniform
        q_val_tko <- list(qvalues = tko_to_wt_p_values)
        q_val_ctrl <- list(qvalues = ctrl_to_wt_p_values)
        q_val_tko_to_ctrl <- list(qvalues = tko_to_ctrl_p_values)
    }
    # map q-values to plot labels: "*" if q < 0.05, otherwise "ns"
    q_to_signif <- function(v) {
        v_signif <- sapply(v, function(x) {
            if (x >= 0.05) {
                a <- "ns"
            } else {
                a <- "*"
            }
            return(a)
        })
        return(v_signif)
    }
    genotype_color <- c("TKO" = ko_color, "Ctrl" = host_color, "WT" = "gray70")
    # long-format table of all pairwise comparisons, consumed by
    # stat_pvalue_manual() below
    stat_comparison <- data.frame(
        group1 = c(rep("TKO", length(highlighted_colors)), rep("Ctrl", length(highlighted_colors)), rep("TKO", length(highlighted_colors))),
        group2 = c(rep("Ctrl", length(highlighted_colors)), rep("WT", length(highlighted_colors)), rep("WT", length(highlighted_colors))),
        cell_type = c(col_to_ct[names(q_val_tko_to_ctrl$qvalues)], col_to_ct[names(q_val_ctrl$qvalues)], col_to_ct[names(q_val_tko$qvalues)]),
        cell_type_color = c(names(q_val_tko_to_ctrl$qvalues), names(q_val_ctrl$qvalues), names(q_val_tko$qvalues)),
        q.val = c(q_val_tko_to_ctrl$qvalues, q_val_ctrl$qvalues, q_val_tko$qvalues),
        q.signif = c(q_to_signif(q_val_tko_to_ctrl$qvalues), q_to_signif(q_val_ctrl$qvalues), q_to_signif(q_val_tko$qvalues)), stringsAsFactors = F
    )
    plot_list <- list()
    # shorter display names so panel titles fit
    col_to_ct[20] <- "Haematoendothelial prog."
    col_to_ct[14] <- "Later. & interm. mesoderm"
    for (ct_col in highlighted_colors) {
        main_tag <- gsub("/", "_", col_to_ct[ct_col])
        df_plot_points <- data.frame(
            genotype = factor(x = c(rep("TKO", nrow(tko_freq_n)), rep("Ctrl", nrow(ctrl_freq_n)), rep("WT", nrow(wt_freq_n))), levels = c("TKO", "Ctrl", "WT")),
            freq = c(tko_freq_n[, ct_col], ctrl_freq_n[, ct_col], wt_freq_n[, ct_col])
        )
        my_comparisons <- list(c("TKO", "Ctrl"), c("TKO", "WT"), c("Ctrl", "WT"))
        # NOTE(review): stat.test and my_comparisons are computed but unused;
        # the significance labels actually drawn come from stat_f below
        stat.test <- compare_means(data = df_plot_points, formula = freq ~ genotype)
        stat_f <- stat_comparison[stat_comparison$cell_type_color == ct_col, ]
        p <- ggplot(data = df_plot_points, aes(x = genotype, y = freq)) +
            geom_dotplot(aes(fill = genotype), dotsize = 1.3, binaxis = "y", stackdir = "center", show.legend = F) +
            stat_pvalue_manual(stat_f, y.position = max(df_plot_points$freq) * 1.1, step.increase = 0.1, label = "q.signif") +
            scale_fill_manual(values = genotype_color) +
            ggtitle(label = main_tag) +
            theme(plot.title = element_text(hjust = 0.5, size = 10)) +
            ylab("") +
            ylim(0, max(df_plot_points$freq) * 1.4) +
            xlab("")
        # theme(axis.text.x = element_text(size=14))
        # stat_compare_means(label = "p.signif",comparisons = my_comparisons) +
        plot_list[[ct_col]] <- p
        if (plot_pdf) {
            ggsave(filename = sprintf("%s/4N_%s.pdf", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        } else {
            ggsave(filename = sprintf("%s/4N_%s.png", fig_dir, main_tag), width = 3, height = 2.3, plot = p)
        }
    }
    # combined 4x4 grid of all per-cell-type panels
    p_all <- grid.arrange(grobs = plot_list, ncol = 4, nrow = 4)
    if (plot_pdf) {
        ggsave(filename = sprintf("%s/4N_all_cell_types.pdf", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    } else {
        ggsave(filename = sprintf("%s/4N_all_cell_types.png", fig_dir), width = 8.5, height = 6.5, plot = p_all)
    }
}
|
#' @title Run all possible one-way ANOVAs between categorical targets and numerical columns of a data frame
#' @description For every column with 3-14 unique values (treated as a categorical
#'   target) and every column with more than 20 unique values (treated as
#'   numerical), fits \code{aov(numerical ~ target)} and collects the p-values
#'   into a data frame, written to an Excel workbook with one sheet per target.
#'   Columns that qualify as neither are reported to the console.
#' @param data A data frame containing both candidate target and numerical columns.
#' @param filename Optional output workbook name (without extension). Defaults to
#'   "ANOVA.xlsx" in the working directory.
#' @return NULL (called for its side effect of writing an .xlsx file).
#' @examples ANOVA_testing(df)
#' @export ANOVA_testing
#' @import xlsx
ANOVA_testing<-function(data,filename=NULL){
  target1=c() #segregating anova targets
  # Pass 1: columns with 3-14 unique values become ANOVA targets; columns
  # with 16-19 or exactly 2 unique values are reported as unusable.
  # NOTE(review): columns with exactly 15 or exactly 20 unique values match
  # neither branch and are skipped silently — confirm the boundaries.
  for (i in 1:ncol(data))
  {
    if((length(unique(data[,i]))>2) & (length(unique(data[,i]))<15))
    {
      target1[i]<-i
    }
    if(length(unique(data[,i]))>15 & length(unique(data[,i]))<20 | length(unique(data[,i]))==2){
      print("---------------------------------------------------------------------")
      print(paste("ANOVA not possible for ",names(data)[i],"as a target since it is a categorical variable with insufficient levels for anova"))
    }
  }
  # target1 holds column indices with NA gaps; drop the gaps, then subset
  target1=target1[!is.na(target1)]
  target1=as.data.frame(data[,target1])
  numeric=c() #segregating Numerical variable for ANOVA
  # Pass 2: columns with > 20 unique values are treated as numerical.
  for (i in 1:ncol(data)){
    if (length(unique(data[,i]))>20){
      numeric[i]<-i
    }
    if(length(unique(data[,i]))<20){
      print("---------------------------------------------------------------------")
      print(paste(names(data)[i]," is not a numerical variable for testing ANOVA"))
    }
  }
  numeric=numeric[!is.na(numeric)]
  Numerical=as.data.frame(data[,numeric])
  names_11<-c()
  names_22<-c()
  ANOVA_1<-c()
  # For each target, run aov() against every numerical column and write one
  # sheet of p-values per target.
  for (i in 1:ncol(target1))
  {
    for (j in 1:ncol(Numerical)){
      names_11[j]<-names(target1)[i]
      names_22[j]<-names(Numerical)[j]
      aov_1=aov(Numerical[,j]~target1[,i])
      # 'Pr(>F)1' picks the first p-value out of the flattened aov summary
      ANOVA_1[j]<-unlist(summary(aov_1))['Pr(>F)1']
    }
    #print(ANOVA_1)
    # NOTE(review): in aov(Numerical ~ target) the numerical column is the
    # dependent variable, so the Dep_Variable/Ind_Variable labels below
    # appear swapped — confirm the intended naming.
    a=data.frame(Dep_Variable=c(names_11),Ind_Variable=c(names_22),P_value=c(ANOVA_1))
    if(is.null(filename)){
      write.xlsx(a,file="ANOVA.xlsx",sheetName = names(target1)[i],append=TRUE)
    }
    else{
      # NOTE(review): paste() inserts a space before ".xlsx" (e.g.
      # "name .xlsx"); paste0() was probably intended.
      write.xlsx(a,file=paste(filename,".xlsx"),sheetName = names(target1)[i],append=TRUE)
    }
  }
}
|
/R/ANOVA_testing.R
|
no_license
|
Manistrikes23493/PackageHyptest
|
R
| false
| false
| 2,514
|
r
|
#' @title Screens a data.frame for ANOVA target/outcome pairs and writes all
#'   one-way ANOVA p-values to an Excel workbook
#' @description Every column with 3-14 distinct values is treated as a
#'   candidate grouping (target) variable and every column with more than 20
#'   distinct values as a numeric outcome.  A one-way ANOVA is fit for each
#'   target x outcome pair and the p-values are written to one worksheet per
#'   target variable.  Columns that qualify for neither role are reported on
#'   the console, mirroring the original diagnostics.
#' @param data A data.frame whose columns are screened as described above.
#' @param filename Optional output file name without extension; when NULL the
#'   workbook is written to "ANOVA.xlsx".
#' @return NULL, invisibly.  Called for its side effect of writing the
#'   workbook.
#' @examples
#' \dontrun{ANOVA_testing(df)}
#' @export
#' @import xlsx
ANOVA_testing <- function(data, filename = NULL) {
  # Distinct-value count per column drives both classifications below.
  n_unique <- vapply(data, function(col) length(unique(col)), integer(1))

  # Report columns that cannot serve as an ANOVA target (binary, or 16-19
  # levels); parentheses make the original &/| precedence explicit.
  bad_target <- (n_unique > 15 & n_unique < 20) | n_unique == 2
  for (nm in names(data)[bad_target]) {
    print("---------------------------------------------------------------------")
    print(paste("ANOVA not possible for ", nm,
                "as a target since it is a categorical variable with insufficient levels for anova"))
  }
  # drop = FALSE keeps a data.frame (and its column names) even when only one
  # column qualifies -- the original data[, idx] collapsed to a nameless
  # vector in that case, breaking names(target1)[i] and the sheet names.
  target1 <- data[, n_unique > 2 & n_unique < 15, drop = FALSE]

  # Report columns with too few distinct values to be treated as numeric.
  for (nm in names(data)[n_unique < 20]) {
    print("---------------------------------------------------------------------")
    print(paste(nm, " is not a numerical variable for testing ANOVA"))
  }
  Numerical <- data[, n_unique > 20, drop = FALSE]

  # paste0 fixes the original paste(filename, ".xlsx"), which inserted a
  # space before the extension ("name .xlsx").
  out_file <- if (is.null(filename)) "ANOVA.xlsx" else paste0(filename, ".xlsx")

  # One worksheet per target variable; rows are the numeric outcomes.
  for (i in seq_len(ncol(target1))) {
    p_values <- vapply(seq_len(ncol(Numerical)), function(j) {
      fit <- aov(Numerical[, j] ~ target1[, i])
      # 'Pr(>F)1' is the p-value of the grouping term in the unlisted summary.
      unname(unlist(summary(fit))["Pr(>F)1"])
    }, numeric(1))
    result <- data.frame(Dep_Variable = names(target1)[i],
                         Ind_Variable = names(Numerical),
                         P_value = p_values)
    # NOTE(review): append = TRUE on the very first write assumes the xlsx
    # backend tolerates a missing file -- confirm against the installed xlsx.
    write.xlsx(result, file = out_file, sheetName = names(target1)[i],
               append = TRUE)
  }
  invisible(NULL)
}
|
# Regression tests for the canvasXpress area-chart helpers: each of
# cXarea1 .. cXarea6 must return an object that is both a "canvasXpress"
# object and an "htmlwidget".
context("canvasXpress Web Charts - Area")
# The helper file lives in a different relative location depending on how the
# tests are run.  A plain `if` replaces the original `ifelse()`: ifelse() is a
# vectorised function and should not be used for scalar control flow (its
# return value was discarded here anyway).
if (interactive()) {
  source("tests/cX-function.R")
} else {
  source("../cX-function.R")
}
test_that("cXarea1", {
  result <- cXarea1()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea2", {
  result <- cXarea2()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea3", {
  result <- cXarea3()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea4", {
  result <- cXarea4()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea5", {
  result <- cXarea5()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea6", {
  result <- cXarea6()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
|
/tests/testthat/test-area.R
|
no_license
|
ginberg/canvasXpress
|
R
| false
| false
| 1,223
|
r
|
# Regression tests for the canvasXpress area-chart helpers: each of
# cXarea1 .. cXarea6 must return an object that is both a "canvasXpress"
# object and an "htmlwidget".
context("canvasXpress Web Charts - Area")
# The helper file lives in a different relative location depending on how the
# tests are run.  A plain `if` replaces the original `ifelse()`: ifelse() is a
# vectorised function and should not be used for scalar control flow (its
# return value was discarded here anyway).
if (interactive()) {
  source("tests/cX-function.R")
} else {
  source("../cX-function.R")
}
test_that("cXarea1", {
  result <- cXarea1()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea2", {
  result <- cXarea2()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea3", {
  result <- cXarea3()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea4", {
  result <- cXarea4()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea5", {
  result <- cXarea5()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
test_that("cXarea6", {
  result <- cXarea6()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
|
# ------------------------------------------------
# The Separation of Between-Person and Within-Person Components
# of Individual Change Over Time: A Latent Curve Model
# With Structured Residuals
#
# Curran, Howard, Bainter, Lane, McGinley
#
# Journal of Consulting and Clinical Psychology 2014
# doi.org/10.1037/a0035297
#
# All models below share the same measurement structure (five repeated
# measures per construct), the same structured-residual set-up and the same
# first-order autoregressive / cross-lagged residual regressions.  The lavaan
# syntax is therefore assembled from small helper functions instead of ~700
# lines of duplicated literal model strings; lavaan merges multiple `~`
# statements for the same outcome, and whitespace in model syntax is
# irrelevant, so the specifications are identical to the originals.
# ------------------------------------------------

# read in the data
lcmsr.sim <- read.table("data/currandemo.dat",
                        col.names = c("id", "gen", "trt",
                                      paste0("alc", 1:5), paste0("dep", 1:5)))

# load necessary libraries
# install.packages("lavaan") # install lavaan if you haven't.
library(lavaan)

# ---- lavaan-syntax helpers ---------------------------------------------

# Observed indicator names for one construct, e.g. "alc1" ... "alc5".
obs_names <- function(stub) paste0(stub, 1:5)

# Random intercept: unit loadings on all five indicators, estimated mean
# (optionally regressed on covariates via `preds`) and variance.
growth_int <- function(stub, fac = paste0(stub, ".i"), preds = "1") {
  obs <- obs_names(stub)
  paste(paste0(fac, " =~ ", paste0("1*", obs, collapse = " + ")),
        paste0(fac, " ~ ", preds),
        paste0(fac, " ~~ ", fac),
        sep = "\n")
}

# Random linear slope: loadings fixed to 0..4, estimated mean and variance.
growth_slope <- function(stub, fac = paste0(stub, ".s"), preds = "1") {
  obs <- obs_names(stub)
  paste(paste0(fac, " =~ ", paste0(0:4, "*", obs, collapse = " + ")),
        paste0(fac, " ~ ", preds),
        paste0(fac, " ~~ ", fac),
        sep = "\n")
}

# Structured residuals: fix the indicator residual variances to zero and
# replace them with single-indicator phantom factors s<var>1..s<var>5
# (zero means, free variances).
sr_block <- function(stub) {
  obs <- obs_names(stub)
  res <- paste0("s", obs)
  paste(c(paste0(obs, " ~~ 0*", obs),
          paste0(res, " =~ 1*", obs),
          paste0(res, " ~ 0"),
          paste0(res, " ~~ ", res)),
        collapse = "\n")
}

# First-order autoregression among the structured residuals with one
# equality-constrained coefficient (label recycled across the four lags).
ar_block <- function(stub, label) {
  res <- paste0("s", obs_names(stub))
  paste0(res[2:5], " ~ ", label, "*", res[1:4], collapse = "\n")
}

# Cross-lagged regressions of `dv` residuals on `iv` residuals at the
# previous occasion; `labels` is NULL (free, unlabelled), a single label
# (equality constrained) or a vector of four labels.
cl_block <- function(dv_stub, iv_stub, labels = NULL) {
  dv <- paste0("s", dv_stub, 2:5)
  iv <- paste0("s", iv_stub, 1:4)
  rhs <- if (is.null(labels)) iv else paste0(labels, "*", iv)
  paste0(dv, " ~ ", rhs, collapse = "\n")
}

# Within-occasion residual covariances: time 1 free, times 2-5 equality
# constrained via `label`.
rescov_block <- function(label) {
  paste(c("salc1 ~~ sdep1",
          paste0("salc", 2:5, " ~~ ", label, "*sdep", 2:5)),
        collapse = "\n")
}

# Linear-change ("kappa") constraints on four cross-lagged labels.
kappa_block <- function(labs) {
  paste(c(paste0("kappa := ", labs[2], " - ", labs[1]),
          paste0(labs[2:4], " == ", labs[1], " + ", 1:3, "*kappa")),
        collapse = "\n")
}

# Shared latent-factor skeleton for the bivariate models: alcohol intercept
# and slope, depression intercept, and their covariances.  `preds` lets the
# conditional model (model 8) regress all growth factors on gen and trt.
biv_latent <- function(preds = "1") {
  paste(growth_int("alc", preds = preds),
        growth_slope("alc", preds = preds),
        "alc.i ~~ alc.s",
        growth_int("dep", preds = preds),
        "dep.i ~~ alc.i",
        "dep.i ~~ alc.s",
        sep = "\n")
}

# Latent factors + structured residuals + within-occasion residual
# covariances -- the part common to all bivariate models.
biv_core <- function(cov_label, preds = "1") {
  paste(biv_latent(preds), sr_block("alc"), sr_block("dep"),
        rescov_block(cov_label), sep = "\n")
}

# -------------------------------
# univariate unconditional models for alcohol
# -------------------------------
# model 1: random intercept only
alc.mod1 <- paste(growth_int("alc", fac = "alc"), sr_block("alc"), sep = "\n")
alc.fit1 <- lavaan(alc.mod1, lcmsr.sim)
# summary(alc.fit1, fit.measures = TRUE)

# model 2: random intercept + linear slope
alc.mod2 <- paste(growth_int("alc"), growth_slope("alc"),
                  "alc.i ~~ alc.s", sr_block("alc"), sep = "\n")
alc.fit2 <- lavaan(alc.mod2, lcmsr.sim)
# summary(alc.fit2, fit.measures = TRUE)
# print(anova(alc.fit1, alc.fit2))

# model 3: model 2 + equality-constrained autoregressive residuals
alc.mod3 <- paste(alc.mod2, ar_block("alc", "pyy"), sep = "\n")
alc.fit3 <- lavaan(alc.mod3, lcmsr.sim)
# summary(alc.fit3, fit.measures = TRUE)
# print(anova(alc.fit3, alc.fit2))

# -------------------------------
# univariate unconditional models for depression
# -------------------------------
# model 1: random intercept only
dep.mod1 <- paste(growth_int("dep", fac = "dep"), sr_block("dep"), sep = "\n")
dep.fit1 <- lavaan(dep.mod1, lcmsr.sim)
# summary(dep.fit1, fit.measures = TRUE)

# model 2: random intercept + linear slope
dep.mod2 <- paste(growth_int("dep"), growth_slope("dep"),
                  "dep.s ~~ dep.i", sr_block("dep"), sep = "\n")
dep.fit2 <- lavaan(dep.mod2, lcmsr.sim)
# summary(dep.fit2, fit.measures = TRUE)
# print(anova(dep.fit1, dep.fit2))

# model 3: intercept-only model + equality-constrained autoregressive
# residuals
dep.mod3 <- paste(dep.mod1, ar_block("dep", "pzz"), sep = "\n")
dep.fit3 <- lavaan(dep.mod3, lcmsr.sim)
# summary(dep.fit3, fit.measures = TRUE)
print(anova(dep.fit1, dep.fit3))

# -------------------------------
# bivariate unconditional models for alcohol & depression
# -------------------------------
# model 1: autoregressions only (labels pyy / pzz), residual covariance vzy
ad.mod1 <- paste(biv_core("vzy"),
                 ar_block("alc", "pyy"), ar_block("dep", "pzz"), sep = "\n")
ad.fit1 <- lavaan(ad.mod1, lcmsr.sim)
summary(ad.fit1, fit.measures = TRUE)

# model 2: + free cross-lags from depression residuals to alcohol residuals
ad.mod2 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), cl_block("alc", "dep"),
                 ar_block("dep", "p3"), sep = "\n")
ad.fit2 <- lavaan(ad.mod2, lcmsr.sim)
# summary(ad.fit2, fit.measures = TRUE)
# print(anova(ad.fit1, ad.fit2))

# model 3: dep -> alc cross-lags constrained equal (p4)
ad.mod3 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), cl_block("alc", "dep", "p4"),
                 ar_block("dep", "p3"), sep = "\n")
ad.fit3 <- lavaan(ad.mod3, lcmsr.sim)
# summary(ad.fit3, fit.measures = TRUE)
# print(anova(ad.fit2, ad.fit3))

# model 4: free cross-lags from alcohol residuals to depression residuals
ad.mod4 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), ar_block("dep", "p3"),
                 cl_block("dep", "alc"), sep = "\n")
ad.fit4 <- lavaan(ad.mod4, lcmsr.sim)
# summary(ad.fit4, fit.measures = TRUE)
# print(anova(ad.fit1, ad.fit4))

# model 5: alc -> dep cross-lags constrained equal (p4)
ad.mod5 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), ar_block("dep", "p3"),
                 cl_block("dep", "alc", "p4"), sep = "\n")
ad.fit5 <- lavaan(ad.mod5, lcmsr.sim)
# summary(ad.fit5, fit.measures = TRUE)
# print(anova(ad.fit4, ad.fit5))

# model 6: alc -> dep cross-lags change linearly over time (kappa)
ad.mod6 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), ar_block("dep", "p3"),
                 cl_block("dep", "alc", c("p4", "p5", "p6", "p7")),
                 kappa_block(c("p4", "p5", "p6", "p7")), sep = "\n")
ad.fit6 <- lavaan(ad.mod6, lcmsr.sim)
# summary(ad.fit6, fit.measures = TRUE)
# print(anova(ad.fit4, ad.fit6))

# model 7: equal dep -> alc cross-lags (p4) plus linearly changing
# alc -> dep cross-lags (p5..p8 with kappa)
ad.mod7 <- paste(biv_core("p1"),
                 ar_block("alc", "p2"), cl_block("alc", "dep", "p4"),
                 ar_block("dep", "p3"),
                 cl_block("dep", "alc", c("p5", "p6", "p7", "p8")),
                 kappa_block(c("p5", "p6", "p7", "p8")), sep = "\n")
ad.fit7 <- lavaan(ad.mod7, lcmsr.sim)
# summary(ad.fit7, fit.measures = TRUE)

# -------------------------------
# bivariate model for alcohol & depression
# conditional on gender & treatment
# -------------------------------
# model 8: model 7 with gen and trt predicting all growth factors
ad.mod8 <- paste(biv_core("p1", preds = "1 + gen + trt"),
                 ar_block("alc", "p2"), cl_block("alc", "dep", "p4"),
                 ar_block("dep", "p3"),
                 cl_block("dep", "alc", c("p5", "p6", "p7", "p8")),
                 kappa_block(c("p5", "p6", "p7", "p8")), sep = "\n")
ad.fit8 <- lavaan(ad.mod8, lcmsr.sim)
summary(ad.fit8, fit.measures = TRUE)
|
/curran2014-separation.R
|
no_license
|
cddesja/lavaan-reproducible
|
R
| false
| false
| 18,118
|
r
|
# ------------------------------------------------
# The Separation of Between-Person and Within-Person Components
# of Individual Change Over Time: A Latent Curve Model
# With Structured Residuals
#
# Curran, Howard, Bainter, Lane, McGinley
#
# Journal of Consulting and Clinical Psychology 2014
# doi.org/10.1037/a0035297
# ------------------------------------------------
# read in the data
lcmsr.sim <- read.table("data/currandemo.dat", col.names = c("id", "gen", "trt", paste0("alc", 1:5), paste0("dep", 1:5)))
# load necessary libraries
# install.packages("lavaan") # install lavaan if you haven't.
library(lavaan)
# -------------------------------
# univariate unconditional model for alcohol
# -------------------------------
# model 1
alc.mod1 <- '
# ALCOHOL #
# random intercept
alc =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc ~ 1
alc ~~ alc
# create structured residuals
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
'
alc.fit1 <- lavaan(alc.mod1, lcmsr.sim)
# summary(alc.fit1, fit.measures = T)
# model 2
alc.mod2 <- '
# ALCOHOL #
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# create structured residuals
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
'
alc.fit2 <- lavaan(alc.mod2, lcmsr.sim)
# summary(alc.fit2, fit.measures = T)
# print(anova(alc.fit1, alc.fit2))
alc.mod3 <- '
# ALCOHOL #
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# create structured residuals
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# add auto-regressive paths
salc2 ~ pyy*salc1
salc3 ~ pyy*salc2
salc4 ~ pyy*salc3
salc5 ~ pyy*salc4
'
alc.fit3 <- lavaan(alc.mod3, lcmsr.sim)
# summary(alc.fit3, fit.measures = T)
# print(anova(alc.fit3, alc.fit2))
# -------------------------------
# univariate unconditional model for depression
# -------------------------------
# model 1
dep.mod1 <- '
# DEPRESSION #
# random intercept
dep =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep ~ 1
dep ~~ dep
# create structured residuals
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
'
dep.fit1 <- lavaan(dep.mod1, lcmsr.sim)
# summary(dep.fit1, fit.measures = T)
# model 2
dep.mod2 <- '
# DEPRESSION #
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
# random slope
dep.s =~ 0*dep1 + 1*dep2 + 2*dep3 + 3*dep4 + 4*dep5
dep.s ~ 1
dep.s ~~ dep.s
dep.s ~~ dep.i
# create structured residuals
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
'
dep.fit2 <- lavaan(dep.mod2, lcmsr.sim)
# summary(dep.fit2, fit.measures = T)
# print(anova(dep.fit1, dep.fit2))
# model 3
dep.mod3 <- '
# DEPRESSION #
# random intercept
dep =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep ~ 1
dep ~~ dep
# create structured residuals
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
# add auto-regressive paths
sdep2 ~ pzz*sdep1
sdep3 ~ pzz*sdep2
sdep4 ~ pzz*sdep3
sdep5 ~ pzz*sdep4
'
dep.fit3 <- lavaan(dep.mod3, lcmsr.sim)
# summary(dep.fit3, fit.measures = T)
print(anova(dep.fit1, dep.fit3))
# -------------------------------
# bivariate unconditional model for alcohol & depression
# -------------------------------
# model 1
ad.mod1 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ vzy*sdep2
salc3 ~~ vzy*sdep3
salc4 ~~ vzy*sdep4
salc5 ~~ vzy*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ pyy*salc1
salc3 ~ pyy*salc2
salc4 ~ pyy*salc3
salc5 ~ pyy*salc4
# DEPRESSION
sdep2 ~ pzz*sdep1
sdep3 ~ pzz*sdep2
sdep4 ~ pzz*sdep3
sdep5 ~ pzz*sdep4
'
ad.fit1 <- lavaan(ad.mod1, lcmsr.sim)
summary(ad.fit1, fit.measures = T)
# model 2
ad.mod2 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1 + sdep1
salc3 ~ p2*salc2 + sdep2
salc4 ~ p2*salc3 + sdep3
salc5 ~ p2*salc4 + sdep4
# DEPRESSION
sdep2 ~ p3*sdep1
sdep3 ~ p3*sdep2
sdep4 ~ p3*sdep3
sdep5 ~ p3*sdep4
'
ad.fit2 <- lavaan(ad.mod2, lcmsr.sim)
# summary(ad.fit2, fit.measures = T)
# print(anova(ad.fit1, ad.fit2))
# model 3 -- bivariate latent curve model with structured residuals (LCM-SR):
# autoregressions held equal over time (alcohol p2, depression p3) and the
# depression -> alcohol cross-lagged effect on the structured residuals
# also held equal over time (p4). Residual covariances from wave 2 on share
# label p1. lavaan() is the low-level interface, so every intercept and
# (co)variance is specified explicitly in the model string.
ad.mod3 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1 + p4*sdep1
salc3 ~ p2*salc2 + p4*sdep2
salc4 ~ p2*salc3 + p4*sdep3
salc5 ~ p2*salc4 + p4*sdep4
# DEPRESSION
sdep2 ~ p3*sdep1
sdep3 ~ p3*sdep2
sdep4 ~ p3*sdep3
sdep5 ~ p3*sdep4
'
# Fit to the simulated data; compare against model 2 with the (commented)
# likelihood-ratio test to judge the equality constraint on the cross-lag.
ad.fit3 <- lavaan(ad.mod3, lcmsr.sim)
# summary(ad.fit3, fit.measures = T)
# print(anova(ad.fit2, ad.fit3))
# model 4 -- cross-lag direction reversed relative to models 2/3: the
# alcohol structured residuals predict the next wave's depression residuals,
# with one freely estimated coefficient per wave. Autoregressions stay
# constrained (alcohol p2, depression p3); residual covariances share p1.
ad.mod4 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1
salc3 ~ p2*salc2
salc4 ~ p2*salc3
salc5 ~ p2*salc4
# DEPRESSION
sdep2 ~ p3*sdep1 + salc1
sdep3 ~ p3*sdep2 + salc2
sdep4 ~ p3*sdep3 + salc3
sdep5 ~ p3*sdep4 + salc4
'
# Fit; the commented anova() compares against model 1 (no cross-lags).
ad.fit4 <- lavaan(ad.mod4, lcmsr.sim)
# summary(ad.fit4, fit.measures = T)
# print(anova(ad.fit1, ad.fit4))
# model 5 -- as model 4, but the alcohol -> depression cross-lagged effect
# is constrained equal across waves (label p4). Nested in model 4, so the
# (commented) anova() below is the test of that equality constraint.
ad.mod5 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1
salc3 ~ p2*salc2
salc4 ~ p2*salc3
salc5 ~ p2*salc4
# DEPRESSION
sdep2 ~ p3*sdep1 + p4*salc1
sdep3 ~ p3*sdep2 + p4*salc2
sdep4 ~ p3*sdep3 + p4*salc3
sdep5 ~ p3*sdep4 + p4*salc4
'
ad.fit5 <- lavaan(ad.mod5, lcmsr.sim)
# summary(ad.fit5, fit.measures = T)
# print(anova(ad.fit4, ad.fit5))
# model 6 -- alcohol -> depression cross-lags (labels p4..p7) constrained to
# change linearly over time: kappa is the per-wave increment and the `==`
# lines impose p4 + k*kappa on successive waves. Note the first constraint
# (p5 == p4 + 1*kappa) merely restates the := definition of kappa; the
# binding restrictions are the p6 and p7 lines.
ad.mod6 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1
salc3 ~ p2*salc2
salc4 ~ p2*salc3
salc5 ~ p2*salc4
# DEPRESSION
sdep2 ~ p3*sdep1 + p4*salc1
sdep3 ~ p3*sdep2 + p5*salc2
sdep4 ~ p3*sdep3 + p6*salc3
sdep5 ~ p3*sdep4 + p7*salc4
kappa := p5 - p4
p5 == p4 + 1*kappa
p6 == p4 + 2*kappa
p7 == p4 + 3*kappa
'
ad.fit6 <- lavaan(ad.mod6, lcmsr.sim)
# summary(ad.fit6, fit.measures = T)
# print(anova(ad.fit4, ad.fit6))
# model 7 -- bidirectional cross-lags: depression -> alcohol held equal over
# time (p4) while alcohol -> depression (p5..p8) follows a linear trend via
# kappa (the `==` lines; the first restates kappa's := definition, the last
# two are the binding restrictions).
ad.mod7 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1 + p4*sdep1
salc3 ~ p2*salc2 + p4*sdep2
salc4 ~ p2*salc3 + p4*sdep3
salc5 ~ p2*salc4 + p4*sdep4
# DEPRESSION
sdep2 ~ p3*sdep1 + p5*salc1
sdep3 ~ p3*sdep2 + p6*salc2
sdep4 ~ p3*sdep3 + p7*salc3
sdep5 ~ p3*sdep4 + p8*salc4
kappa := p6 - p5
p6 == p5 + 1*kappa
p7 == p5 + 2*kappa
p8 == p5 + 3*kappa
'
ad.fit7 <- lavaan(ad.mod7, lcmsr.sim)
# summary(ad.fit7, fit.measures = T)
# -------------------------------
# bivariate model for alcohol & depression
# conditional on gender & treatment
# -------------------------------
# model 8 -- model 7 with the time-invariant covariates gen and trt added as
# predictors of the three random effects (alc.i, alc.s, dep.i). Everything
# else (structured residuals, cross-lags, kappa trend) is as in model 7.
ad.mod8 <- '
# ---------------------------
# latent factors
# ---------------------------
# ALCOHOL
# random intercept
alc.i =~ 1*alc1 + 1*alc2 + 1*alc3 + 1*alc4 + 1*alc5
alc.i ~ 1 + gen + trt
alc.i ~~ alc.i
# random slope
alc.s =~ 0*alc1 + 1*alc2 + 2*alc3 + 3*alc4 + 4*alc5
alc.s ~ 1 + gen + trt
alc.s ~~ alc.s
alc.i ~~ alc.s
# DEPRESSION
# random intercept
dep.i =~ 1*dep1 + 1*dep2 + 1*dep3 + 1*dep4 + 1*dep5
dep.i ~ 1 + gen + trt
dep.i ~~ dep.i
dep.i ~~ alc.i
dep.i ~~ alc.s
# ---------------------------
# create structured residuals
# ---------------------------
# ALCOHOL
alc1 ~~ 0*alc1
alc2 ~~ 0*alc2
alc3 ~~ 0*alc3
alc4 ~~ 0*alc4
alc5 ~~ 0*alc5
salc1 =~ 1*alc1
salc2 =~ 1*alc2
salc3 =~ 1*alc3
salc4 =~ 1*alc4
salc5 =~ 1*alc5
salc1 ~ 0
salc2 ~ 0
salc3 ~ 0
salc4 ~ 0
salc5 ~ 0
salc1 ~~ salc1
salc2 ~~ salc2
salc3 ~~ salc3
salc4 ~~ salc4
salc5 ~~ salc5
# DEPRESSION
dep1 ~~ 0*dep1
dep2 ~~ 0*dep2
dep3 ~~ 0*dep3
dep4 ~~ 0*dep4
dep5 ~~ 0*dep5
sdep1 =~ 1*dep1
sdep2 =~ 1*dep2
sdep3 =~ 1*dep3
sdep4 =~ 1*dep4
sdep5 =~ 1*dep5
sdep1 ~ 0
sdep2 ~ 0
sdep3 ~ 0
sdep4 ~ 0
sdep5 ~ 0
sdep1 ~~ sdep1
sdep2 ~~ sdep2
sdep3 ~~ sdep3
sdep4 ~~ sdep4
sdep5 ~~ sdep5
salc1 ~~ sdep1
salc2 ~~ p1*sdep2
salc3 ~~ p1*sdep3
salc4 ~~ p1*sdep4
salc5 ~~ p1*sdep5
# ---------------------------
# residual regressions
# ---------------------------
# ALCOHOL
salc2 ~ p2*salc1 + p4*sdep1
salc3 ~ p2*salc2 + p4*sdep2
salc4 ~ p2*salc3 + p4*sdep3
salc5 ~ p2*salc4 + p4*sdep4
# DEPRESSION
sdep2 ~ p3*sdep1 + p5*salc1
sdep3 ~ p3*sdep2 + p6*salc2
sdep4 ~ p3*sdep3 + p7*salc3
sdep5 ~ p3*sdep4 + p8*salc4
kappa := p6 - p5
p6 == p5 + 1*kappa
p7 == p5 + 2*kappa
p8 == p5 + 3*kappa
'
ad.fit8 <- lavaan(ad.mod8, lcmsr.sim)
# NOTE(review): prefer TRUE over T (T is reassignable).
summary(ad.fit8, fit.measures = T)
|
## ------------------------------------------------------------------
## Demo: kronRLS-MKL (Kronecker regularized least squares with
## multiple-kernel learning) for drug-target interaction prediction
## on one of four benchmark data sets (en / ic / gpcr / nr).
## Reads a drug-drug similarity matrix (sd), a target-target similarity
## matrix (st) and an adjacency matrix (Y), runs repeated k-fold
## cross-validation on positive links only and prints the mean
## percentile rank (MPR) with a standard error over folds.
## NOTE(review): setwd("your\\dir") is a placeholder and
## rm(list = ls()) wipes the global environment -- both are
## script-level side effects best avoided in shared code.
## ------------------------------------------------------------------
setwd("your\\dir")
rm(list = ls())
## current data set name
db <- "nr"
## NOTE(review): `sd` masks stats::sd for the rest of the script.
switch (db,
en = {
cat("en data\n")
flush.console()
sd <- read.table("e_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("e_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("e_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
ic = {
cat("ic data\n")
flush.console()
sd <- read.table("ic_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("ic_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("ic_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
gpcr = {
cat("gpcr data\n")
flush.console()
sd <- read.table("gpcr_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("gpcr_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("gpcr_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
nr = {
cat("nr data\n")
flush.console()
sd <- read.table("nr_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("nr_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("nr_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
stop("db should be one of the follows:
{en, ic, gpcr, nr}\n")
)
## load required packages
## NOTE(review): require() returns FALSE instead of erroring when a
## package is missing; library() would fail fast here.
pkgs <- c("matrixcalc", "data.table", "Rcpp", "ROCR",
"Bolstad2", "MESS", "nloptr")
rPkgs <- lapply(pkgs, require, character.only = TRUE)
## source required R files
rSourceNames <- c(
"doCVPositiveOnly.R",
"doCVPositiveOnly3.R",
"evalMetrics.R",
"combineKernels.R",
"eigDecomp.R",
"kronRls.R",
"kronRlsC.R",
"kronRlsMKL.R",
"optWeights.R"
)
rSN <- lapply(rSourceNames, source, verbose = FALSE)
## sourceCPP required C++ files
cppSourceNames <- c("fastKF.cpp", "fastKgipMat.cpp",
"log1pexp.cpp", "sigmoid.cpp")
cppSN <- lapply(cppSourceNames, sourceCpp, verbose = FALSE)
## convert to kernel
## Force each similarity matrix to be a valid kernel: symmetrise, then
## add epsilon to the diagonal repeatedly until positive semi-definite.
isKernel <- TRUE
if (isKernel) {
if (!isSymmetric(sd)) {
sd <- (sd + t(sd)) / 2
}
epsilon <- 0.1
while (!is.positive.semi.definite(sd)) {
sd <- sd + epsilon * diag(nrow(sd))
}
if (!isSymmetric(st)) {
st <- (st + t(st)) / 2
}
epsilon <- 0.1
while (!is.positive.semi.definite(st)) {
st <- st + epsilon * diag(nrow(st))
}
}
## Swap the roles of the two axes: transpose Y back and exchange the
## drug/target kernels so rows of Y line up with `sd` and columns with `st`.
Y <- t(Y)
tmp <- sd
sd <- st
st <- tmp
## Quick sanity peek (auto-printed only when run interactively / echoed).
Y[1:3, 1:3]
## do cross-validation
kfold <- 10
numSplit <- 5
## DT-Hybrid method
savedFolds <- doCVPositiveOnly3(Y, kfold = kfold, numSplit = numSplit)
## saving results
resMetrics <- matrix(NA, nrow = kfold, ncol = 1)
colnames(resMetrics) <- c("MPR")
resMetrics <- as.data.frame(resMetrics)
finalResult <- vector("list", length = numSplit)
## alpha and beta
## Learned kernel weights per fold: two per kernel list (drug side, target side).
resAB <- matrix(NA, nrow = kfold, ncol = 4)
colnames(resAB) <- c("optAlpha1", "optAlpha2", "optBeta1", "optBeta2")
resAB <- as.data.frame(resAB)
finalAB <- vector("list", length = numSplit)
## main loop
## savedFolds[[i]][[j]] elements used below: [[1]] test set, [[2]]/[[3]]
## test row/column indices, [[4]]/[[5]] known drug/target indices,
## [[6]] training adjacency with the held-out links removed.
for (i in 1:numSplit) {
for (j in 1:kfold) {
cat("numSplit:", i, "/", numSplit, ";", "kfold:", j,
"/", kfold, "\n")
flush.console()
## training set with the test set links removed
Yfold <- savedFolds[[i]][[j]][[6]]
KgipD <- fastKgipMat(Yfold, 1)
KgipT <- fastKgipMat(t(Yfold), 1)
## extract test set
testSet <- savedFolds[[i]][[j]][[1]]
knownDrugIndex <- savedFolds[[i]][[j]][[4]]
knownTargetIndex <- savedFolds[[i]][[j]][[5]]
testIndexRow <- savedFolds[[i]][[j]][[2]]
testIndexCol <- savedFolds[[i]][[j]][[3]]
## Hyperparameters handed to kronRlsMKL (see kronRlsMKL.R for their
## exact meaning -- presumably regularisation, bandwidth, iteration cap).
lmd <- 1
sgm <- 0.25
maxiter <- 20
## kronrlsMKL
MKL <- kronRlsMKL(
K1 = list(sd = sd, KgipD = KgipD),
K2 = list(st = st, KgipT = KgipT),
Yfold = Yfold,
lmd = lmd,
sgm = sgm,
maxiter = maxiter
)
Ypred <- MKL$Yhat
resAB[j, 1:2] <- MKL$alph
resAB[j, 3:4] <- MKL$bta
## result
result2 <- evalMetrics(Ypred = Ypred, testSet = testSet)
resMetrics[j, ] <- result2
}
## resMetrics/resAB are fully overwritten each split before being copied.
finalResult[[i]] <- resMetrics
finalAB[[i]] <- resAB
}
# combine result
## Mean MPR over all numSplit * kfold folds plus a naive SE across folds.
resCom <- as.data.frame(data.table::rbindlist(finalResult))
resMean <- colMeans(resCom)
se <- sqrt(var(resCom[, 1]) / length(resCom[, 1]))
cat("kronRLS-MKL:", "MPR =", round(resMean, 3), "+\\-", round(se, 3), "\n")
flush.console()
|
/KronRLSMKL/demo_kronRlsMKL.R
|
no_license
|
minghao2016/chemogenomicAlg4DTIpred
|
R
| false
| false
| 4,697
|
r
|
## ------------------------------------------------------------------
## Demo: kronRLS-MKL (Kronecker regularized least squares with
## multiple-kernel learning) for drug-target interaction prediction
## on one of four benchmark data sets (en / ic / gpcr / nr).
## Reads a drug-drug similarity matrix (sd), a target-target similarity
## matrix (st) and an adjacency matrix (Y), runs repeated k-fold
## cross-validation on positive links only and prints the mean
## percentile rank (MPR) with a standard error over folds.
## NOTE(review): setwd("your\\dir") is a placeholder and
## rm(list = ls()) wipes the global environment -- both are
## script-level side effects best avoided in shared code.
## ------------------------------------------------------------------
setwd("your\\dir")
rm(list = ls())
## current data set name
db <- "nr"
## NOTE(review): `sd` masks stats::sd for the rest of the script.
switch (db,
en = {
cat("en data\n")
flush.console()
sd <- read.table("e_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("e_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("e_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
ic = {
cat("ic data\n")
flush.console()
sd <- read.table("ic_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("ic_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("ic_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
gpcr = {
cat("gpcr data\n")
flush.console()
sd <- read.table("gpcr_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("gpcr_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("gpcr_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
nr = {
cat("nr data\n")
flush.console()
sd <- read.table("nr_simmat_dc.txt")
sd <- as.matrix(sd)
st <- read.table("nr_simmat_dg.txt")
st <- as.matrix(st)
Y <- read.table("nr_admat_dgc.txt")
Y <- as.matrix(Y)
Y <- t(Y)
},
stop("db should be one of the follows:
{en, ic, gpcr, nr}\n")
)
## load required packages
## NOTE(review): require() returns FALSE instead of erroring when a
## package is missing; library() would fail fast here.
pkgs <- c("matrixcalc", "data.table", "Rcpp", "ROCR",
"Bolstad2", "MESS", "nloptr")
rPkgs <- lapply(pkgs, require, character.only = TRUE)
## source required R files
rSourceNames <- c(
"doCVPositiveOnly.R",
"doCVPositiveOnly3.R",
"evalMetrics.R",
"combineKernels.R",
"eigDecomp.R",
"kronRls.R",
"kronRlsC.R",
"kronRlsMKL.R",
"optWeights.R"
)
rSN <- lapply(rSourceNames, source, verbose = FALSE)
## sourceCPP required C++ files
cppSourceNames <- c("fastKF.cpp", "fastKgipMat.cpp",
"log1pexp.cpp", "sigmoid.cpp")
cppSN <- lapply(cppSourceNames, sourceCpp, verbose = FALSE)
## convert to kernel
## Force each similarity matrix to be a valid kernel: symmetrise, then
## add epsilon to the diagonal repeatedly until positive semi-definite.
isKernel <- TRUE
if (isKernel) {
if (!isSymmetric(sd)) {
sd <- (sd + t(sd)) / 2
}
epsilon <- 0.1
while (!is.positive.semi.definite(sd)) {
sd <- sd + epsilon * diag(nrow(sd))
}
if (!isSymmetric(st)) {
st <- (st + t(st)) / 2
}
epsilon <- 0.1
while (!is.positive.semi.definite(st)) {
st <- st + epsilon * diag(nrow(st))
}
}
## Swap the roles of the two axes: transpose Y back and exchange the
## drug/target kernels so rows of Y line up with `sd` and columns with `st`.
Y <- t(Y)
tmp <- sd
sd <- st
st <- tmp
## Quick sanity peek (auto-printed only when run interactively / echoed).
Y[1:3, 1:3]
## do cross-validation
kfold <- 10
numSplit <- 5
## DT-Hybrid method
savedFolds <- doCVPositiveOnly3(Y, kfold = kfold, numSplit = numSplit)
## saving results
resMetrics <- matrix(NA, nrow = kfold, ncol = 1)
colnames(resMetrics) <- c("MPR")
resMetrics <- as.data.frame(resMetrics)
finalResult <- vector("list", length = numSplit)
## alpha and beta
## Learned kernel weights per fold: two per kernel list (drug side, target side).
resAB <- matrix(NA, nrow = kfold, ncol = 4)
colnames(resAB) <- c("optAlpha1", "optAlpha2", "optBeta1", "optBeta2")
resAB <- as.data.frame(resAB)
finalAB <- vector("list", length = numSplit)
## main loop
## savedFolds[[i]][[j]] elements used below: [[1]] test set, [[2]]/[[3]]
## test row/column indices, [[4]]/[[5]] known drug/target indices,
## [[6]] training adjacency with the held-out links removed.
for (i in 1:numSplit) {
for (j in 1:kfold) {
cat("numSplit:", i, "/", numSplit, ";", "kfold:", j,
"/", kfold, "\n")
flush.console()
## training set with the test set links removed
Yfold <- savedFolds[[i]][[j]][[6]]
KgipD <- fastKgipMat(Yfold, 1)
KgipT <- fastKgipMat(t(Yfold), 1)
## extract test set
testSet <- savedFolds[[i]][[j]][[1]]
knownDrugIndex <- savedFolds[[i]][[j]][[4]]
knownTargetIndex <- savedFolds[[i]][[j]][[5]]
testIndexRow <- savedFolds[[i]][[j]][[2]]
testIndexCol <- savedFolds[[i]][[j]][[3]]
## Hyperparameters handed to kronRlsMKL (see kronRlsMKL.R for their
## exact meaning -- presumably regularisation, bandwidth, iteration cap).
lmd <- 1
sgm <- 0.25
maxiter <- 20
## kronrlsMKL
MKL <- kronRlsMKL(
K1 = list(sd = sd, KgipD = KgipD),
K2 = list(st = st, KgipT = KgipT),
Yfold = Yfold,
lmd = lmd,
sgm = sgm,
maxiter = maxiter
)
Ypred <- MKL$Yhat
resAB[j, 1:2] <- MKL$alph
resAB[j, 3:4] <- MKL$bta
## result
result2 <- evalMetrics(Ypred = Ypred, testSet = testSet)
resMetrics[j, ] <- result2
}
## resMetrics/resAB are fully overwritten each split before being copied.
finalResult[[i]] <- resMetrics
finalAB[[i]] <- resAB
}
# combine result
## Mean MPR over all numSplit * kfold folds plus a naive SE across folds.
resCom <- as.data.frame(data.table::rbindlist(finalResult))
resMean <- colMeans(resCom)
se <- sqrt(var(resCom[, 1]) / length(resCom[, 1]))
cat("kronRLS-MKL:", "MPR =", round(resMean, 3), "+\\-", round(se, 3), "\n")
flush.console()
|
## Exploratory plots comparing Icelandic cod biomass with capelin stock
## biomass (rebuttal analysis). Produces interactive plotly versions of
## the ggplots; nothing is written to disk.
## NOTE(review): ggplot2 functions are used but never attached explicitly;
## they come in via plotly's dependencies -- confirm, or add library(ggplot2).
## NOTE(review): absolute Windows paths and setwd() make the script
## non-portable; header = T should be header = TRUE (T is reassignable).
library(plotly)
library(dplyr)
setwd('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCod')
## cod ----
icecod <- read.table('Cod_Iceland.txt', header = T, sep = ' ')
icecod17 <- read.table('Cod_Iceland_2017.txt', header = T, sep = ' ')
with(icecod, plot(year, SSB))
with(icecod17, plot(year, age4.biom))
ggplotly(ggplot(data = icecod, aes(x = year, y = SSB)) + geom_point())
## Restrict to the 1978-1998 window for both cod series.
ggplotly(
ggplot(data = filter(icecod, year > 1977 & year < 1999),
aes(x = year, y = SSB)) +
geom_point()
)
ggplotly(
ggplot(data = filter(icecod17, year > 1977 & year < 1999),
aes(x = year, y = age4.biom)) +
geom_point()
)
## capelin ----
# read data
## abundance is scaled by 1e9 to individuals (presumably recorded in
## billions -- TODO confirm against the data source).
abun <- read.csv('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCapelinStockBiomass/data/iceland_capelin_abundance.csv', header = T) %>%
mutate(abun = abundance * 1000000000)
meanw <- read.csv('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCapelinStockBiomass/data/iceland_capelin_meanweight.csv', header = T)
left_join(abun, meanw, by = c('year', 'age', 'maturity')) %>%
mutate(biomass = abun * meanweight * 1e-12) %>% # stock biomass in million tonnes
group_by(year) %>%
summarize(stockbiomass = sum(biomass, na.rm = T)) %>%
ggplot(aes(x = year, y = stockbiomass)) + geom_line() + ylab('stock biomass (million tonnes)')
icelandcap <- left_join(abun, meanw, by = c('year', 'age', 'maturity')) %>%
mutate(biomass = abun * meanweight * 1e-12) %>% # stock biomass in million tonnes
group_by(year) %>%
summarize(stockbiomass = sum(biomass, na.rm = T))
## Capelin:cod ratio; stockbiomass * 1e6 converts million tonnes back to
## tonnes (assumes age4.biom / SSB are in tonnes -- TODO confirm).
## left_join(icelandcap) joins on all shared columns (year).
select(icecod17, year, age4.biom) %>%
left_join(icelandcap) %>%
mutate(ratiocapcod = (stockbiomass*1000000)/age4.biom) %>%
filter(year > 1977 & year < 1999) %>%
ggplot(aes(x = year, y = ratiocapcod)) + geom_line()
select(icecod, year, SSB) %>%
left_join(icelandcap) %>%
mutate(ratiocapcod = (stockbiomass*1000000)/SSB) %>%
filter(year > 1977 & year < 1999) %>%
ggplot(aes(x = year, y = ratiocapcod)) + geom_line()
## nl capelin and cod ----
## cap_bms/1000 and cod_bms/1e6: unit harmonisation -- TODO confirm units.
nl <- read.csv('cod_capelin_trend.csv', header = T) %>%
mutate(ratiocapcod = (cap_bms/1000)/(cod_bms/1000000))
ggplot(data = nl, aes(x = year, y = ratiocapcod)) + geom_point()
|
/IcelandCod/Cod_Iceland_plot.r
|
no_license
|
adbpatagonia/Frank_etal_rebuttal
|
R
| false
| false
| 2,247
|
r
|
## Exploratory plots comparing Icelandic cod biomass with capelin stock
## biomass (rebuttal analysis). Produces interactive plotly versions of
## the ggplots; nothing is written to disk.
## NOTE(review): ggplot2 functions are used but never attached explicitly;
## they come in via plotly's dependencies -- confirm, or add library(ggplot2).
## NOTE(review): absolute Windows paths and setwd() make the script
## non-portable; header = T should be header = TRUE (T is reassignable).
library(plotly)
library(dplyr)
setwd('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCod')
## cod ----
icecod <- read.table('Cod_Iceland.txt', header = T, sep = ' ')
icecod17 <- read.table('Cod_Iceland_2017.txt', header = T, sep = ' ')
with(icecod, plot(year, SSB))
with(icecod17, plot(year, age4.biom))
ggplotly(ggplot(data = icecod, aes(x = year, y = SSB)) + geom_point())
## Restrict to the 1978-1998 window for both cod series.
ggplotly(
ggplot(data = filter(icecod, year > 1977 & year < 1999),
aes(x = year, y = SSB)) +
geom_point()
)
ggplotly(
ggplot(data = filter(icecod17, year > 1977 & year < 1999),
aes(x = year, y = age4.biom)) +
geom_point()
)
## capelin ----
# read data
## abundance is scaled by 1e9 to individuals (presumably recorded in
## billions -- TODO confirm against the data source).
abun <- read.csv('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCapelinStockBiomass/data/iceland_capelin_abundance.csv', header = T) %>%
mutate(abun = abundance * 1000000000)
meanw <- read.csv('D:/Buren_files/MEGA/papersAle/Frank_etal_2016_rebuttal/IcelandCapelinStockBiomass/data/iceland_capelin_meanweight.csv', header = T)
left_join(abun, meanw, by = c('year', 'age', 'maturity')) %>%
mutate(biomass = abun * meanweight * 1e-12) %>% # stock biomass in million tonnes
group_by(year) %>%
summarize(stockbiomass = sum(biomass, na.rm = T)) %>%
ggplot(aes(x = year, y = stockbiomass)) + geom_line() + ylab('stock biomass (million tonnes)')
icelandcap <- left_join(abun, meanw, by = c('year', 'age', 'maturity')) %>%
mutate(biomass = abun * meanweight * 1e-12) %>% # stock biomass in million tonnes
group_by(year) %>%
summarize(stockbiomass = sum(biomass, na.rm = T))
## Capelin:cod ratio; stockbiomass * 1e6 converts million tonnes back to
## tonnes (assumes age4.biom / SSB are in tonnes -- TODO confirm).
## left_join(icelandcap) joins on all shared columns (year).
select(icecod17, year, age4.biom) %>%
left_join(icelandcap) %>%
mutate(ratiocapcod = (stockbiomass*1000000)/age4.biom) %>%
filter(year > 1977 & year < 1999) %>%
ggplot(aes(x = year, y = ratiocapcod)) + geom_line()
select(icecod, year, SSB) %>%
left_join(icelandcap) %>%
mutate(ratiocapcod = (stockbiomass*1000000)/SSB) %>%
filter(year > 1977 & year < 1999) %>%
ggplot(aes(x = year, y = ratiocapcod)) + geom_line()
## nl capelin and cod ----
## cap_bms/1000 and cod_bms/1e6: unit harmonisation -- TODO confirm units.
nl <- read.csv('cod_capelin_trend.csv', header = T) %>%
mutate(ratiocapcod = (cap_bms/1000)/(cod_bms/1000000))
ggplot(data = nl, aes(x = year, y = ratiocapcod)) + geom_point()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scores_nbinom.R
\name{scores_nbinom}
\alias{scores_nbinom}
\alias{crps_nbinom}
\alias{logs_nbinom}
\title{Calculating scores for the negative binomial distribution}
\usage{
crps_nbinom(y, size, prob, mu)
logs_nbinom(y, size, prob, mu)
}
\arguments{
\item{y}{vector of observations.}
\item{size}{target for number of successful trials, or dispersion
parameter (the shape parameter of the gamma mixing distribution).
Must be strictly positive, need not be integer.}
\item{prob}{probability of success in each trial. \code{0 < prob <= 1}.}
\item{mu}{alternative parametrization via mean: see \sQuote{Details}.}
}
\value{
A vector of score values.
}
\description{
Calculating scores for the negative binomial distribution
}
\details{
The mean of the negative binomial distribution is given by \code{mu} = \code{size}*(1-\code{prob})/\code{prob}.
}
|
/man/scores_nbinom.Rd
|
no_license
|
thiyangt/scoringRules
|
R
| false
| true
| 934
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scores_nbinom.R
\name{scores_nbinom}
\alias{scores_nbinom}
\alias{crps_nbinom}
\alias{logs_nbinom}
\title{Calculating scores for the negative binomial distribution}
\usage{
crps_nbinom(y, size, prob, mu)
logs_nbinom(y, size, prob, mu)
}
\arguments{
\item{y}{vector of observations.}
\item{size}{target for number of successful trials, or dispersion
parameter (the shape parameter of the gamma mixing distribution).
Must be strictly positive, need not be integer.}
\item{prob}{probability of success in each trial. \code{0 < prob <= 1}.}
\item{mu}{alternative parametrization via mean: see \sQuote{Details}.}
}
\value{
A vector of score values.
}
\description{
Calculating scores for the negative binomial distribution
}
\details{
The mean of the negative binomial distribution is given by \code{mu} = \code{size}*(1-\code{prob})/\code{prob}.
}
|
#' @title Negative log-likelihood for potentially constrained von Bertalanffy growth model (typically used internally).
#'
#' @description \code{vb_bind_nll} returns the negative log-likelihood for the von Bertalanffy model. Equality constraints across sexes can be implemented for any combination of parameters using the \code{binding} argument.
#' @param theta A parameter vector of the same length as the maximum of \code{binding}. Unconstrained parameters take the order: lnlinfF, lnlinfM, lnkF, lnkM, lnnt0F, lnnt0M, lnsigmaF, lnsigmaM.
#' @param binding A (4x2) parameter index matrix with rows named (in order): "lnlinf", "lnk", "lnnt0", "lnsigma" and the left column for the female parameter index and right column for male parameter index. Used to impose arbitrary equality constraints across the sexes (see Examples).
#' @param data data.frame with columns: "age", "length" and "weights". "weights" are set to 1 or 0 for known females or males, respectively; proportions otherwise.
#' @param distribution Character with options: "normal" or "lognormal" (matched via \code{match.arg}, so unambiguous abbreviations are accepted).
#' @return Complete data negative log-likelihood:
#' @examples
#' ## Unconstrained model
#' binding <- matrix(c(1:8), ncol = 2, byrow = TRUE)
#' rownames(binding) <- c("lnlinf", "lnk", "lnnt0", "lnsigma")
#' colnames(binding) <- c("female", "male")
#' ## starting values
#' start.par <- c(rep(log(25), 2), rep(log(0.2), 2), rep(log(3), 2), rep(log(1), 2))
#' vb_bind_nll(theta = start.par, binding = binding,
#'             data = data.frame(age = rep(1, 2), length = rep(10, 2), weights = c(1, 0)),
#'             distribution = "normal")
#' @export
vb_bind_nll <- function(theta, binding, data, distribution = c("normal", "lognormal")) {
  ## Fail fast on an unknown distribution instead of the obscure
  ## "object 'llF' not found" error the unchecked version produced.
  distribution <- match.arg(distribution)
  ## Back-transform the log / negative-log scale parameters for one sex.
  ## `binding` maps each named parameter to its position in `theta`, so a
  ## shared index imposes an equality constraint across the sexes.
  sex_pars <- function(sex) {
    list(linf  = exp(theta[binding["lnlinf", sex]]),
         k     = exp(theta[binding["lnk", sex]]),
         t0    = -exp(theta[binding["lnnt0", sex]]),
         sigma = exp(theta[binding["lnsigma", sex]]))
  }
  ## Weighted log-likelihood contribution for one sex; w is the vector of
  ## sex-membership weights (1/0 for known sex, proportions otherwise).
  sex_ll <- function(p, w) {
    mu <- p$linf * (1 - exp(-p$k * (data$age - p$t0)))
    if (distribution == "normal") {
      sum(w * dnorm(data$length, mean = mu, sd = p$sigma, log = TRUE))
    } else {
      ## meanlog is chosen so that the lognormal mean equals mu.
      sum(w * dlnorm(data$length, meanlog = log(mu) - (p$sigma^2) / 2,
                     sdlog = p$sigma, log = TRUE))
    }
  }
  pF <- sex_pars("female")
  pM <- sex_pars("male")
  -(sex_ll(pF, data$weights) + sex_ll(pM, 1 - data$weights))
}
|
/R/vb_bind_nll.R
|
no_license
|
mintoc/lhmixr
|
R
| false
| false
| 2,846
|
r
|
#' @title Negative log-likelihood for potentially constrained von Bertalanffy growth model (typically used internally).
#'
#' @description \code{vb_bind_nll} returns the negative log-likelihood for the von Bertalanffy model. Equality constraints across sexes can be implemented for any combination of parameters using the \code{binding} argument.
#' @param theta A parameter vector of the same length as the maximum of \code{binding}. Unconstrained parameters take the order: lnlinfF, lnlinfM, lnkF, lnkM, lnnt0F, lnnt0M, lnsigmaF, lnsigmaM.
#' @param binding A (4x2) parameter index matrix with rows named (in order): "lnlinf", "lnk", "lnnt0", "lnsigma" and the left column for the female parameter index and right column for male parameter index. Used to impose arbitrary equality constraints across the sexes (see Examples).
#' @param data data.frame with columns: "age", "length" and "weights". "weights" are set to 1 or 0 for known females or males, respectively; proportions otherwise.
#' @param distribution Character with options: "normal" or "lognormal" (matched via \code{match.arg}, so unambiguous abbreviations are accepted).
#' @return Complete data negative log-likelihood:
#' @examples
#' ## Unconstrained model
#' binding <- matrix(c(1:8), ncol = 2, byrow = TRUE)
#' rownames(binding) <- c("lnlinf", "lnk", "lnnt0", "lnsigma")
#' colnames(binding) <- c("female", "male")
#' ## starting values
#' start.par <- c(rep(log(25), 2), rep(log(0.2), 2), rep(log(3), 2), rep(log(1), 2))
#' vb_bind_nll(theta = start.par, binding = binding,
#'             data = data.frame(age = rep(1, 2), length = rep(10, 2), weights = c(1, 0)),
#'             distribution = "normal")
#' @export
vb_bind_nll <- function(theta, binding, data, distribution = c("normal", "lognormal")) {
  ## Fail fast on an unknown distribution instead of the obscure
  ## "object 'llF' not found" error the unchecked version produced.
  distribution <- match.arg(distribution)
  ## Back-transform the log / negative-log scale parameters for one sex.
  ## `binding` maps each named parameter to its position in `theta`, so a
  ## shared index imposes an equality constraint across the sexes.
  sex_pars <- function(sex) {
    list(linf  = exp(theta[binding["lnlinf", sex]]),
         k     = exp(theta[binding["lnk", sex]]),
         t0    = -exp(theta[binding["lnnt0", sex]]),
         sigma = exp(theta[binding["lnsigma", sex]]))
  }
  ## Weighted log-likelihood contribution for one sex; w is the vector of
  ## sex-membership weights (1/0 for known sex, proportions otherwise).
  sex_ll <- function(p, w) {
    mu <- p$linf * (1 - exp(-p$k * (data$age - p$t0)))
    if (distribution == "normal") {
      sum(w * dnorm(data$length, mean = mu, sd = p$sigma, log = TRUE))
    } else {
      ## meanlog is chosen so that the lognormal mean equals mu.
      sum(w * dlnorm(data$length, meanlog = log(mu) - (p$sigma^2) / 2,
                     sdlog = p$sigma, log = TRUE))
    }
  }
  pF <- sex_pars("female")
  pM <- sex_pars("male")
  -(sex_ll(pF, data$weights) + sex_ll(pM, 1 - data$weights))
}
|
## Unit tests for trackeR's core container workflow: read a TCX file,
## smooth one variable, and summarise the result. Uses the sample file
## shipped in the package's extdata directory.
library(testthat)
context("Tests for tracker")
tcxfile <- system.file("extdata", "2013-06-08-090442.TCX", package = "trackeR")
DataNonGarmin <- readContainer(tcxfile, cores = 2)
## Test trackeRdata object
test_that("class of object from readContainer is trackeRdata", {
  expect_is(DataNonGarmin, "trackeRdata")
})
test_that("number of sessions in DataNonGarmin is 1", {
  expect_equal(length(DataNonGarmin), 1)
})
## Every session is expected to expose exactly these variables, in order.
trackeRdatanames <- c("latitude", "longitude", "altitude", "distance", "heart.rate", "speed", "cadence", "power", "pace")
test_that("the names of each element of an trackeRdata object are as in trackeRdatanames", {
  expect_named(DataNonGarmin[[1]], trackeRdatanames)
})
test_that("class of each element of an trackeRdata object is of class zoo", {
  expect_is(DataNonGarmin[[1]], "zoo")
})
## Smoother
DataNonGarmin_smoothed <- smoother(DataNonGarmin, width = 20, what = "speed", cores = 2)
test_that("class of object from smoother.trackeRdata is trackeRdata", {
  expect_is(DataNonGarmin_smoothed, "trackeRdata")
})
test_that("only speed is smoothed in DataNonGarmin_smoothed (test only first session)", {
  Data_smoothed <- DataNonGarmin_smoothed[[1]]
  Data_original <- DataNonGarmin[[1]]
  inds <- match("speed", names(Data_smoothed))
  ## All other columns must be untouched (compared on the smoothed
  ## object's time index); speed itself must have changed.
  expect_equal(Data_smoothed[, -inds],
               Data_original[index(Data_smoothed), -inds])
  expect_false(isTRUE(all.equal(Data_smoothed[, inds], Data_original[index(Data_smoothed), inds])))
})
test_that("smoother returns error is the trackeRdata object is already smoothed", {
  expect_error(smoother(DataNonGarmin_smoothed, cores = 2))
})
## Summary
DataNonGarmin_summary <- summary(DataNonGarmin)
## FIX: description previously said "print.spdataSummary"; the method under
## test is print.trackeRdataSummary (matching the class checked below).
test_that("standard keywords are produced from print.trackeRdataSummary", {
  expect_output(print(DataNonGarmin_summary), "Session")
  expect_output(print(DataNonGarmin_summary), "Duration")
  expect_output(print(DataNonGarmin_summary), "Distance")
  expect_output(print(DataNonGarmin_summary), "speed")
  expect_output(print(DataNonGarmin_summary), "pace")
  expect_output(print(DataNonGarmin_summary), "time")
})
test_that("class of the object from summary.trackeRdata is trackeRdataSummary", {
  expect_is(DataNonGarmin_summary, "trackeRdataSummary")
})
test_that("object from summary.trackeRdata also inherit from data.frame", {
  expect_is(DataNonGarmin_summary, "data.frame")
})
|
/trackeR/tests/testthat/tests_testthat.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,399
|
r
|
## Unit tests for trackeR's core container workflow: read a TCX file,
## smooth one variable, and summarise the result. Uses the sample file
## shipped in the package's extdata directory.
library(testthat)
context("Tests for tracker")
tcxfile <- system.file("extdata", "2013-06-08-090442.TCX", package = "trackeR")
DataNonGarmin <- readContainer(tcxfile, cores = 2)
## Test trackeRdata object
test_that("class of object from readContainer is trackeRdata", {
  expect_is(DataNonGarmin, "trackeRdata")
})
test_that("number of sessions in DataNonGarmin is 1", {
  expect_equal(length(DataNonGarmin), 1)
})
## Every session is expected to expose exactly these variables, in order.
trackeRdatanames <- c("latitude", "longitude", "altitude", "distance", "heart.rate", "speed", "cadence", "power", "pace")
test_that("the names of each element of an trackeRdata object are as in trackeRdatanames", {
  expect_named(DataNonGarmin[[1]], trackeRdatanames)
})
test_that("class of each element of an trackeRdata object is of class zoo", {
  expect_is(DataNonGarmin[[1]], "zoo")
})
## Smoother
DataNonGarmin_smoothed <- smoother(DataNonGarmin, width = 20, what = "speed", cores = 2)
test_that("class of object from smoother.trackeRdata is trackeRdata", {
  expect_is(DataNonGarmin_smoothed, "trackeRdata")
})
test_that("only speed is smoothed in DataNonGarmin_smoothed (test only first session)", {
  Data_smoothed <- DataNonGarmin_smoothed[[1]]
  Data_original <- DataNonGarmin[[1]]
  inds <- match("speed", names(Data_smoothed))
  ## All other columns must be untouched (compared on the smoothed
  ## object's time index); speed itself must have changed.
  expect_equal(Data_smoothed[, -inds],
               Data_original[index(Data_smoothed), -inds])
  expect_false(isTRUE(all.equal(Data_smoothed[, inds], Data_original[index(Data_smoothed), inds])))
})
test_that("smoother returns error is the trackeRdata object is already smoothed", {
  expect_error(smoother(DataNonGarmin_smoothed, cores = 2))
})
## Summary
DataNonGarmin_summary <- summary(DataNonGarmin)
## FIX: description previously said "print.spdataSummary"; the method under
## test is print.trackeRdataSummary (matching the class checked below).
test_that("standard keywords are produced from print.trackeRdataSummary", {
  expect_output(print(DataNonGarmin_summary), "Session")
  expect_output(print(DataNonGarmin_summary), "Duration")
  expect_output(print(DataNonGarmin_summary), "Distance")
  expect_output(print(DataNonGarmin_summary), "speed")
  expect_output(print(DataNonGarmin_summary), "pace")
  expect_output(print(DataNonGarmin_summary), "time")
})
test_that("class of the object from summary.trackeRdata is trackeRdataSummary", {
  expect_is(DataNonGarmin_summary, "trackeRdataSummary")
})
test_that("object from summary.trackeRdata also inherit from data.frame", {
  expect_is(DataNonGarmin_summary, "data.frame")
})
|
# If each ln = l, we have pn = (1-l)^(n-1) * l, which is the geometric distribution
# on 1, 2, ... So, the geometric distribution corresponds to the case of a constant
# hazard rate.
n <- 100000
l <- 0.9
U <- runif(n)
# Empirical frequencies of the first hazard time: a uniform draw U falls in
# [Fj * (1 - l), Fj), with Fj = (1 - l)^(j - 1) the survival probability up to
# step j, exactly when the event happens at step j.
f <- c()  # grown incrementally: the number of occupied cells is not known upfront
j <- 1
Fj <- 1
while (sum(f) < n) {
  f[j] <- length(which(U >= Fj * (1 - l) & U < Fj))
  j <- j + 1
  Fj <- Fj * (1 - l)
}
rbind(seq_along(f), f / n)
# Now we take different hazard rates. For example, ln = 0.4, 0.5, 2/3, 2/3, ...
# then pn are 0.4, 0.3, 0.2, 0.1 * 2/3, 0.1 * 1/3 * 2/3, 0.1 * 1/3^2 * 2/3, ...
# Note that ln <= 2/3 for all n, and we can use the algorithm of 19 with l=2/3.
# In the algorithm, Y = k iff (1-l)^(k-1) >= U > (1-l)^k, so the probability of
# Y = k is l*(1-l)^(k-1), so Y has the geometric distribution with parameter k.
# What is going on here is that we increase the hazard rate at each step, and this
# allows us to generate a series of Bernoulli observations as the geometric random
# variable, and then if the hazard event does not happen at a certain stage at the
# increased hazard rate, we assume it does not happen at the lower rate, otherwise
# if the hazard event happens at the higher rate l, we assume that it happened for
# real with the probability ln / l. Therefore, the probability that X = k is equal
# to the probability that X >= k times l * ln / l = ln, exactly what we need.

# Draw one value of the discrete lifetime X by thinning a Geometric(2/3)
# proposal: jump ahead a geometric number of steps, then accept the candidate
# event with probability (true hazard at that step) / (2/3).
X <- function() {
  X <- 0
  l <- 2/3
  while (TRUE) {
    # One-shot geometric jump: floor(log(U) / log(1 - l)) + 1 ~ Geometric(l).
    X <- X + floor(log(runif(1)) / log(1 - l)) + 1
    # True hazard rate at the current step: 0.4, 0.5, then 2/3 onwards.
    lX <- if (X <= 2) c(0.4, 0.5)[X] else l
    # Accept (stop) with probability lX / l; otherwise keep jumping.
    if (runif(1) < lX / l) {
      break
    }
  }
  X
}
s <- replicate(n, X())
prop.table(table(s))
|
/4.18-19.R
|
no_license
|
fengzenggithub/R-Simulation-by-Ross
|
R
| false
| false
| 1,654
|
r
|
# If each ln = l, we have pn = (1-l)^(n-1) * l, which is the geometric distribution
# on 1, 2, ... So, the geometric distribution corresponds to the case of a constant
# hazard rate.
n = 100000
l = 0.9
U <- runif(n)
f <- c()
j <- 1
Fj <- 1
while (sum(f) < n) {
f[j] <- length(which(U >= Fj * (1 - l) & U < Fj))
j <- j + 1
Fj <- Fj * (1 - l)
}
rbind(1:length(f), f / n)
# Now we take different hazard rates. For example, ln = 0.4, 0.5, 2/3, 2/3, ...
# then pn are 0.4, 0.3, 0.2, 0.1 * 2/3, 0.1 * 1/3 * 2/3, 0.1 * 1/3^2 * 2/3, ...
# Note that ln <= 2/3 for all n, and we can use the algorithm of 19 with l=2/3.
# In the algorithm, Y = k iff (1-l)^(k-1) >= U > (1-l)^k, so the probability of
# Y = k is l*(1-l)^(k-1), so Y has the geometric distribution with parameter k.
# What is going on here is that we increase the hazard rate at each step, and this
# allows as to generate a series of Bernoulli observations as the geometric random
# variable, and then if the hazard event does not happen at a certain stage at the
# increased hazard rate, we assume it does not happen at the lower rate, otherwise
# if the hazard event happens at the higher rate l, we assume that it happened for
# real with the probability ln / l. Therefore, the probability that X = k is equal
# to the probability that X >= k times l * ln / l = ln, exactly what we need.
X <- function () {
X <- 0
l <- 2/3
while (TRUE) {
X <- X + floor(log(runif(1)) / log(1 - l)) + 1
if (X <= 2) {
lX <- (c( 0.4, 0.5 ))[X]
}
else {
lX <- l
}
if (runif(1) < lX / l) {
break
}
}
X
}
s <- replicate(n, X())
prop.table(table(s))
|
## Compare four rating-prediction weighting schemes (IDF, Avg, RM, Base)
## against the average-from-reviews reference using root-mean-square error.
ResultTable <- read.csv("wResult.csv", header = TRUE)
summary(ResultTable)

## Root-mean-square error between a reference vector and a prediction
## vector of the same length (sqrt of the mean squared difference).
rmse <- function(reference, prediction) {
  sqrt(mean((reference - prediction)^2))
}

ori <- ResultTable$AvgFromReviews
w1 <- ResultTable$wIDF
w2 <- ResultTable$wAvg
w3 <- ResultTable$wRM
w4 <- ResultTable$wBase
n <- length(ori)  # kept for backward compatibility with downstream code
RMSE1 <- rmse(ori, w1)
RMSE2 <- rmse(ori, w2)
RMSE3 <- rmse(ori, w3)
RMSE4 <- rmse(ori, w4)
|
/llh/计算商品和类别对应表/testResult.R
|
no_license
|
hustwcw/RecSys2013
|
R
| false
| false
| 347
|
r
|
ResultTable <- read.csv("wResult.csv", head=T);
summary(ResultTable)
ori <- ResultTable$AvgFromReviews
w1 <- ResultTable$wIDF
w2 <- ResultTable$wAvg
w3 <- ResultTable$wRM
w4 <- ResultTable$wBase
n <- length(ori)
RMSE1 <- sqrt(sum((ori-w1)^2)/n)
RMSE2 <- sqrt(sum((ori-w2)^2)/n)
RMSE3 <- sqrt(sum((ori-w3)^2)/n)
RMSE4 <- sqrt(sum((ori-w4)^2)/n)
|
## File:- cachematrix.R
## This file contains a pair of functions - 'makeCacheMatrix' &
## 'cacheSolve' - implemented for 'Caching the Inverse of a Matrix'.
## The function 'makeCacheMatrix' creates a special "matrix" object
## that can cache its inverse. The function 'cacheSolve' computes the
## inverse of the special "matrix" returned by 'makeCacheMatrix'.
## The function 'makeCacheMatrix' creates the special matrix object that can
## cache its inverse. It provides the following functionalities:
## 1. setMatrix:- Used to set new values to the already exisitng object
## 2. getMatrix:- Used to get the existing values in the object
## 3. setInverse:- To set the Inverse of the matrix in the object
## 4. getInverse:- To get the Inverse of the matrix from the object
makeCacheMatrix <- function(x = matrix()) {
  ## Build a special "matrix" object that can cache its own inverse.
  ## The returned list exposes setMatrix/getMatrix for the matrix itself
  ## and setInverse/getInverse for the cached inverse.
  cached_inverse <- NULL  # invalidated whenever the matrix is replaced

  setMatrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # old inverse no longer valid
  }
  getMatrix <- function() x
  setInverse <- function(new_inverse) cached_inverse <<- new_inverse
  getInverse <- function() cached_inverse

  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       setInverse = setInverse,
       getInverse = getInverse)
}
## The function cacheSolve computes the inverse of the special "matrix"
## returned by makeCacheMatrix. If the inverse has already been calculated
## (and the matrix has not changed), then the 'cachesolve' should retrieve
## the inverse from the cache.
## Return a matrix that is the inverse of 'x'
## NB: This function assumes that the matrix supplied is always invertible
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` created by
  ## makeCacheMatrix, reusing the cached inverse when one is available.
  ## Assumes the stored matrix is invertible.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    ## Cache hit: skip the computation entirely.
    message("Getting the cached value for inverse matrix")
    return(cached)
  }
  ## Cache miss: invert, store the result for next time, and return it.
  result <- solve(x$getMatrix())
  x$setInverse(result)
  result
}
|
/cachematrix.R
|
no_license
|
Veena-S/ProgrammingAssignment2
|
R
| false
| false
| 2,463
|
r
|
## File:- cachematrix.R
## This file contains a pair of functions - 'makeCacheMatrix' &
## 'cacheSolve' - implemented for 'Caching the Inverse of a Matrix'.
## The function 'makeCacheMatrix' creates a special "matrix" object
## that can cache its inverse. The function 'cacheSolve' computes the
## inverse of the special "matrix" returned by 'makeCacheMatrix'.
## The function 'makeCacheMatrix' creates the special matrix object that can
## cache its inverse. It provides the following functionalities:
## 1. setMatrix:- Used to set new values to the already exisitng object
## 2. getMatrix:- Used to get the existing values in the object
## 3. setInverse:- To set the Inverse of the matrix in the object
## 4. getInverse:- To get the Inverse of the matrix from the object
makeCacheMatrix <- function(x = matrix()) {
inverseVal <- NULL # Initialize the inverse matrix as NULL
setMatrix <- function(y) # Function to set any value specified
{
x <<- y
inverseVal <<- NULL
}
getMatrix <- function() # To return the specified matrix
{
x
}
setInverse <- function( inv ) # To set the inverse value of the matrix
{
inverseVal <<- inv
}
getInverse <- function() # To return the cached inverse matrix
{
inverseVal
}
list( setMatrix = setMatrix, getMatrix = getMatrix,
setInverse = setInverse, getInverse = getInverse )
}
## The function cacheSolve computes the inverse of the special "matrix"
## returned by makeCacheMatrix. If the inverse has already been calculated
## (and the matrix has not changed), then the 'cachesolve' should retrieve
## the inverse from the cache.
## Return a matrix that is the inverse of 'x'
## NB: This function assumes that the matrix supplied is always invertible
cacheSolve <- function(x, ...) {
invMatrix <- x$getInverse()
# Check whether the inverse matrix of the input matrix is null or not
if( !is.null( invMatrix ))
{
# Inverse is not null. So, get the cached value
message( "Getting the cached value for inverse matrix" )
return( invMatrix )
}
# No cached value for the input matrix
# Calculate the inverse matrix
invMatrix <- solve( x$getMatrix())
# Set the calculated inverse matrix to the input data
x$setInverse( invMatrix )
# Return the inverse of the matrix
invMatrix
}
|
## Plot 4: four-panel summary of household power consumption for
## 2007-02-01 and 2007-02-02, copied to Plot4.png.
plotData <- read.table("household_power_consumption.txt",
                       header = TRUE, sep = ";", na.strings = "?")

## Keep only the two target dates and prepend a combined date-time column.
finalData <- plotData[plotData$Date %in% c("1/2/2007", "2/2/2007"), ]
SetTime <- strptime(paste(finalData$Date, finalData$Time, sep = " "),
                    "%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)

## Legend labels and matching line colours for the sub-metering panel.
labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
columnlines <- c("black", "red", "blue")

## 2 x 2 panel layout.
par(mfrow = c(2, 2))
with(finalData, {
  plot(SetTime, Global_active_power, type = "l", col = "green",
       xlab = "", ylab = "Global Active Power")
  plot(SetTime, Voltage, type = "l", col = "orange",
       xlab = "datetime", ylab = "Voltage")
  plot(SetTime, Sub_metering_1, type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(SetTime, Sub_metering_2, type = "l", col = "red")
  lines(SetTime, Sub_metering_3, type = "l", col = "blue")
  legend("topright", bty = "n", legend = labels, lty = 1, col = columnlines)
  plot(SetTime, Global_reactive_power, type = "l", col = "blue",
       xlab = "datetime", ylab = "Global_reactive_power")
})

## Copy the screen device to a PNG file and close the copy.
dev.copy(png, file = "Plot4.png")
dev.off()
|
/Plot4.R
|
no_license
|
fmrigueiro/Exploratory-Data-Analysis
|
R
| false
| false
| 1,148
|
r
|
plotData <- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
## set time variable
finalData <- plotData[plotData$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(finalData$Date, finalData$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
finalData <- cbind(SetTime, finalData)
##
## Generating 4th Plot
labels <- c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
columnlines <- c("black","red","blue")
par(mfrow=c(2,2))
plot(finalData$SetTime, finalData$Global_active_power, type="l", col="green", xlab="", ylab="Global Active Power")
plot(finalData$SetTime, finalData$Voltage, type="l", col="orange", xlab="datetime", ylab="Voltage")
plot(finalData$SetTime, finalData$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(finalData$SetTime, finalData$Sub_metering_2, type="l", col="red")
lines(finalData$SetTime, finalData$Sub_metering_3, type="l", col="blue")
legend("topright", bty="n", legend=labels, lty=1, col=columnlines)
plot(finalData$SetTime, finalData$Global_reactive_power, type="l", col="blue", xlab="datetime", ylab="Global_reactive_power")
dev.copy(png, file = "Plot4.png")
dev.off()
|
#' Sequential Optimal Design
#'
#' Given a set of georeferenced measurements, this function finds the `add.pts`
#' optimal locations for sampling.
#'
#' @param geodata A geodata object containing the initial sample
#' @param add.pts Number of points to be added to the initial sample
#' @param n Number of points in the sides of the candidate grid. If a vector
#' of length 1, both sides of the grid will have the same number of points. If
#' a vector of length 2, the first value indicates the number of points along
#' the x axis, and the second value indicates the number of points along the y
#' axis. Defaults to the square root of ten times the number of observations in
#' the geodata object
#' @param util Utility function to be used. Possibilities are `predvar`,
#' `extrprob` or `mixed`. See the Details section for further details
#' @param kcontrol Parameters for kriging as a krige.geoR object. See the help
#' for krige.control for further details
#' @param parallel Indicates if the code should run in parallel. Defaults to
#' TRUE
#' @param qtl Reference quantile for the extreme probability utility function.
#' Defaults to 0.75
#' @param p Weight of the predictive variance function for the calculation of
#' the mixed utility function. Defaults to 0.5
#' @param shape A SpatialPolygonsDataFrame object, read with function readOGR()
#' from rgdal, which contains the area of interest. The candidate grid will be
#' located inside the limits indicated by this shapefile. Defaults to NULL, and
#' in this case, uses the bounding box of the observed data to create the
#' candidate grid
#'
#' @return Coordinates of the new sampling locations
#'
#' @details
#' The value `predvar` for util refers to the reduction in predictive variance
#' utility function. `extrprob` refers to the utility function that favours the
#' locations that will have a higher probability of observing extreme values.
#' `mixed` refers to a mixed utility function which uses weight p for the
#' reduction in predictive variance utility function and weight 1-p for the
#' extreme observations utility function.
#'
#' @examples
#' library(geoR)
#'
#' add.pts <- 10 # Number of points to be added
#' n <- 10 # Number of points to define the candidate grid
#' N <- 15 # Number of points for the simulation (only for testing)
#' qtl <- 0.75
#'
#' # Model parameters: 20, 0.45, 1
#' set.seed(300)
#' simdata <- grf(N, cov.pars = c(20, 0.45), nug = 1)
#'
#' # Visualization of simulated data:
#' # points(simdata, cex.min = 1, cex.max = 2.5, col = gray(seq(1, 0, l = 4)))
#'
#' beta1 <- mean(simdata$data)
#' m1 <- as.matrix(simdata$coords)
#' emv <- ajemv(simdata, ini.phi = 0.4, plot = F,
#' ini.sigma2 = 10, pepita = 1, modelo = 'exponential')
#'
#' new.pts <- SOD(simdata, add.pts, n, util = 'predvar',
#' kcontrol = krige.control(type.krige = "SK",
#' trend.d = "cte",
#' nugget = emv$tausq,
#' beta = mean(simdata$data),
#' cov.pars = emv$cov.pars),
#' parallel = F)
#' new.pts.bayes <- SOD(simdata, add.pts, n, util = 'predvar',
#' kcontrol = prior.control(beta.prior = "flat",
#' sigmasq.prior = "reciprocal",
#' phi.prior="uniform",
#' phi.discrete=seq(0,2,l=20),
#' tausq.rel.prior = "uniform",
#' tausq.rel.discrete = seq(0, 1, l=20)),
#' parallel = F)
#'
#' # Old points and new points
#' par(mfrow = c(1, 2))
#' plot(simdata$coords, pch = 16, main = 'Classical kriging')
#' points(new.pts, pch = '+', col = 'red')
#' plot(simdata$coords, pch = 16, main = 'Bayesian kriging')
#' points(new.pts.bayes, pch = '+', col = 'red')
#' par(mfrow = c(1, 1))
#'
#' @importFrom snow makeCluster
#' @importFrom doSNOW registerDoSNOW
#' @importFrom geoR krige.conv
#' @importFrom geoR as.geodata
#' @importFrom foreach foreach
#' @importFrom foreach %dopar%
#' @importFrom sp bbox
#' @importFrom sp SpatialPoints
#' @importFrom sp proj4string
#' @importFrom spatstat as.owin
#' @importFrom spatstat gridcentres
#'
#'
#' @export
SOD <- function(geodata, add.pts, n = ceiling(sqrt(10*nrow(geodata$coords))),
                util, kcontrol, parallel = TRUE, qtl = 0.75, p = 0.5, shape = NULL) {
  ## ---- Validate arguments --------------------------------------------------
  if (!("geodata" %in% class(geodata)))
    stop("Expected first argument to be a geodata")
  if (mode(add.pts) != "numeric" || add.pts <= 0)
    stop("Invalid value for `add.pts`")
  if (mode(n) != "numeric")
    stop("Expected `n` to be numeric")
  if (length(n) == 1) {
    nx <- ny <- n
  } else if (length(n) == 2) {
    nx <- n[1]
    ny <- n[2]
  } else {
    stop("Invalid length for n")
  }
  if (!(util %in% c('predvar', 'extrprob', 'mixed')))
    stop("Invalid value for util")
  if (!(inherits(kcontrol, "krige.geoR") || inherits(kcontrol, "prior.geoR")))
    stop("Expected `kcontrol` to be a `krige.geoR` or `prior.geoR` object")
  if (typeof(parallel) != "logical")
    stop("Expected `parallel` to be a logical value")
  if (mode(qtl) != "numeric" || (qtl < 0 | qtl > 1))
    stop("Expected `qtl` to be a value between 0 and 1")
  if (mode(p) != "numeric" || (p < 0 | p > 1))
    stop("Expected `p` to be a value between 0 and 1")
  if (!is.null(shape) && typeof(shape) != "S4")
    stop("Expected `shape` to be a SpatialPolygonsDataFrame value")

  if (parallel) {
    cl <- makeCluster(c("localhost", "localhost"), "SOCK")
    registerDoSNOW(cl)
  }

  ## ---- Candidate / kriging grid --------------------------------------------
  if (!is.null(shape)) {
    ## Grid of nx x ny cell centres clipped to the area of interest.
    shape.window <- as.owin(shape)                # Owin
    initgrid <- gridcentres(shape.window, nx, ny) # List
    initgridP <- SpatialPoints(initgrid)          # SpatialPoints
    proj4string(initgridP) <- proj4string(shape)
    finalgrid <- initgridP[shape, ]               # points inside the polygon
    cgrid <- kgrid <- finalgrid@coords            # matrix
  } else {
    ## No shapefile: regular grid over the bounding box of the data.
    box <- bbox(geodata$coords)
    kgrid <- cgrid <- expand.grid(seq(box[1, 1], box[1, 2], l = nx),
                                  seq(box[2, 1], box[2, 2], l = ny))
  }
  is.bayes <- inherits(kcontrol, "prior.geoR")

  ## ---- Local helpers -------------------------------------------------------
  ## Kriging over `kgrid`, classical or Bayesian depending on `kcontrol`.
  ## capture.output() silences the chatty geoR functions.
  run_kriging <- function(gd) {
    out <- NULL
    capture.output(
      out <- if (is.bayes) {
        krige.bayes(gd, locations = kgrid,
                    model = model.control(cov.model = "spherical"),
                    prior = kcontrol)
      } else {
        krige.conv(gd, locations = kgrid, krige = kcontrol)
      }
    )
    out
  }
  ## Accessors hiding the structural difference between krige.conv output
  ## ($predict / $krige.var) and krige.bayes output ($predictive$...).
  pred_mean <- function(kr) if (is.bayes) kr$predictive$mean else kr$predict
  pred_var <- function(kr) if (is.bayes) kr$predictive$variance else kr$krige.var

  ## Indices of candidate-grid points that coincide with sample locations
  ## (these must never be proposed again).
  common_points <- function(coords) {
    m1 <- as.matrix(coords)
    m2 <- as.matrix(cgrid)
    ptnr <- NULL
    for (i in seq_len(nrow(m2))) {
      for (j in seq_len(nrow(m1))) {
        if (all(m1[j, ] == m2[i, ])) {
          ptnr <- c(ptnr, i)
        }
      }
    }
    ptnr
  }

  ## Utility 1 ("predvar"): mean reduction in predictive variance obtained by
  ## adding each free candidate point to the sample, parallelised over the
  ## candidates, then linearly rescaled to [0, 1].
  predvar_utility <- function(or.krig, ptnr) {
    values <- c(geodata$data, 0)  # dummy response for the candidate point
    i <- 0
    raw <- foreach(i = seq_len(nrow(cgrid)), .packages = 'geoR',
                   .combine = "c") %dopar% {
      if (is.null(ptnr) || !(i %in% ptnr)) {
        cand <- as.geodata(data.frame(cbind(rbind(geodata$coords, cgrid[i, ]),
                                            values)))
        mean(pred_var(or.krig) - pred_var(run_kriging(cand)))
      }  # already-sampled points yield NULL, which .combine = "c" drops
    }
    ## Re-insert utility 0 at the positions of already-sampled points so the
    ## vector lines up with the candidate grid again.
    ## NOTE(review): `insert()` is not base R; presumably a package-level
    ## helper with signature insert(x, value, position) -- confirm.
    if (!is.null(ptnr)) {
      for (k in seq_along(ptnr)) {
        raw <- insert(raw, 0, ptnr[k])
      }
    }
    ## Linear transformation of the utility range onto (0, 1).
    rng <- range(raw)
    ab <- solve(matrix(c(rng[1], rng[2], 1, 1), 2), c(0, 1))
    ab[1] * raw + ab[2]
  }

  ## Utility 2 ("extrprob"): squared probability of exceeding the `qtl`
  ## quantile of the current data, favouring locations likely to show
  ## extreme values. Vectorised over the actual grid length (the original
  ## looped over 1:(nx*ny), which over-indexes when a shapefile clips the
  ## grid) and routed through pred_mean/pred_var so it also works for
  ## Bayesian kriging output.
  extrprob_utility <- function(or.krig, ptnr) {
    tolerance <- quantile(geodata$data, qtl)
    u <- (1 - pnorm(tolerance, mean = pred_mean(or.krig),
                    sd = sqrt(pred_var(or.krig))))^2
    if (!is.null(ptnr)) {
      u[ptnr] <- 0  # never propose a location that is already sampled
    }
    u
  }

  ## ---- Sequential optimisation ---------------------------------------------
  cat("Optimizing...\n")
  best.pt <- NULL
  util.evolution <- vector("list", add.pts)  # utility surface per iteration
  for (g in seq_len(add.pts)) {
    cat(sprintf("Iteration %d out of %d%s\n",
                g, add.pts, if (g == add.pts) "! Yay!" else ""))
    ## Kriging with the current sample; the per-candidate krigings are
    ## compared against this baseline.
    or.krig <- run_kriging(geodata)
    ptnr <- common_points(geodata$coords)
    ## Align candidate-grid names with the coordinate names so the rbind()
    ## inside the utility computation matches columns.
    names(cgrid) <- names(as.data.frame(geodata$coords))
    u <- switch(util,
                predvar = predvar_utility(or.krig, ptnr),
                extrprob = extrprob_utility(or.krig, ptnr),
                ## Mixed utility: weight p on predictive variance and
                ## 1 - p on the extreme-probability utility.
                mixed = p * predvar_utility(or.krig, ptnr) +
                  (1 - p) * extrprob_utility(or.krig, ptnr))
    ## Store the genuine utility surface for this iteration. (The original
    ## overwrote the mixed utility with the extrprob values here.)
    util.evolution[[g]] <- u
    ## which.max() takes the first maximiser; which(u == max(u)) could
    ## return several tied indices and desynchronise best.pt from `g`.
    best.pt <- c(best.pt, which.max(u))
    best.coord <- cgrid[best.pt, ]
    best.value <- pred_mean(or.krig)[best.pt]
    ## Use the kriging prediction as a pseudo-observation at the chosen
    ## location and grow the sample for the next iteration.
    geodata$data <- c(geodata$data, best.value[g])
    geodata$coords <- rbind(geodata$coords, best.coord[g, ])
    rownames(geodata$coords) <- NULL
    geodata <- as.geodata(cbind(geodata$coords, geodata$data))
  }
  if (parallel) stopCluster(cl)
  ## Return only the coordinates of the newly added sampling locations.
  nc <- nrow(geodata$coords)
  geodata$coords[(nc - add.pts + 1):nc, ]
}
|
/R/sod.R
|
no_license
|
GS-Ferreira/geodesign
|
R
| false
| false
| 24,338
|
r
|
#' Sequential Optimal Design
#'
#' Given a set of georeferenced measurements, this function finds the `add.pts`
#' optimal locations for sampling.
#'
#' @param geodata A geodata object containing the initial sample
#' @param add.pts Number of points to be added to the initial sample
#' @param n Number of points in the sides of the candidate grid. If a vector
#' of length 1, both sides of the grid will have the same number of points. If
#' a vector of length 2, the first value indicates the number of points along
#' the x axis, and the second value indicates the number of points along the y
#' axis. Defaults to the square root of ten times the number of observations in
#' the geodata object
#' @param util Utility function to be used. Possibilities are `predvar`,
#' `extrprob` or `mixed`. See the Details section for further details
#' @param kcontrol Parameters for kriging as a krige.geoR object. See the help
#' for krige.control for further details
#' @param parallel Indicates if the code should run in parallel. Defaults to
#' TRUE
#' @param qtl Reference quantile for the extreme probability utility function.
#' Defaults to 0.75
#' @param p Weight of the predictive variance function for the calculation of
#' the mixed utility function. Defaults to 0.5
#' @param shape A SpatialPointsDataFrame object, read with function readOGR()
#' from rgdal, which contains the area of interest. The candidate grid will be
#' located inside the limits indicated by this shapefile. Defaults to NULL, and
#' in this case, uses the bounding box of the observed data to create the
#' candidate grid
#'
#' @return Coordinates of the new sampling locations
#'
#' @details
#' The value `predvar` for util refers to the reduction in predictive variance
#' utility function. `extrprob` refers to the utility function that favours the
#' locations that will have a higher probability of observing extreme values.
#' `mixed` refers to a mixed utility function which uses weight p for the
#' reduction in predictive variance utility function and weight 1-p for the
#' extreme observations utility function.
#'
#' @examples
#' library(geoR)
#'
#' add.pts <- 10 # Number of points to be added
#' n <- 10 # Number of points to define the candidate grid
#' N <- 15 # Number of points for the simulation (only for testing)
#' qtl <- 0.75
#'
#' # Model parameters: 20, 0.45, 1
#' set.seed(300)
#' simdata <- grf(N, cov.pars = c(20, 0.45), nug = 1)
#'
#' # Visualization of simulated data:
#' # points(simdata, cex.min = 1, cex.max = 2.5, col = gray(seq(1, 0, l = 4)))
#'
#' beta1 <- mean(simdata$data)
#' m1 <- as.matrix(simdata$coords)
#' emv <- ajemv(simdata, ini.phi = 0.4, plot = F,
#' ini.sigma2 = 10, pepita = 1, modelo = 'exponential')
#'
#' new.pts <- SOD(simdata, add.pts, n, util = 'predvar',
#' kcontrol = krige.control(type.krige = "SK",
#' trend.d = "cte",
#' nugget = emv$tausq,
#' beta = mean(simdata$data),
#' cov.pars = emv$cov.pars),
#' parallel = F)
#' new.pts.bayes <- SOD(simdata, add.pts, n, util = 'predvar',
#' kcontrol = prior.control(beta.prior = "flat",
#' sigmasq.prior = "reciprocal",
#' phi.prior="uniform",
#' phi.discrete=seq(0,2,l=20),
#' tausq.rel.prior = "uniform",
#' tausq.rel.discrete = seq(0, 1, l=20)),
#' parallel = F)
#'
#' # Old points and new points
#' par(mfrow = c(1, 2))
#' plot(simdata$coords, pch = 16, main = 'Classical kriging')
#' points(new.pts, pch = '+', col = 'red')
#' plot(simdata$coords, pch = 16, main = 'Bayesian kriging')
#' points(new.pts.bayes, pch = '+', col = 'red')
#' par(mfrow = c(1, 1))
#'
#' @importFrom snow makeCluster
#' @importFrom doSNOW registerDoSNOW
#' @importFrom geoR krige.conv
#' @importFrom geoR as.geodata
#' @importFrom foreach foreach
#' @importFrom foreach %dopar%
#' @importFrom sp bbox
#' @importFrom sp SpatialPoints
#' @importFrom sp proj4string
#' @importFrom spatstat as.owin
#' @importFrom spatstat gridcentres
#'
#'
#' @export
SOD <- function(geodata, add.pts, n = ceiling(sqrt(10*nrow(geodata$coords))),
                util, kcontrol, parallel = TRUE, qtl = 0.75, p = 0.5, shape = NULL) {
  # Sequential Optimal Design: greedily adds `add.pts` sampling locations to
  # `geodata`, one at a time, each time choosing the candidate grid point that
  # maximises the requested utility ('predvar', 'extrprob' or 'mixed').
  # Returns the coordinates of the added points.
  #
  # ---- Input validation ----------------------------------------------------
  if (!inherits(geodata, "geodata"))
    stop("Expected first argument to be a geodata")
  if (mode(add.pts) != "numeric" || add.pts <= 0)
    stop("Invalid value for `add.pts`")
  if (mode(n) != "numeric")
    stop("Expected `n` to be numeric")
  if (length(n) == 1) {
    nx <- ny <- n      # square grid
  } else if (length(n) == 2) {
    nx <- n[1]
    ny <- n[2]
  } else {
    stop("Invalid length for n")
  }
  if (!(util %in% c('predvar', 'extrprob', 'mixed')))
    stop("Invalid value for util")
  # inherits() rather than class(x) %in%/==: objects may carry more than one
  # class, and a length > 1 condition in `if` is an error in R >= 4.2.
  if (!inherits(kcontrol, "krige.geoR") && !inherits(kcontrol, "prior.geoR"))
    stop("Expected `kcontrol` to be a `krige.geoR` or `prior.geoR` object")
  if (typeof(parallel) != "logical")
    stop("Expected `parallel` to be a logical value")
  if (mode(qtl) != "numeric" || (qtl < 0 | qtl > 1))
    stop("Expected `qtl` to be a value between 0 and 1")
  if (mode(p) != "numeric" || (p < 0 | p > 1))
    stop("Expected `p` to be a value between 0 and 1")
  if (!is.null(shape)) {
    # NOTE(review): typeof(.) == "S4" is a weak check; consider
    # inherits(shape, "SpatialPolygonsDataFrame").
    if (typeof(shape) != "S4")
      stop("Expected `shape` to be a SpatialPolygonsDataFrame value")
  }
  if (parallel) {
    # Two local SOCK workers for the foreach loops below.
    # NOTE(review): makeCluster()/registerDoSNOW() come from snow/doSNOW,
    # which are not in this file's @importFrom list -- confirm dependencies.
    cl <- makeCluster(c("localhost", "localhost"), "SOCK")
    registerDoSNOW(cl)
    # Shut the workers down even if a kriging call errors out (previously the
    # cluster was only stopped on the success path, leaking it on error).
    on.exit(stopCluster(cl), add = TRUE)
  }
  # ---- Kriging / candidate grids -------------------------------------------
  if (!is.null(shape)) { # grid clipped to the shapefile, if one is provided
    shape.window <- as.owin(shape)                # owin
    initgrid <- gridcentres(shape.window, nx, ny) # list
    initgridP <- SpatialPoints(initgrid)          # SpatialPoints
    proj4string(initgridP) <- proj4string(shape)
    finalgrid <- initgridP[shape,]                # points inside the polygon
    cgrid <- kgrid <- finalgrid@coords            # matrix
  } else {               # otherwise a regular nx-by-ny grid over the bbox
    box <- bbox(geodata$coords)
    kgrid <- cgrid <- expand.grid(seq(box[1,1], box[1,2], l = nx),
                                  seq(box[2,1], box[2,2], l = ny))
  }
  is.bayes <- inherits(kcontrol, "prior.geoR")
  cat("Optimizing...\n")
  if (util == 'predvar') {
    it.predvar.util <- list() # per-iteration (transformed) utility values
    best.pt <- NULL
    for (g in 1:(add.pts)) {
      cat(sprintf("Iteration %d out of %d%s\n",
                  g, add.pts, if (g == add.pts) "! Yay!" else ""))
      # Kriging on the current data; each candidate's utility is measured
      # against this baseline.
      capture.output(
        or.krig <- if (is.bayes) {
          krige.bayes(geodata, locations = kgrid,
                      model = model.control(cov.model = "spherical"),
                      prior = kcontrol)
        } else {
          krige.conv(geodata, locations = kgrid,
                     krige = kcontrol)
        }
      )
      # Locate candidate grid points that coincide with existing sample
      # locations; their utility is forced to zero further down.
      m1 <- as.matrix(geodata$coords)
      m2 <- as.matrix(cgrid)
      ptscommon <- NULL # coordinates of the common points
      cont <- 0         # how many common points exist
      cont2 <- 0        # running row index into the candidate grid
      ptnr <- NULL      # candidate-grid indices of the common points
      for (i in seq_len(nrow(m2))) {
        cont2 <- cont2 + 1
        for (j in seq_len(nrow(m1))) {
          if (all(m1[j,] == m2[i,])) {
            cont <- cont + 1
            ptscommon <- rbind(ptscommon, m1[j,])
            ptnr <- c(ptnr, cont2)
          }
        }
      }
      # Utility #1: mean decrease in predictive variance when candidate i is
      # added to the sample.  The appended dummy observation is 0 -- the
      # kriging variance does not depend on the observed value itself.
      names(cgrid) <- names(as.data.frame(geodata$coords))
      geodata.list <- list()
      values <- c(geodata$data, 0)
      df.list <- list()
      krig.list <- list()
      i <- 0
      predvar.util <- NULL
      if (cont == 0) { # no candidate coincides with the sample
        cat("cont is = 0\n")
        predvar.util <- foreach(i = seq_len(nrow(cgrid)), .packages = 'geoR', .combine = "c") %dopar% {
          df.list[[i]] <- rbind(geodata$coords, cgrid[i,])
          geodata.list[[i]] <- as.geodata(data.frame(cbind(df.list[[i]], values)))
          capture.output(
            krig.list[[i]] <- if (is.bayes) {
              krige.bayes(geodata.list[[i]], locations = kgrid,
                          model = model.control(cov.model = "spherical"),
                          prior = kcontrol)
            } else {
              krige.conv(geodata.list[[i]], locations = kgrid,
                         krige = kcontrol)
            }
          )
          predvar.util <- if (is.bayes) {
            mean(or.krig$predictive$variance - krig.list[[i]]$predictive$variance)
          } else {
            mean(or.krig$krige.var - krig.list[[i]]$krige.var)
          }
          df.list[[i]] <- 0      # free worker-side memory
          krig.list[[i]] <- 0
          geodata.list[[i]] <- 0
          predvar.util
        }
        cat("finished kriging\n")
      }
      if (cont > 0) { # skip candidates already in the sample
        cat(sprintf("cont is = %2d\n", cont))
        predvar.util <- foreach(i = seq_len(nrow(cgrid)), .packages = 'geoR', .combine = "c") %dopar% {
          if (!(i %in% ptnr)) { # common points yield NULL, dropped by c()
            df.list[[i]] <- rbind(geodata$coords, cgrid[i,])
            geodata.list[[i]] <- as.geodata(data.frame(cbind(df.list[[i]], values)))
            capture.output(
              krig.list[[i]] <- if (is.bayes) {
                krige.bayes(geodata.list[[i]], locations = kgrid,
                            model = model.control(cov.model = "spherical"),
                            prior = kcontrol)
              } else {
                krige.conv(geodata.list[[i]], locations = kgrid,
                           krige = kcontrol)
              }
            )
            predvar.util <- if (is.bayes) {
              mean(or.krig$predictive$variance - krig.list[[i]]$predictive$variance)
            } else {
              mean(or.krig$krige.var - krig.list[[i]]$krige.var)
            }
            df.list[[i]] <- 0
            krig.list[[i]] <- 0
            geodata.list[[i]] <- 0
            predvar.util
          }
        }
        cat("finished kriging\n")
      }
      # Re-insert a 0 utility at each skipped (already-sampled) position so
      # indices line up with the candidate grid again.
      # NOTE(review): insert() is presumably R.utils::insert -- confirm the
      # dependency is declared.
      if (cont > 0) {
        for (i in 1:(cont)) {
          predvar.util <- insert(predvar.util, 0, ptnr[i])
        }
      }
      # Affine map of the utility onto [0, 1] (min -> 0, max -> 1).
      # NOTE(review): degenerate (singular solve) when all utilities are equal.
      coef1 <- range(predvar.util)[1]
      coef2 <- range(predvar.util)[2]
      co <- matrix(c(coef1, coef2, 1, 1), 2)
      ld <- c(0, 1)
      sol <- solve(co, ld)
      a <- sol[1]
      b <- sol[2]
      tr.predvar.util <- a * predvar.util + b # transformed utility function
      # Saves corrected utility function values for iteration "g"
      it.predvar.util[[g]] <- tr.predvar.util
      # Greedy choice: candidate with maximal transformed utility.
      # NOTE(review): ties produce more than one index here -- confirm the
      # utility cannot tie in practice.
      best.pt <- c(best.pt, which(tr.predvar.util == max(tr.predvar.util)))
      best.coord <- cgrid[best.pt,]
      best.value <- if (is.bayes) {
        or.krig$predictive$mean[best.pt]
      } else {
        or.krig$predict[best.pt]
      }
      # Append the chosen point (with its kriging prediction as pseudo-datum)
      # and rebuild the geodata for the next iteration.
      geodata$data <- c(geodata$data, best.value[g])
      geodata$coords <- rbind(geodata$coords, best.coord[g,])
      rownames(geodata$coords) <- NULL
      geodata <- as.geodata(cbind(geodata$coords, geodata$data))
    }
  } # if util == 'predvar' ends here
  if (util == 'extrprob') {
    it.extrprob.util <- list() # extreme probabilities utility values
    best.pt <- NULL
    for (g in 1:(add.pts)) {
      cat(sprintf("Iteration %d out of %d%s\n",
                  g, add.pts, if (g == add.pts) "! Yay!" else ""))
      # Kriging on the current data.
      capture.output(
        or.krig <- if (is.bayes) {
          krige.bayes(geodata, locations = kgrid,
                      model = model.control(cov.model = "spherical"),
                      prior = kcontrol)
        } else {
          krige.conv(geodata, locations = kgrid,
                     krige = kcontrol)
        }
      )
      # Candidate grid points that coincide with existing sample locations.
      m1 <- as.matrix(geodata$coords)
      m2 <- as.matrix(cgrid)
      ptscommon <- NULL
      cont <- 0
      cont2 <- 0
      ptnr <- NULL
      for (i in seq_len(nrow(m2))) {
        cont2 <- cont2 + 1
        for (j in seq_len(nrow(m1))) {
          if (all(m1[j,] == m2[i,])) {
            cont <- cont + 1
            ptscommon <- rbind(ptscommon, m1[j,])
            ptnr <- c(ptnr, cont2)
          }
        }
      }
      # Utility #2: squared probability of exceeding the `qtl` sample
      # quantile.  Fixes two defects of the original: (a) it always read
      # $krige.var/$predict, which do not exist on krige.bayes output; (b) it
      # looped over 1:(nx*ny), which over-indexes when a shapefile clipped
      # the grid to fewer than nx*ny points.
      pred.mean <- if (is.bayes) or.krig$predictive$mean else or.krig$predict
      pred.var <- if (is.bayes) or.krig$predictive$variance else or.krig$krige.var
      tolerance <- quantile(geodata$data, qtl)
      extrprob.util <- (1 - pnorm(tolerance, mean = pred.mean,
                                  sd = sqrt(pred.var)))^2
      # Erases utility function values if location is already in sample
      if (cont != 0) {
        extrprob.util[ptnr] <- 0
      }
      # Saves corrected utility function values for iteration "g"
      it.extrprob.util[[g]] <- extrprob.util
      # Optimal sampling location for utility function #2
      best.pt <- c(best.pt, which(extrprob.util == max(extrprob.util)))
      best.coord <- cgrid[best.pt,]
      best.value <- pred.mean[best.pt]
      colnames(best.coord) <- c("x", "y")
      # Merges geodata with optimal point
      geodata$data <- c(geodata$data, best.value[g])
      geodata$coords <- rbind(geodata$coords, best.coord[g,])
      rownames(geodata$coords) <- NULL
      geodata <- as.geodata(cbind(geodata$coords, geodata$data))
    }
  } # if util == 'extrprob' ends here
  if (util == 'mixed') {
    it.extrprob.util <- list()
    it.predvar.util <- list()
    it.mixed.util <- list() # weighted combination of the two utilities
    best.pt <- NULL
    for (g in 1:(add.pts)) {
      cat(sprintf("Iteration %d out of %d%s\n",
                  g, add.pts, if (g == add.pts) "! Yay!" else ""))
      # Kriging on the current data.
      capture.output(
        or.krig <- if (is.bayes) {
          krige.bayes(geodata, locations = kgrid,
                      model = model.control(cov.model = "spherical"),
                      prior = kcontrol)
        } else {
          krige.conv(geodata, locations = kgrid,
                     krige = kcontrol)
        }
      )
      # Candidate grid points that coincide with existing sample locations.
      m1 <- as.matrix(geodata$coords)
      m2 <- as.matrix(cgrid)
      ptscommon <- NULL
      cont <- 0
      cont2 <- 0
      ptnr <- NULL
      for (i in seq_len(nrow(m2))) {
        cont2 <- cont2 + 1
        for (j in seq_len(nrow(m1))) {
          if (all(m1[j,] == m2[i,])) {
            cont <- cont + 1
            ptscommon <- rbind(ptscommon, m1[j,])
            ptnr <- c(ptnr, cont2)
          }
        }
      }
      # Utility #1 component: decrease in predictive variance (see 'predvar').
      names(cgrid) <- names(as.data.frame(geodata$coords))
      geodata.list <- list()
      values <- c(geodata$data, 0)
      df.list <- list()
      krig.list <- list()
      i <- 0
      predvar.util <- NULL
      if (cont == 0) { # no points in common
        predvar.util <- foreach(i = seq_len(nrow(cgrid)), .packages = 'geoR', .combine = "c") %dopar% {
          df.list[[i]] <- rbind(geodata$coords, cgrid[i,])
          geodata.list[[i]] <- as.geodata(data.frame(cbind(df.list[[i]], values)))
          capture.output(
            krig.list[[i]] <- if (is.bayes) {
              krige.bayes(geodata.list[[i]], locations = kgrid,
                          model = model.control(cov.model = "spherical"),
                          prior = kcontrol)
            } else {
              krige.conv(geodata.list[[i]], locations = kgrid,
                         krige = kcontrol)
            }
          )
          predvar.util <- if (is.bayes) {
            mean(or.krig$predictive$variance - krig.list[[i]]$predictive$variance)
          } else {
            mean(or.krig$krige.var - krig.list[[i]]$krige.var)
          }
          df.list[[i]] <- 0
          krig.list[[i]] <- 0
          geodata.list[[i]] <- 0
          predvar.util
        }
      }
      if (cont > 0) { # some candidates already sampled
        predvar.util <- foreach(i = seq_len(nrow(cgrid)), .packages = 'geoR', .combine = "c") %dopar% {
          if (!(i %in% ptnr)) { # common points yield NULL, dropped by c()
            df.list[[i]] <- rbind(geodata$coords, cgrid[i,])
            geodata.list[[i]] <- as.geodata(data.frame(cbind(df.list[[i]], values)))
            capture.output(
              krig.list[[i]] <- if (is.bayes) {
                krige.bayes(geodata.list[[i]], locations = kgrid,
                            model = model.control(cov.model = "spherical"),
                            prior = kcontrol)
              } else {
                krige.conv(geodata.list[[i]], locations = kgrid,
                           krige = kcontrol)
              }
            )
            predvar.util <- if (is.bayes) {
              mean(or.krig$predictive$variance - krig.list[[i]]$predictive$variance)
            } else {
              mean(or.krig$krige.var - krig.list[[i]]$krige.var)
            }
            df.list[[i]] <- 0
            krig.list[[i]] <- 0
            geodata.list[[i]] <- 0
            predvar.util
          }
        }
      }
      # Re-insert a 0 utility at each skipped position (see 'predvar' branch).
      if (cont > 0) {
        for (i in 1:(cont)) {
          predvar.util <- insert(predvar.util, 0, ptnr[i])
        }
      }
      # Affine map of the utility onto [0, 1].
      coef1 <- range(predvar.util)[1]
      coef2 <- range(predvar.util)[2]
      co <- matrix(c(coef1, coef2, 1, 1), 2)
      ld <- c(0, 1)
      sol <- solve(co, ld)
      a <- sol[1]
      b <- sol[2]
      tr.predvar.util <- a * predvar.util + b # transformed utility function
      # Utility #2 component: extreme-event probability (see 'extrprob'; same
      # bayes-aware / grid-size fixes apply here).
      pred.mean <- if (is.bayes) or.krig$predictive$mean else or.krig$predict
      pred.var <- if (is.bayes) or.krig$predictive$variance else or.krig$krige.var
      tolerance <- quantile(geodata$data, qtl)
      extrprob.util <- (1 - pnorm(tolerance, mean = pred.mean,
                                  sd = sqrt(pred.var)))^2
      if (cont != 0) {
        extrprob.util[ptnr] <- 0
      }
      # Utility #3: convex combination, p weighting the predvar component.
      mixed.util <- p * tr.predvar.util + (1 - p) * extrprob.util
      it.predvar.util[[g]] <- tr.predvar.util
      it.extrprob.util[[g]] <- extrprob.util
      # Fix: the original assigned mixed.util here and then immediately
      # overwrote it with extrprob.util, destroying the mixed record.
      it.mixed.util[[g]] <- mixed.util
      # Optimal sampling location for utility function #3
      best.pt <- c(best.pt, which(mixed.util == max(mixed.util)))
      best.coord <- cgrid[best.pt,]
      best.value <- pred.mean[best.pt]
      # Merges data with optimal point
      geodata$data <- c(geodata$data, best.value[g])
      geodata$coords <- rbind(geodata$coords, best.coord[g,])
      rownames(geodata$coords) <- NULL
      geodata <- as.geodata(cbind(geodata$coords, geodata$data))
    }
  } # if util == 'mixed' ends here
  # Evolution of the chosen utility across iterations.  Currently computed
  # but not returned; kept for a future plotting/diagnostics interface.
  util.evolution <- NULL
  if (util == 'predvar') util.evolution <- it.predvar.util else
  if (util == 'extrprob') util.evolution <- it.extrprob.util else
    util.evolution <- it.mixed.util
  nc <- nrow(geodata$coords)
  # Return the coordinates of the add.pts locations appended above.
  geodata$coords[(nc - add.pts + 1):nc,]
}
# ---------------------------------------------------------------------------
# NOTE(review): a stray "|" character (a file-concatenation artifact and an R
# syntax error) was removed here.  Everything below is a separate Shiny
# application, unrelated to the SOD() function above.
# ---------------------------------------------------------------------------
library(lubridate)
library(mailR)
library(shiny)
library(RMySQL)
library(dplyr)
library(plyr)
library(DT)
library(twitteR)
library(shinythemes)
# Static lookup tables shipped with the app: the free-agent pool and the
# league's team list.
Free_agent <- read.csv("FA.csv")
team <- read.csv("team.csv")
# read.csv() may return factors (pre-R 4.0 default); force plain character.
team$Team <- as.character(team$Team)
# Twitter OAuth handshake.  NOTE(review): consumer_key / consumer_secret /
# access_token / access_secret are not defined anywhere in this file -- they
# are presumably sourced from a stripped credentials file; confirm before
# deploying.
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# UI definition: sidebar with login, FA picker and contract-offer form;
# main panel with tabs for status, announcements, summaries and history.
ui <- shinyUI(
  fluidPage(
    # Theme picker left visible for previewing; "superhero" is the applied
    # default theme.
    shinythemes::themeSelector(),
    theme = shinytheme("superhero"),
    {
      sidebarLayout(
        sidebarPanel(
          # Login form (rendered server-side; disappears after login).
          uiOutput("uiLogin"),
          #Part 1 Drop down menu to choose FA
          selectInput("FA_players", "Choose a FA:", choices = Free_agent$Name),
          #Part 3 Action button
          p("Note: After clicking the submit button, allow 5-10 seconds for the message confirming successful submission of contract offer. Buffering time may differ based on condition of your internet connectivity"),
          br(),
          actionButton("choose","Submit Your Contract Offer to the Selected FA"),
          br(),
          p("You can set the value to $0 in the contract years that you don't want to commit to. e.g. If you want to give 1-year contract, set 2017 salary but set everything else to 0. (Everything is defaulted to zero for you. Please check all the parameters before submitting contracts"),
          br(),
          p("Note: Raises in salary from one year to the next are not allowed to be higher than 100%. Decreases in salary from one year to the next are not allowed to be lower than -50% "),
          #Action button for checking point values
          actionButton("check","Check points"),
          p("Note about 'check points' button: Fill out all contract info below to figure out your points. Continue to play around with the parameters until you outbid the current highest points."),
          #Part 5
          # Contract options and the guaranteed-years slider; "tickers" is a
          # server-rendered set of one salary box per guaranteed year.
          selectInput("club_option", "Yes or no to club option:",
                      choices = c("YES","NO"),selected="NO"),
          selectInput("vest_option", "Yes or no to vesting option:",
                      choices = c("YES","NO"),selected="NO"),
          sliderInput("n15", "Guaranteed Year:",
                      min = 1, max = 10, value = 1, step = 1),
          uiOutput("tickers"),
          textInput("n16", "Signing Bonus. (Avoid entering $ sign)",
                    value = 0),
          selectInput("n17", "Contract Type:",choices = c("Major League Contract","Minor League Contract")),
          wellPanel(
            downloadButton('downloadData', 'Download contract details of signed FAs'),
            downloadButton('downloadData2','Download bidding history for your team.')
          ),
          # Inline CSS: custom fonts for h1/h3 and colours for the timer and
          # status outputs.
          tags$head(
            tags$style(HTML("
                            @import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
                            h1 {
                            font-family: 'Lobster',cursive;
                            font-weight: bold;
                            line-height: 1.1;
                            color: #000000;
                            }
                            "))
          ),
          tags$head(
            tags$style(HTML("
                            @import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
                            h3 {
                            font-family: 'Open Sans';
                            font-weight: bold;
                            line-height: 1.1;
                            color: #0013F7;
                            }
                            "))
          ),
          tags$head(tags$style("#timer{color: blue;
                               font-size: 20px;
                               font-style: italic;
                               }"
          )
          ),
          tags$head(tags$style("#values{color: red;
                               font-size: 20px;
                               font-style: bold;
                               }"
          )
          )
        ),
        mainPanel(
          h1("World Series of Fantasy Baseball Free Agency"),
          tabsetPanel(type = "tabs",
                      tabPanel("main",h3("Login Status."),
                               verbatimTextOutput("pass"),
                               h3("Timer"),
                               verbatimTextOutput("timer"),
                               h3("Point checker"),
                               verbatimTextOutput("points"),
                               h3("Update"),
                               verbatimTextOutput("values"),h3("10 Most Recent Bidding (Most recent bid at the top. Time in ET)"),
                               tableOutput("recent")
                      ),
                      tabPanel("Announcements",h3("Announcements"),
                               verbatimTextOutput("announce")),
                      tabPanel("Summary", h3("Who Signed Where?"),
                               tableOutput("signed")),
                      tabPanel("History", h3("Bidding history of FA in target"),
                               tableOutput("table")),
                      tabPanel("Progress", h3("Your Team Bidding Progress (BETA Version)"),
                               tableOutput("sort_by_team")),
                      tabPanel("all result", h3("Highest bidding point for each of 2016 FA. (Time in ET)"),
                               dataTableOutput("all_results"))
          )
        )
      )
    })
)
server <- shinyServer(function(input, output, session){
USER <- reactiveValues(Logged = FALSE)
TEAM <- reactiveValues(name = NA)
  # Render the login form only while the user is not yet authenticated;
  # after a successful login renderUI returns NULL and the panel disappears.
  output$uiLogin <- renderUI({
    if (USER$Logged == FALSE) {
      wellPanel(
        textInput("Username", "Username: (Case sensitive)"),
        textInput("Password", "Password:"),
        br(),
        actionButton("Login", "Log in")
      )
    }
  })
output$pass <- eventReactive(input$Login,{
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
PASSWORD <- dbReadTable(con,"Password",row.names=NULL)
dbDisconnect(con)
Username <- isolate(input$Username)
Password <- isolate(input$Password)
Id.username <- which(PASSWORD$user == Username)
Id.password <- which(PASSWORD$password == Password)
if (length(Id.username) > 0 & length(Id.password) > 0) {
if (Id.username == Id.password) {
USER$Logged <- TRUE
TEAM <- reactiveValues(name = PASSWORD$team[which(PASSWORD$user == input$Username)])
"Log in successful. (Exit the browser to logoff)"
}
} else {
"User name or password failed!!!"
}
})
output$tickers <- renderUI({
num_year <- as.integer(input$n15)
lapply(1:num_year, function(i) {
list(textInput(paste0("n",i), label = paste0("Year", i+2016," (Avoid entering $ sign)"), value = 0))
})
})
  # Countdown display for the selected FA's bidding clock, refreshed every
  # second while a clock is running.
  output$timer <- renderText({
    if (USER$Logged == TRUE){
      con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
      tbl <- dbReadTable(con,"FA_TABLE",row.names=NULL)
      clock <- dbReadTable(con,"clock2",row.names=NULL)
      dbDisconnect(con)
      # Players whose clock has expired can no longer receive bids.
      ineligible <- as.character(clock$Player[clock$clockend %in% "YES"])
      # Keep only the most recent bid row for the selected player.
      tbl <- tbl[tbl$Player %in% input$FA_players,]
      tbl <- tbl[order(tbl$Bid_Num, decreasing = TRUE),]
      tbl <- tbl[1,]
      if(tbl$Player[1] %in% ineligible)
      {
        print_it <- paste0("Time is up on ",tbl$Player[1])
      }
      # Bid_Num == 1 is the seed row inserted before any real bid: no clock.
      if((tbl$Bid_Num[1] == 1) & (!tbl$Player[1] %in% ineligible))
      {
        tbl$Start_Time[1] <- "Clock hasn't started"
        tbl$End_Time[1] <- "Clock hasn't started"
        tbl$Time_left[1] <- "Clock hasn't started"
        print_it <- tbl$Time_left[1]
      }
      if((tbl$Bid_Num[1] > 1) & (!tbl$Player[1] %in% ineligible))
      {
        end_time <- as.POSIXct(tbl$End_Time[1])
        start_time <- as.POSIXct(tbl$Start_Time[1])
        # Seconds remaining until the deadline.  NOTE(review): the trailing
        # + 18000 looks like a hard-coded 5-hour UTC/EST offset correction --
        # confirm it still holds across DST changes and server timezones.
        time_diff <- (as.numeric(as.POSIXct(end_time),units="sec") - as.numeric(as.POSIXct(start_time),units="sec")) - (as.numeric(as.POSIXct(now(tzone="EST")),units="sec") - as.numeric(as.POSIXct(start_time),units="sec")) + 18000
        # Split the remaining seconds into hours / minutes / seconds.
        hour <- time_diff %/% 3600
        min <- time_diff %% 3600
        second <- min %% 60
        min <- min %/% 60
        second <- floor(second)
        print_it <- paste0(hour," hours ",min," mins ",second," seconds to go")
        # Re-run this renderText in 1s so the countdown ticks.
        invalidateLater(1000, session)
        print_it
      }
      # NOTE(review): if none of the three branches above matched (e.g. an
      # NA Bid_Num), print_it is undefined here and this errors -- confirm.
      print_it
    }
  })
# PART5
  # Triggered by the "Submit Your Contract Offer" button.  Validates the
  # offer, recomputes its point value, appends the bid to FA_TABLE when it
  # beats the current high bid, e-mails the outbid team(s), and returns a
  # status string for the "Update" panel.
  # NOTE(review): if the user is not logged in, `word` is never assigned and
  # the final bare `word` below errors -- confirm this path is unreachable.
  sliderValues <- eventReactive(input$choose,{
    con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
    clock <- dbReadTable(con,"tb_name",row.names=NULL)
    dbDisconnect(con)
    # Players whose bidding clock has ended cannot receive further offers.
    ineligible <- as.character(clock$Player[clock$clockend %in% "YES"])
    finished <- "NO"
    if(input$FA_players %in% ineligible)
    {
      finished <- "YES"
      word <- paste0(input$FA_players," already signed FA contract.")
    }
    # "Tony" appears to be the commissioner account, barred from bidding.
    if(input$Username == "Tony")
    {
      word <- "You are not authorized to sign any FA"
      word
    }
    if ((USER$Logged == TRUE) & (input$Username != "Tony") & (finished == "NO")){
      con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
      tbl <- dbReadTable(con,"tb_name",row.names=NULL)
      years <- input$n15
      illegal_minor <- FALSE
      # Read yearly salary inputs n1..n10 into c1..c10 (forced to 0 for years
      # beyond the guaranteed length) and the signing bonus n16 into c16.
      ifelse((length(input$n1) > 0) & (years %in% c(1:10)),assign(paste0("c",1),input$n1),assign(paste0("c",1),0))
      ifelse((length(input$n2) > 0) & (years %in% c(2:10)),assign(paste0("c",2),input$n2),assign(paste0("c",2),0))
      ifelse((length(input$n3) > 0) & (years %in% c(3:10)),assign(paste0("c",3),input$n3),assign(paste0("c",3),0))
      ifelse((length(input$n4) > 0) & (years %in% c(4:10)),assign(paste0("c",4),input$n4),assign(paste0("c",4),0))
      ifelse((length(input$n5) > 0) & (years %in% c(5:10)),assign(paste0("c",5),input$n5),assign(paste0("c",5),0))
      ifelse((length(input$n6) > 0) & (years %in% c(6:10)),assign(paste0("c",6),input$n6),assign(paste0("c",6),0))
      ifelse((length(input$n7) > 0) & (years %in% c(7:10)),assign(paste0("c",7),input$n7),assign(paste0("c",7),0))
      ifelse((length(input$n8) > 0) & (years %in% c(8:10)),assign(paste0("c",8),input$n8),assign(paste0("c",8),0))
      ifelse((length(input$n9) > 0) & (years %in% c(9:10)),assign(paste0("c",9),input$n9),assign(paste0("c",9),0))
      ifelse((length(input$n10) > 0) & (years %in% c(10)),assign(paste0("c",10),input$n10),assign(paste0("c",10),0))
      ifelse((length(input$n16) > 0),assign(paste0("c",16),input$n16),assign(paste0("c",16),0))
      # Strip thousands separators and coerce to numeric.
      ifelse((exists("c1") == TRUE) & (years %in% c(1:10)), c1 <- as.numeric(gsub(",", "", c1)), c1 <- 0)
      ifelse((exists("c2") == TRUE) & (years %in% c(2:10)), c2 <- as.numeric(gsub(",", "", c2)), c2 <- 0)
      ifelse((exists("c3") == TRUE) & (years %in% c(3:10)), c3 <- as.numeric(gsub(",", "", c3)), c3 <- 0)
      ifelse((exists("c4") == TRUE) & (years %in% c(4:10)), c4 <- as.numeric(gsub(",", "", c4)), c4 <- 0)
      ifelse((exists("c5") == TRUE) & (years %in% c(5:10)), c5 <- as.numeric(gsub(",", "", c5)), c5 <- 0)
      ifelse((exists("c6") == TRUE) & (years %in% c(6:10)), c6 <- as.numeric(gsub(",", "", c6)), c6 <- 0)
      ifelse((exists("c7") == TRUE) & (years %in% c(7:10)), c7 <- as.numeric(gsub(",", "", c7)), c7 <- 0)
      ifelse((exists("c8") == TRUE) & (years %in% c(8:10)), c8 <- as.numeric(gsub(",", "", c8)), c8 <- 0)
      ifelse((exists("c9") == TRUE) & (years %in% c(9:10)), c9 <- as.numeric(gsub(",", "", c9)), c9 <- 0)
      ifelse((exists("c10") == TRUE) & (years %in% c(10)), c10 <- as.numeric(gsub(",", "", c10)), c10 <- 0)
      ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub(",", "", c16)), c16 <- 0)
      # Strip "$" signs.  NOTE(review): gsub("$", ...) matches end-of-string,
      # not a literal dollar sign (needs fixed = TRUE) -- but the inputs are
      # already numeric at this point, so this is effectively a no-op.
      ifelse((exists("c1") == TRUE), c1 <- as.numeric(gsub("$", "", c1)), c1 <- 0)
      ifelse((exists("c2") == TRUE), c2 <- as.numeric(gsub("$", "", c2)), c2 <- 0)
      ifelse((exists("c3") == TRUE), c3 <- as.numeric(gsub("$", "", c3)), c3 <- 0)
      ifelse((exists("c4") == TRUE), c4 <- as.numeric(gsub("$", "", c4)), c4 <- 0)
      ifelse((exists("c5") == TRUE), c5 <- as.numeric(gsub("$", "", c5)), c5 <- 0)
      ifelse((exists("c6") == TRUE), c6 <- as.numeric(gsub("$", "", c6)), c6 <- 0)
      ifelse((exists("c7") == TRUE), c7 <- as.numeric(gsub("$", "", c7)), c7 <- 0)
      ifelse((exists("c8") == TRUE), c8 <- as.numeric(gsub("$", "", c8)), c8 <- 0)
      ifelse((exists("c9") == TRUE), c9 <- as.numeric(gsub("$", "", c9)), c9 <- 0)
      ifelse((exists("c10") == TRUE), c10 <- as.numeric(gsub("$", "", c10)), c10 <- 0)
      ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub("$", "", c16)), c16 <- 0)
      # Enforce the league-minimum salary ($535,000) for any non-zero year.
      ifelse((c1 > 0) & (c1 < 535000), c1 <- as.numeric(535000),c1 <- c1)
      ifelse((c2 > 0) & (c2 < 535000), c2 <- as.numeric(535000),c2 <- c2)
      ifelse((c3 > 0) & (c3 < 535000), c3 <- as.numeric(535000),c3 <- c3)
      ifelse((c4 > 0) & (c4 < 535000), c4 <- as.numeric(535000),c4 <- c4)
      ifelse((c5 > 0) & (c5 < 535000), c5 <- as.numeric(535000),c5 <- c5)
      ifelse((c6 > 0) & (c6 < 535000), c6 <- as.numeric(535000),c6 <- c6)
      ifelse((c7 > 0) & (c7 < 535000), c7 <- as.numeric(535000),c7 <- c7)
      ifelse((c8 > 0) & (c8 < 535000), c8 <- as.numeric(535000),c8 <- c8)
      ifelse((c9 > 0) & (c9 < 535000), c9 <- as.numeric(535000),c9 <- c9)
      ifelse((c10 > 0) & (c10 < 535000), c10 <- as.numeric(535000),c10 <- c10)
      # Club option: option year valued at 125% of the average guaranteed
      # salary, with a 10% buyout.
      if(input$club_option %in% "YES")
      {
        option_money <- 0
        option_buy_out <- 0
        years <- input$n15
        all_year <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
        option_money <- as.numeric(round_any(as.numeric(all_year) / as.numeric(years) * 1.25,1000))
        option_buy_out <- round_any(as.numeric((option_money * 0.1)),100000)
      }
      if(!input$club_option %in% "YES")
      {
        option_money <- 0
        option_money <- as.numeric(option_money)
        option_buy_out <- 0
        option_buy_out <- as.numeric(option_buy_out)
      }
      # Vesting option: same valuation scheme as the club option.
      if(input$vest_option %in% "YES")
      {
        vest_money <- 0
        vest_buy_out <- 0
        years <- input$n15
        all_year_vest <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
        vest_money <- as.numeric(round_any(as.numeric(all_year_vest) / as.numeric(years) * 1.25,1000))
        vest_buy_out <- round_any(as.numeric((vest_money * 0.1)),100000)
      }
      if(!input$vest_option %in% "YES")
      {
        vest_money <- 0
        vest_money <- as.numeric(vest_money)
        vest_buy_out <- 0
        vest_buy_out <- as.numeric(vest_buy_out)
      }
      # Total contract value, average annual value and the league's scoring
      # formula for ranking competing bids.
      years <- input$n15
      total <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10) + as.numeric(option_buy_out) + as.numeric(vest_buy_out) + as.numeric(c16)
      AAV <- as.numeric(total) / as.numeric(years)
      points <- as.numeric(round_any((as.numeric(total) + (as.numeric(AAV) * 3) + (as.numeric(c1) * 1) - (as.numeric(option_buy_out) * 1) + (as.numeric(vest_buy_out) * 1)) / (1000000) - (as.numeric(years) * 1.5),1))
      points <- as.numeric(points)
      # A bid succeeds only if it strictly beats the current high bid.
      tbl4 <- tbl[tbl$Player == input$FA_players,]
      max_point_player <- max(tbl4$Points)
      if(points > max_point_player)
      {
        success <- TRUE
      }
      if(points < max_point_player)
      {
        success <- FALSE
      }
      if(points == max_point_player)
      {
        success <- FALSE
      }
      # A minor-league contract may guarantee at most one year at/above the
      # league minimum; flag multi-year minor-league offers as illegal.
      year <- c("c1","c2","c3","c4","c5","c6","c7","c8","c9","c10")
      existing <- vector()
      ifelse(c1 >= 535000, existing[1] <- TRUE, existing[1] <- FALSE)
      ifelse(c2 >= 535000, existing[2] <- TRUE, existing[2] <- FALSE)
      ifelse(c3 >= 535000, existing[3] <- TRUE, existing[3] <- FALSE)
      ifelse(c4 >= 535000, existing[4] <- TRUE, existing[4] <- FALSE)
      ifelse(c5 >= 535000, existing[5] <- TRUE, existing[5] <- FALSE)
      ifelse(c6 >= 535000, existing[6] <- TRUE, existing[6] <- FALSE)
      ifelse(c7 >= 535000, existing[7] <- TRUE, existing[7] <- FALSE)
      ifelse(c8 >= 535000, existing[8] <- TRUE, existing[8] <- FALSE)
      ifelse(c9 >= 535000, existing[9] <- TRUE, existing[9] <- FALSE)
      ifelse(c10 >= 535000, existing[10] <- TRUE, existing[10] <- FALSE)
      if((length(which(existing == TRUE)) > 1) & (input$n17 == "Minor League Contract"))
      {
        success <- FALSE
        illegal_minor <- TRUE
      }
      if((success == TRUE) & (input$Username != "Tony") & (illegal_minor == FALSE)){
        tbl <- tbl[(tbl$Player == input$FA_players),]
        difference <- 0
        tbl <- tbl[tbl$Player %in% input$FA_players,]
        tbl <- tbl[order(tbl$Bid_Num, decreasing = TRUE),]
        tbl <- tbl[1,]
        tbl$Club <- as.character(tbl$Club)
        #tbl$Club <- as.character(tbl$Club)
        # Initial end and start time
        end_time <- ymd_hms(tbl$End_Time[1],tz="EST")
        start_time <- ymd_hms(tbl$Start_Time[1],tz="EST")
        # Time at the bidding
        bid_time <- now(tzone = "EST")
        # Time difference between at the time of bidding to the deadline
        if(tbl$Bid_Num[1] == 1)
        {
          difference <- 86400 * 10
        }
        if(tbl$Bid_Num[1] > 1)
        {
          difference <- as.numeric(end_time - start_time,units="secs") - as.numeric(bid_time - start_time,units="secs")
        }
        # Max time difference possible. (240 hrs)
        max <- (240*3600)
        # Clock extension depends on which bid number this will be:
        # 2nd bid +10 days, 3rd +5, 4th +2, 5th +1, 6th onward +12 hours.
        if((tbl$Bid_Num[1]+1) == 2)
        {
          X2nd_bid <- 86400 * 10
          difference <- difference + X2nd_bid
        }
        if((tbl$Bid_Num[1]+1) == 3)
        {
          X3rd_bid <- 86400 * 5
          difference <- difference + X3rd_bid
        }
        if((tbl$Bid_Num[1]+1) == 4)
        {
          X4th_bid <- 86400 * 2
          difference <- difference + X4th_bid
        }
        if((tbl$Bid_Num[1]+1) == 5)
        {
          X5th_bid <- 86400 * 1
          difference <- difference + X5th_bid
        }
        if((tbl$Bid_Num[1]+1) >= 6)
        {
          X6th_bid <- 86400 * 0.5
          difference <- difference + X6th_bid
        }
        # If "difference" is larger than max, difference equals max
        if(difference >= max)
        {
          difference <- max
          end_time <- bid_time + difference
          start_time <- bid_time
        }
        # If "difference" is less than max, difference equals difference
        if(difference < max)
        {
          difference <- difference
          end_time <- bid_time + difference
          start_time <- bid_time
        }
        # Assemble the new bid row in the FA_TABLE schema.
        tbl55 <- dbReadTable(con,"tb_name",row.names=NULL)
        test <- data.frame(matrix("NA",nrow=1,ncol=27),stringsAsFactors = FALSE)
        colnames(test) <- c("row_names","Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020",
                            "X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting",
                            "Buyout2","Points","AAV","Total","Bid_Num","Start_Time","End_Time","Contract_Status")
        test$row_names[1] <- NA
        test$Player[1] <- input$FA_players
        test$Club[1] <- tbl55$team[as.character(tbl55$user) %in% as.character(input$Username)]
        years <- input$n15
        test$Year_Guaranteed[1] <- years
        test$Signing_Bonus[1] <- c16
        test$Points[1] <- points
        # Human-readable one-line summary, e.g. "$12.5M for 3yr(s)".
        ifelse((total %in% c(1,0) & (tbl$Bid_Num[1] %in% c(1))),summary <- paste0("$0M for ",as.numeric(years),"yr(s)"),summary <- paste0("$",round((total) / 1000000,digits=2),"M for ",years,"yr(s)"))
        # NOTE(review): this bulk assignment coerces every column (including
        # Points/AAV/Total) to character -- confirm the DB schema expects that.
        test[1,] <- as.character(c(NA,input$FA_players,as.character(tbl55$team[tbl55$user %in% input$Username]),years,summary,c16,c1,
                                   c2,c3,c4,c5,c6,c7,c8,c9,c10,option_money,option_buy_out,vest_money,vest_buy_out,
                                   points,AAV,total,tbl$Bid_Num[tbl$Player %in% input$FA_players] + 1,strftime(start_time,"%Y-%m-%d %H:%M:%S",tz="EST"),strftime(end_time,"%Y-%m-%d %H:%M:%S",tz="EST"),input$n17))
        test <- test[1,]
        # Notify every club currently holding the (now-beaten) high bid.
        email <- read.csv("email.csv")
        email$email <- as.character(email$email)
        email$Team <- as.character(email$Team)
        check <- tbl$Points[!is.na(tbl$Points)]
        if(all(points > check))
        {
          max_points <- max(tbl$Points,na.rm=TRUE)
          teams_to_send_email <- tbl$Club[which(tbl$Points == max_points)]
          teams_to_send_email <- unique(teams_to_send_email)
          for(jack in 1:length(teams_to_send_email))
          {
            recipients <- email$email[email$X %in% teams_to_send_email[jack]]
            namer <- unique(email$Team[email$X %in% teams_to_send_email[jack]])
            body <- paste("Hello ",namer,", You have been outbidded by an unknown team for: ",input$FA_players,". ",
                          "An unknown club bidded ",test$Points[1], " points for the lead. Challenge that bid by following this link: https://wsfbdraft.shinyapps.io/free_agents/",
                          " Thank you.
                          Best,
                          James Ryu",sep="")
            send.mail(from = "email_address",
                      to = recipients,
                      subject = paste("You have been outbidded by somebody for the following FA: ",input$FA_players,sep=""),
                      body = body,
                      smtp = list(host.name = "smtp.gmail.com", port = 587, user.name = "username", passwd = "password", tls = TRUE),
                      authenticate = TRUE,
                      send = TRUE)
          }
        }
        # Persist the accepted bid (plus a backup copy).
        dbWriteTable(conn=con,"FA_TABLE",test,append=T)
        dbWriteTable(conn=con,"FA_TABLE_backup",test,append=T)
      }
      dbDisconnect(con)
      if(success == TRUE)
      {
        word <- paste("Submitted contracts to ",unlist(strsplit(input$FA_players," "))[2]," ",sub(",","",unlist(strsplit(input$FA_players," "))[1]),sep="")
      }
      if(success == FALSE)
      {
        word <- paste0("Your bid to ", input$FA_players, " at ",points," is not higher than ",max_point_player,". Try again.")
      }
      if((success == FALSE) & (illegal_minor == TRUE))
      {
        word <- "You entered illegal minor contract. You can't give multi-year minor league contract"
      }
      word
    }
    word
  })
  # Echo the bid-submission status message (the string returned by
  # sliderValues) in the "Update" verbatim panel once the user is logged in.
  output$values <- renderPrint({
    if (USER$Logged == TRUE){
      sliderValues()
    }
  })
  # "History" tab: all real bids for the selected FA, lowest points first.
  # NOTE(review): this connection uses dbname="data"/host="host" while the
  # other handlers use "db_name"/"host_address" -- confirm which is correct.
  output$table <- renderTable({
    if (USER$Logged == TRUE){
      con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="data",host="host")
      tbl <- dbReadTable(con,"FA_TABLE",row.names=NULL)
      dbDisconnect(con)
      tbl3 <- tbl[tbl$Player %in% input$FA_players,]
      # Drop the seed row (Club == "NONE") inserted before any real bid.
      tbl3 <- tbl3[tbl3$Club != "NONE",]
      tbl3 <- tbl3[,c("Player","Year_Guaranteed","Points","Start_Time","Contract_Status","summary")]
      tbl3 <- tbl3[order(tbl3$Points,decreasing=FALSE),]
      tbl3
    }
  })
# eventReactive driven by the "Check points" button: recomputes the point
# value of the contract currently described by the sidebar inputs, without
# submitting a bid to the database. Mirrors the scoring logic used when a
# contract is actually submitted.
sliderValues2 <- eventReactive(input$check,{
if (USER$Logged == TRUE){
# Option money and buy-outs default to zero until the option blocks run.
option_money <- 0
option_buy_out <- 0
vest_money <- 0
vest_buy_out <- 0
# Guaranteed contract length (1-10) from the slider.
years <- input$n15
# Copy the per-year salary inputs (input$n1..n10) into c1..c10, zeroing any
# year outside the guaranteed span. NOTE(review): ifelse() is used here
# purely for the side effect of assign(), which is unconventional.
ifelse((length(input$n1) > 0) & (years %in% c(1:10)),assign(paste0("c",1),input$n1),assign(paste0("c",1),0))
ifelse((length(input$n2) > 0) & (years %in% c(2:10)),assign(paste0("c",2),input$n2),assign(paste0("c",2),0))
ifelse((length(input$n3) > 0) & (years %in% c(3:10)),assign(paste0("c",3),input$n3),assign(paste0("c",3),0))
ifelse((length(input$n4) > 0) & (years %in% c(4:10)),assign(paste0("c",4),input$n4),assign(paste0("c",4),0))
ifelse((length(input$n5) > 0) & (years %in% c(5:10)),assign(paste0("c",5),input$n5),assign(paste0("c",5),0))
ifelse((length(input$n6) > 0) & (years %in% c(6:10)),assign(paste0("c",6),input$n6),assign(paste0("c",6),0))
ifelse((length(input$n7) > 0) & (years %in% c(7:10)),assign(paste0("c",7),input$n7),assign(paste0("c",7),0))
ifelse((length(input$n8) > 0) & (years %in% c(8:10)),assign(paste0("c",8),input$n8),assign(paste0("c",8),0))
ifelse((length(input$n9) > 0) & (years %in% c(9:10)),assign(paste0("c",9),input$n9),assign(paste0("c",9),0))
ifelse((length(input$n10) > 0) & (years %in% c(10)),assign(paste0("c",10),input$n10),assign(paste0("c",10),0))
# c16 is the signing bonus.
ifelse((length(input$n16) > 0),assign(paste0("c",16),input$n16),assign(paste0("c",16),0))
years <- input$n15
# Strip thousands separators from the free-text inputs and coerce to numeric.
ifelse((exists("c1") == TRUE) & (years %in% c(1:10)), c1 <- as.numeric(gsub(",", "", c1)), c1 <- 0)
ifelse((exists("c2") == TRUE) & (years %in% c(2:10)), c2 <- as.numeric(gsub(",", "", c2)), c2 <- 0)
ifelse((exists("c3") == TRUE) & (years %in% c(3:10)), c3 <- as.numeric(gsub(",", "", c3)), c3 <- 0)
ifelse((exists("c4") == TRUE) & (years %in% c(4:10)), c4 <- as.numeric(gsub(",", "", c4)), c4 <- 0)
ifelse((exists("c5") == TRUE) & (years %in% c(5:10)), c5 <- as.numeric(gsub(",", "", c5)), c5 <- 0)
ifelse((exists("c6") == TRUE) & (years %in% c(6:10)), c6 <- as.numeric(gsub(",", "", c6)), c6 <- 0)
ifelse((exists("c7") == TRUE) & (years %in% c(7:10)), c7 <- as.numeric(gsub(",", "", c7)), c7 <- 0)
ifelse((exists("c8") == TRUE) & (years %in% c(8:10)), c8 <- as.numeric(gsub(",", "", c8)), c8 <- 0)
ifelse((exists("c9") == TRUE) & (years %in% c(9:10)), c9 <- as.numeric(gsub(",", "", c9)), c9 <- 0)
ifelse((exists("c10") == TRUE) & (years %in% c(10)), c10 <- as.numeric(gsub(",", "", c10)), c10 <- 0)
ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub(",", "", c16)), c16 <- 0)
# NOTE(review): gsub("$", "", x) treats "$" as a regex end-of-string anchor,
# so these lines do not actually strip dollar signs; by this point the
# values are already numeric, so they are effectively no-ops.
ifelse((exists("c1") == TRUE), c1 <- as.numeric(gsub("$", "", c1)), c1 <- 0)
ifelse((exists("c2") == TRUE), c2 <- as.numeric(gsub("$", "", c2)), c2 <- 0)
ifelse((exists("c3") == TRUE), c3 <- as.numeric(gsub("$", "", c3)), c3 <- 0)
ifelse((exists("c4") == TRUE), c4 <- as.numeric(gsub("$", "", c4)), c4 <- 0)
ifelse((exists("c5") == TRUE), c5 <- as.numeric(gsub("$", "", c5)), c5 <- 0)
ifelse((exists("c6") == TRUE), c6 <- as.numeric(gsub("$", "", c6)), c6 <- 0)
ifelse((exists("c7") == TRUE), c7 <- as.numeric(gsub("$", "", c7)), c7 <- 0)
ifelse((exists("c8") == TRUE), c8 <- as.numeric(gsub("$", "", c8)), c8 <- 0)
ifelse((exists("c9") == TRUE), c9 <- as.numeric(gsub("$", "", c9)), c9 <- 0)
ifelse((exists("c10") == TRUE), c10 <- as.numeric(gsub("$", "", c10)), c10 <- 0)
ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub("$", "", c16)), c16 <- 0)
if(input$club_option == "YES")
{
years <- input$n15
# Club-option salary = 125% of the average guaranteed salary, rounded to $1k;
# its buy-out is 10% of that, rounded to $100k.
all_year <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
option_money <- as.numeric(round_any(as.numeric(all_year) / as.numeric(years) * 1.25,1000))
option_buy_out <- round_any(as.numeric((as.numeric(option_money) * 0.1)),100000)
}
if(input$club_option != "YES")
{
option_money <- 0
option_buy_out <- 0
}
if(input$vest_option == "YES")
{
# Vesting option mirrors the club-option math.
all_year_vest <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
years <- input$n15
vest_money <- as.numeric(round_any(as.numeric(all_year_vest) / as.numeric(years) * 1.25,1000))
vest_buy_out <- as.numeric(round_any(as.numeric((as.numeric(vest_money) * 0.1)),100000))
}
if(input$vest_option != "YES")
{
vest_money <- 0
vest_buy_out <- 0
}
years <- input$n15
# Total contract value = all guaranteed years + both buy-outs + signing bonus.
total <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10) + as.numeric(option_buy_out) + as.numeric(vest_buy_out) + as.numeric(c16)
AAV <- as.numeric(total) / as.numeric(years)
# Points formula: (total + 3*AAV + first-year salary - club buy-out +
# vesting buy-out) in millions, minus 1.5 per contract year, rounded to a
# whole point.
points <- as.numeric(round_any((as.numeric(total) + (as.numeric(AAV) * 3) + (as.numeric(c1) * 1) - (as.numeric(option_buy_out) * 1) + (as.numeric(vest_buy_out) * 1)) / (1000000) - (as.numeric(years) * 1.5),1))
points <- as.numeric(points)
points
}
}
)
# Print the projected bid points computed by sliderValues2() when the user
# presses the "Check points" button.
output$points <- renderPrint({
  # Only show the point preview once the user has authenticated.
  if (isTRUE(USER$Logged)) {
    sliderValues2()
  }
})
# Reactive backing the "10 Most Recent Bidding" table. As a side effect it
# also runs the auction-clock alert pipeline: for every player with an
# active clock it posts 24h / 12h / 1h warnings and a final "signed"
# announcement via updateStatus(), tracking what was already sent in the
# clock table. Re-evaluates every 20 seconds.
recent_email <- reactive({
  invalidateLater(20000, session)
  # BUG FIX: the handle returned by dbConnect() was never assigned, so the
  # dbReadTable()/dbWriteTable() calls below referenced an undefined `con`.
  con <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
  tbl <- dbReadTable(con, "tb_name", row.names = NULL)
  # Work table: one row per bid, trimmed to the columns the clock needs.
  tbl7 <- tbl[, c("Player", "Bid_Num", "Start_Time", "End_Time", "Points")]
  name_list <- unique(tbl7$Player)
  # Keep, for each free agent, the row holding the highest bid points.
  retain_row <- vector()
  for (k in seq_along(name_list)) {
    max_bid <- max(tbl7$Points[tbl7$Player %in% name_list[k]], na.rm = TRUE)
    # [1] guards against ties: which() can return more than one row.
    retain_row[k] <- which((tbl7$Points == max_bid) & (tbl7$Player == name_list[k]))[1]
  }
  tbl7 <- tbl7[retain_row, ]
  # Seconds remaining on each clock; "NA" when bidding has not started
  # (Bid_Num == 1). The +18000 term shifts the stored times by 5 hours
  # (EST offset) -- NOTE(review): confirm the stored timestamps' zone.
  tbl7$Time_Left <- ""
  for (l in seq_len(nrow(tbl7))) {
    ifelse(tbl7$Bid_Num[l] > 1, tbl7$Time_Left[l] <- round(as.numeric(as.POSIXct(tbl7$End_Time[l]), units = "sec") - as.numeric(as.POSIXct(now(tzone = "EST")), units = "sec"), digits = 0) + 18000, tbl7$Time_Left[l] <- "NA")
  }
  # Drop players whose clock has not started.
  tbl7 <- tbl7[!tbl7$Time_Left %in% c(NA, "NA"), ]
  tbl7$Time_Left <- as.numeric(tbl7$Time_Left)
  # Clock table: per-player YES/NO flags recording which alerts were sent.
  clock <- dbReadTable(con, "clock2", row.names = NULL)
  clock$send <- as.character(clock$send)
  # Alert thresholds, in seconds.
  t24 <- 3600 * 24
  t12 <- 3600 * 12
  t1 <- 3600
  t0 <- 0
  # For each clock row: re-arm alert flags when new bids pushed the deadline
  # back past a threshold, and stage a "send" marker (24/12/1/0) when the
  # remaining time first drops under a threshold that has not fired yet.
  for (m in seq_len(nrow(clock))) {
    if (length(which(tbl7$Player %in% clock$Player[m])) > 0) {
      # More than 24h left: reset every flag so all alerts can fire again.
      if (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t24) == TRUE) {
        ifelse((clock$clock24[m] == "YES") == TRUE, clock$clock24[m] <- "NO", clock$clock24[m] <- "NO")
        clock$clock12[m] <- "NO"
        clock$clock1[m] <- "NO"
      }
      # Between 12h and 24h left: fire the 24h alert if it has not been sent.
      if ((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t24) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t12) == TRUE)) {
        ifelse((clock$clock24[m] == "YES") == TRUE, clock$clock24[m] <- "YES", clock$clock24[m] <- "NO")
        if (clock$clock24[m] == "NO") {
          clock$send[m] <- 24
          clock$clock24[m] <- "YES"
        }
      }
      # More than 12h left: re-arm the 12h and 1h flags.
      if (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t12) == TRUE) {
        ifelse((clock$clock12[m] == "YES") == TRUE, clock$clock12[m] <- "NO", clock$clock12[m] <- "NO")
        clock$clock1[m] <- "NO"
      }
      # Between 1h and 12h left: fire the 12h alert if it has not been sent.
      if ((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t12) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t1) == TRUE)) {
        ifelse((clock$clock12[m] == "YES") == TRUE, clock$clock12[m] <- "YES", clock$clock12[m] <- "NO")
        if (clock$clock12[m] == "NO") {
          clock$send[m] <- 12
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
      # More than 1h left: re-arm the 1h flag.
      if (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t1) == TRUE) {
        ifelse((clock$clock1[m] == "YES") == TRUE, clock$clock1[m] <- "NO", clock$clock1[m] <- "NO")
      }
      # Under 1h but not expired: fire the 1h alert if it has not been sent.
      if ((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t1) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) > 0) == TRUE)) {
        ifelse((clock$clock1[m] == "YES") == TRUE, clock$clock1[m] <- "YES", clock$clock1[m] <- "NO")
        if (clock$clock1[m] == "NO") {
          clock$send[m] <- 1
          clock$clock1[m] <- "YES"
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
      # Clock expired: fire the final "signed" announcement once.
      if (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) <= 0) == TRUE) {
        ifelse((clock$clockend[m] == "YES") == TRUE, clock$clockend[m] <- "YES", clock$clockend[m] <- "NO")
        if (clock$clockend[m] == "NO") {
          clock$send[m] <- 0
          clock$clockend[m] <- "YES"
          clock$clock1[m] <- "YES"
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
    }
    # Player has no active clock row: nothing to do.
    if (length(which(tbl7$Player %in% clock$Player[m])) == 0) {
      next;
    }
  }
  # Post every staged alert. try() already performs the post; on failure the
  # send marker is cleared so the alert is retried on the next refresh.
  # BUG FIX: the old code called updateStatus() a second time after a
  # successful try(), posting every alert twice.
  mail_out <- which(clock$send %in% c("0", "1", "12", "24"))
  if (length(mail_out) > 0) {
    for (d in seq_along(mail_out)) {
      which_hour <- clock$send[mail_out[d]]
      if (which_hour %in% c("1", "12", "24")) {
        t <- try(updateStatus(paste0(which_hour, "-hour alert for ", unlist(strsplit(as.character(clock$Player[mail_out[d]]), " "))[2], " ", sub(",", "", unlist(strsplit(as.character(clock$Player[mail_out[d]]), " "))[1]), ". Make your bid by ", tbl7$End_Time[tbl7$Player %in% clock$Player[mail_out[d]]], "ET")))
        if ("try-error" %in% class(t)) {
          clock$send[mail_out[d]] <- NA
        }
      }
      if (which_hour %in% c("0", 0)) {
        # Winning bid, for the announcement text.
        tbl_highest <- tbl[tbl$Player == clock$Player[mail_out[d]], ]
        tbl_highest <- tbl_highest[order(tbl_highest$Points, decreasing = TRUE), ]
        tbl_highest <- tbl_highest[1, ]
        t <- try(updateStatus(paste0("Going once, going twice, and..SOLD! ", unlist(strsplit(as.character(clock$Player[mail_out[d]]), " "))[2], " ", sub(",", "", unlist(strsplit(as.character(clock$Player[mail_out[d]]), " "))[1]), " signs with ", tbl_highest$Club[1], " on a deal worth ", tbl_highest$summary[1])))
        if ("try-error" %in% class(t)) {
          clock$send[mail_out[d]] <- NA
        }
      }
    }
    clock$send[which(clock$send %in% c(0, 1, 12, 24))] <- NA
  }
  # Persist the updated clock state (a single write; the old code wrote the
  # same table twice) plus a CSV backup, then release the connection.
  clock <- clock[, c("Player", "clock24", "clock12", "clock1", "clockend", "send")]
  dbWriteTable(con, "tb_name", clock, overwrite = TRUE)
  write.csv(clock, "clocker.csv", row.names = FALSE)
  dbDisconnect(con)
  # Return the ten most recent bids, newest first.
  tbl8 <- tbl[(nrow(tbl)):(nrow(tbl) - 9), ]
  tbl8 <- tbl8[, c("row_names", "Player", "Year_Guaranteed", "summary", "End_Time")]
  tbl8$row_names <- c(1:10)
  tbl8
})
# Render the ten most recent bids; evaluating recent_email() also drives
# the scheduled alert pipeline as a side effect.
output$recent <- renderTable(recent_email())
# "Who Signed Where?" tab: for every player whose auction clock has
# expired, show the winning bid (highest points) and its club. Refreshes
# every five minutes; login required.
output$signed <- renderTable({
  if (USER$Logged == TRUE) {
    invalidateLater(300000, session)
    db <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
    clock_tbl <- dbReadTable(db, "tb_name", row.names = NULL)
    bids <- dbReadTable(db, "tb_name", row.names = NULL)
    # Players with an expired clock are the signed ones.
    signed_player <- unique(as.character(clock_tbl$Player[clock_tbl$clockend == "YES"]))
    signed_tbl <- bids[(bids$Player %in% signed_player), ]
    if (nrow(signed_tbl) > 0) {
      # Per player, keep the single row carrying the maximum bid points.
      max_pts <- aggregate(x = signed_tbl$Points, by = list(signed_tbl$Player), FUN = "max")
      keep_rows <- vector()
      for (row_i in seq_len(nrow(max_pts))) {
        keep_rows[row_i] <- which((signed_tbl$Points %in% max_pts$x[row_i]) & (signed_tbl$Player %in% max_pts$Group.1[row_i]))[1]
      }
      signed_tbl <- signed_tbl[keep_rows, ]
      signed_tbl <- signed_tbl[signed_tbl$Club != "NONE", ]
      signed_tbl <- signed_tbl[, c("Player", "Club", "Year_Guaranteed", "summary", "Points", "Bid_Num", "Contract_Status")]
    }
    # Re-check after filtering: everything may have been dropped.
    if (nrow(signed_tbl) == 0) {
      signed_tbl <- "No FA has been signed yet"
    }
    dbDisconnect(db)
    signed_tbl
  }
})
# Per-team bidding progress: for every FA the logged-in user's club has bid
# on, show the club's best offer, the current winning offer, and the club's
# rank among all bidders.
output$sort_by_team <- renderTable({
if(USER$Logged == TRUE){
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
# tbl56: user/team lookup; tbl10: full bidding history.
tbl56 <- dbReadTable(con,"tb_name",row.names=NULL)
tbl10 <- dbReadTable(con,"tb_name",row.names=NULL)
dbDisconnect(con)
club_name <- tbl56$team[tbl56$user == input$Username]
# Rows bid by the user's own club; the players touched by those bids.
tbl11 <- tbl10[tbl10$Club == tbl56$team[tbl56$user == input$Username],]
all_bidded <- unique(tbl11$Player)
# Result accumulator. NOTE(review): because each row is filled via c() of
# mixed types below, every column ends up character.
tbl14 <- data.frame(matrix(NA,nrow=1,ncol=6))
colnames(tbl14) <- c("Player","Year_Guaranteed","summary","Points","Winning_Bid_Points","Your_Rank")
if(length(all_bidded) > 0)
{
for(t in 1:length(all_bidded))
{
# All real bids for this player, best offer first.
tbl12 <- tbl10[tbl10$Player %in% all_bidded[t],]
tbl12 <- tbl12[tbl12$Club != "NONE",]
tbl12 <- tbl12[order(tbl12$Points,decreasing=TRUE),]
# Club's best points for this player, and its rank among all bids.
# NOTE(review): `rank` shadows base::rank here.
max_point <- unique(max(tbl12$Points[tbl12$Club %in% club_name]))
rank <- which((tbl12$Points == max_point) & (tbl12$Club %in% club_name))
rank <- rank[1]
seg <- data.frame(matrix(NA,nrow=1,ncol=6))
colnames(seg) <- c("Player","Year_Guaranteed","summary","Points","Winning_Bid_Points","Your_Rank")
seg[1:6] <- c(all_bidded[t],tbl12$Year_Guaranteed[rank],tbl12$summary[rank],max_point,tbl12$Points[1],rank)
# NOTE(review): growing tbl14 with rbind() in a loop is O(n^2); fine at
# this scale, but preallocate if the league grows.
tbl14 <- rbind(tbl14,seg)
tbl14 <- tbl14[!tbl14$Player %in% c(NA),]
}
}
if(length(all_bidded) == 0)
{
tbl14 <- "You have not pursued any FA yet."
}
tbl14
}
})
# Data table of the current highest bid for every still-eligible free
# agent, with a live "time left" string for players whose clock is running.
output$all_results <- renderDataTable({
  if (USER$Logged == TRUE) {
    con <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
    tbl <- dbReadTable(con, "FA_TABLE", row.names = NULL)
    clock <- dbReadTable(con, "tb_name", row.names = NULL)
    dbDisconnect(con)
    # All free agents present in the bidding table.
    all_names <- unique(tbl$Player)
    # tbl3: one row per player -- the row with the highest bid NUMBER
    # (i.e. the most recent bid, which carries the live clock times).
    tbl3 <- tbl[order(tbl$Points, decreasing = TRUE), ]
    high_bid <- vector()
    for (i in seq_along(all_names)) {
      max_point <- max(tbl3$Bid_Num[tbl3$Player %in% all_names[i]], na.rm = TRUE)
      # BUG FIX: [1] guards against ties; which() may return several rows,
      # which would break the scalar assignment.
      high_bid[i] <- which((tbl3$Bid_Num %in% max_point) & (tbl3$Player %in% all_names[i]))[1]
    }
    tbl3 <- tbl3[high_bid, ]
    # tbl2: one row per player -- the row with the highest POINTS (the
    # currently winning bid).
    tbl2 <- tbl[, c("Player", "Points", "summary", "Start_Time", "End_Time", "Bid_Num")]
    tbl2$Position <- ""
    highest <- vector()
    for (i in seq_along(all_names)) {
      max_point <- max(tbl2$Points[tbl2$Player %in% all_names[i]], na.rm = TRUE)
      highest[i] <- which((tbl2$Points %in% max_point) & (tbl2$Player %in% all_names[i]))[1]
    }
    tbl2 <- tbl2[highest, ]
    tbl2$Time_left <- ""
    Free_agent <- read.csv("FA.csv")
    for (k in seq_len(nrow(tbl2))) {
      # NOTE(review): these lines index tbl2's own columns with a mask built
      # from tbl3; this relies on tbl2 and tbl3 holding the same players --
      # confirm whether tbl3$Start_Time / tbl3$End_Time was intended.
      tbl2$Start_Time[k] <- tbl2$Start_Time[tbl3$Player %in% tbl2$Player[k]]
      tbl2$End_Time[k] <- tbl2$End_Time[tbl3$Player %in% tbl2$Player[k]]
      tbl2$Position[k] <- as.character(Free_agent$Position[Free_agent$Name %in% tbl2$Player[k]])
      if ((tbl2$Bid_Num[k] == 1)) {
        # A single "bid" row means bidding has not actually started.
        tbl2$Start_Time[k] <- "Clock hasn't started"
        tbl2$End_Time[k] <- "Clock hasn't started"
        tbl2$Time_left[k] <- "Clock hasn't started"
        # BUG FIX: this summary blanking previously sat OUTSIDE the loop,
        # so it only ever applied to the last player processed.
        tbl2$summary[k] <- "--"
      }
      if ((tbl2$Bid_Num[k] > 1)) {
        # Remaining seconds; +18000 offsets the stored times by 5 hours
        # (EST) -- NOTE(review): confirm the stored timestamps' zone.
        tbl2$Time_left[k] <- as.numeric(as.numeric(as.POSIXct(tbl2$End_Time[tbl2$Player %in% tbl3$Player[k]]), units = "sec") - as.numeric(as.POSIXct(now(tzone = "EST")), units = "sec")) + 18000
        # Format as "Xd Hh Mm SS.s S left.", trimming seconds to 4 chars.
        tbl2$Time_left[k] <- as.character(seconds_to_period(tbl2$Time_left[k]))
        tbl2$Time_left[k] <- paste(as.character(unlist(strsplit(tbl2$Time_left[k], " "))[1]), as.character(unlist(strsplit(tbl2$Time_left[k], " "))[2]), as.character(unlist(strsplit(tbl2$Time_left[k], " "))[3]), paste(substr(unlist(strsplit(tbl2$Time_left[k], " "))[4], 1, 4), " S", sep = ""), sep = " ")
        tbl2$Time_left[k] <- sub(pattern = "NAS", replacement = "", x = tbl2$Time_left[k])
        tbl2$Time_left[k] <- paste0(tbl2$Time_left[k], " left.")
      }
    }
    tbl2 <- tbl2[order(tbl2$Player, decreasing = TRUE), ]
    # Drop players whose clock has already expired.
    ineligibles <- clock$Player[clock$clockend == "YES"]
    tbl2 <- tbl2[!tbl2$Player %in% ineligibles, ]
    tbl2$Bid_Num <- NULL
    DT::datatable(tbl2, options = list(pageLength = 10))
  }
})
# Static announcements panel: a hard-coded feed of newly added free
# agents. The multi-line literal below is user-facing text shown verbatim
# in the Announcements tab -- edit with care.
output$announce <- renderText({
"
Jan-16-2016 2:10 AM: New free agents:
Huff, David
Giavotella, Johnny
Rasmus, Cory
Gentry, Craig
Pena, Brayan
Hamilton, Josh
Fryer, Eric
Freeman, Mike
Lobaton, Jose
Thank you,
JR
--------
Here are the list of just added FAs:
Javy Guerra
Al Albuquerque
Hector Santiago
A.J. Achter
Justin Miller
Brandon Barnes
David Lough
Emmanuel Burris
David Buchanan
Steve Clevenger
Eric Sogard
Matt McBride
Felix Doubront
Jarrod Parker
Collin Cowgill
Michael Kirkman
Tom Wilhelmsen
Brett Lawrie
Avisail Garcia
Daniel Webb
Neil Ramirez
Kyle Gibson
Eduardo Escobar
Oswaldo Arcia
Jake Elmore
Daniel Fields
Chris Bassitt
Tyler Olson
Jabari Blash
Mark Canha
Tyler Ladendorf
Andrew Lambo
Josh Rodriguez
Erasmo Ramirez
Austin Adams
Ben Revere
Charlie Furbush
Dillon Gee
Alexi Amarista
Vicente Campos
Jose Pirela
Christian Friedrich
Blake Wood
Ryan Flaherty
Cesar Ramos
Eric Surkamp
Rene Rivera
Jeff Walters
Eric Campbell
Aaron Brooks
Spencer Patton
Clayton Richard
Cody Ege"
})
# Download handler: CSV of final contract details for every signed FA.
# Also persists the summary table back to the database as a side effect.
output$downloadData <- downloadHandler(
  filename = function() {
    "contract_info.csv"
  },
  # BUG FIX: downloadHandler's content function receives exactly one
  # argument -- the temp file path -- so the old function(filename, results)
  # signature was misleading; `results` is a plain local below.
  content = function(file) {
    con <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
    clocked <- dbReadTable(con, "tb_name", row.names = NULL)
    # Players whose clock has ended are the signed ones.
    clocked_name <- clocked$Player[clocked$clockend %in% "YES"]
    results <- dbReadTable(con, "tb_name", row.names = NULL)
    total_dollar_table <- dbReadTable(con, "tb_name", row.names = NULL)
    results <- results[, c("Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020","X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting","Buyout2","Points","AAV","Total","Contract_Status","End_Time")]
    results <- results[results$Player %in% clocked_name, ]
    # Per signed player, keep the winning (highest-point) bid; on ties the
    # most recent (last) matching row wins.
    vecs <- vector()
    for (n in seq_along(clocked_name)) {
      maximal <- max(results$Points[results$Player %in% clocked_name[n]])[1]
      hits <- which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal))
      if (length(hits) > 0) {
        vecs[n] <- hits[length(hits)]
      }
    }
    results <- results[vecs, ]
    # Pretty-print all dollar/point columns with thousands separators.
    for (r in 5:22) {
      results[, r] <- prettyNum(results[, r], big.mark = ",", scientific = FALSE)
    }
    total_dollar_table <- results
    dbWriteTable(conn = con, "tb_name", total_dollar_table, overwrite = TRUE)
    dbDisconnect(con)
    write.csv(results, file, row.names = FALSE)
  }
)
# Download handler: CSV of the logged-in user's own bidding history.
output$downloadData2 <- downloadHandler(
  filename = function() {
    "your_bidding_history.csv"
  },
  # BUG FIX: content receives only the temp file path; the old
  # function(filename, tables) signature was misleading.
  content = function(file) {
    # NOTE(review): when the user is not logged in, no file is written and
    # the download will fail -- confirm this is acceptable.
    if (USER$Logged == TRUE) {
      con <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
      tbl56 <- dbReadTable(con, "tb_name", row.names = NULL)
      tables <- dbReadTable(con, "tb_name", row.names = NULL)
      dbDisconnect(con)
      # Resolve the user's club and keep only its bids, oldest first.
      team_selected <- tbl56$team[as.character(tbl56$user) %in% as.character(input$Username)]
      tables <- tables[tables$Club %in% team_selected, ]
      tables <- tables[order(tables$Start_Time, decreasing = FALSE), ]
      tables <- tables[, c("Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020","X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting","Buyout2","Points","AAV","Total","Contract_Status")]
      # Pretty-print all dollar/point columns with thousands separators.
      for (s in 5:22) {
        tables[, s] <- prettyNum(tables[, s], big.mark = ",", scientific = FALSE)
      }
      write.csv(tables, file, row.names = FALSE)
    }
  }
)
})
# Assemble and launch the Shiny application from the ui/server objects.
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
ryuth/FA_app
|
R
| false
| false
| 52,228
|
r
|
# --- Package dependencies -------------------------------------------------
library(lubridate)    # now(), ymd_hms(), seconds_to_period()
library(mailR)        # e-mail delivery
library(shiny)
library(RMySQL)       # MySQL-backed persistence (dbConnect/dbReadTable/...)
library(dplyr)
library(plyr)         # round_any(); NOTE(review): loading plyr AFTER dplyr masks several dplyr verbs -- confirm this order is intentional
library(DT)           # datatable() widget
library(twitteR)      # updateStatus() alerts
library(shinythemes)
# --- Static lookup tables shipped with the app ----------------------------
Free_agent <- read.csv("FA.csv")
team <- read.csv("team.csv")
team$Team <- as.character(team$Team)
# NOTE(review): consumer_key / consumer_secret / access_token /
# access_secret are not defined anywhere in this file; they must exist in
# the environment before this line runs, or app startup fails.
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# UI: theme selector plus a sidebar of bidding controls and a tabbed main
# panel of league-wide views.
ui <- shinyUI(
fluidPage(
shinythemes::themeSelector(),
theme = shinytheme("superhero"),
{
sidebarLayout(
sidebarPanel(
# Login form (rendered server-side; hidden once authenticated).
uiOutput("uiLogin"),
# Part 1: drop-down menu to choose a free agent.
selectInput("FA_players", "Choose a FA:", choices = Free_agent$Name),
# Part 3: submit the contract offer described by the inputs below.
p("Note: After clicking the submit button, allow 5-10 seconds for the message confirming successful submission of contract offer. Buffering time may differ based on condition of your internet connectivity"),
br(),
actionButton("choose","Submit Your Contract Offer to the Selected FA"),
br(),
p("You can set the value to $0 in the contract years that you don't want to commit to. e.g. If you want to give 1-year contract, set 2017 salary but set everything else to 0. (Everything is defaulted to zero for you. Please check all the parameters before submitting contracts"),
br(),
p("Note: Raises in salary from one year to the next are not allowed to be higher than 100%. Decreases in salary from one year to the next are not allowed to be lower than -50% "),
# Preview the points a contract would score without submitting it.
actionButton("check","Check points"),
p("Note about 'check points' button: Fill out all contract info below to figure out your points. Continue to play around with the parameters until you outbid the current highest points."),
# Part 5: contract-structure inputs.
selectInput("club_option", "Yes or no to club option:",
choices = c("YES","NO"),selected="NO"),
selectInput("vest_option", "Yes or no to vesting option:",
choices = c("YES","NO"),selected="NO"),
sliderInput("n15", "Guaranteed Year:",
min = 1, max = 10, value = 1, step = 1),
# Per-year salary boxes generated server-side to match the slider.
uiOutput("tickers"),
textInput("n16", "Signing Bonus. (Avoid entering $ sign)",
value = 0),
selectInput("n17", "Contract Type:",choices = c("Major League Contract","Minor League Contract")),
wellPanel(
downloadButton('downloadData', 'Download contract details of signed FAs'),
downloadButton('downloadData2','Download bidding history for your team.')
),
# Custom fonts/colors for headings and the timer/status outputs.
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h1 {
font-family: 'Lobster',cursive;
font-weight: bold;
line-height: 1.1;
color: #000000;
}
"))
),
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Lobster|Cabin:400,700');
h3 {
font-family: 'Open Sans';
font-weight: bold;
line-height: 1.1;
color: #0013F7;
}
"))
),
tags$head(tags$style("#timer{color: blue;
font-size: 20px;
font-style: italic;
}"
)
),
tags$head(tags$style("#values{color: red;
font-size: 20px;
font-style: bold;
}"
)
)
),
# Main panel: one tab per league view; ids match the server outputs.
mainPanel(
h1("World Series of Fantasy Baseball Free Agency"),
tabsetPanel(type = "tabs",
tabPanel("main",h3("Login Status."),
verbatimTextOutput("pass"),
h3("Timer"),
verbatimTextOutput("timer"),
h3("Point checker"),
verbatimTextOutput("points"),
h3("Update"),
verbatimTextOutput("values"),h3("10 Most Recent Bidding (Most recent bid at the top. Time in ET)"),
tableOutput("recent")
),
tabPanel("Announcements",h3("Announcements"),
verbatimTextOutput("announce")),
tabPanel("Summary", h3("Who Signed Where?"),
tableOutput("signed")),
tabPanel("History", h3("Bidding history of FA in target"),
tableOutput("table")),
tabPanel("Progress", h3("Your Team Bidding Progress (BETA Version)"),
tableOutput("sort_by_team")),
tabPanel("all result", h3("Highest bidding point for each of 2016 FA. (Time in ET)"),
dataTableOutput("all_results"))
)
)
)
})
)
server <- shinyServer(function(input, output, session){
USER <- reactiveValues(Logged = FALSE)
TEAM <- reactiveValues(name = NA)
# Render the login form; it disappears once the user has authenticated.
output$uiLogin <- renderUI({
  if (!USER$Logged) {
    wellPanel(
      textInput("Username", "Username: (Case sensitive)"),
      textInput("Password", "Password:"),
      br(),
      actionButton("Login", "Log in")
    )
  }
})
# Validate the submitted credentials against the Password table and flip
# USER$Logged on success; returns the status string shown in the UI.
# BUG FIXES: the old row-index comparison (Id.username == Id.password)
# produced a vector condition when two accounts shared a password, a wrong
# password for an existing user silently returned NULL instead of the
# failure message, and `TEAM <- reactiveValues(...)` created a discarded
# local instead of updating the shared TEAM reactive.
output$pass <- eventReactive(input$Login, {
  con <- dbConnect(drv = MySQL(), user = "userid", password = "password", dbname = "db_name", host = "host_address")
  PASSWORD <- dbReadTable(con, "Password", row.names = NULL)
  dbDisconnect(con)
  Username <- isolate(input$Username)
  Password <- isolate(input$Password)
  Id.username <- which(PASSWORD$user == Username)
  # Valid only when the username matches exactly one row AND the password
  # stored on that same row matches the submitted password.
  if (length(Id.username) == 1 && isTRUE(PASSWORD$password[Id.username] == Password)) {
    USER$Logged <- TRUE
    # Record the authenticated user's club in the shared reactive.
    TEAM$name <- PASSWORD$team[Id.username]
    "Log in successful. (Exit the browser to logoff)"
  } else {
    "User name or password failed!!!"
  }
})
# Build one salary text input per guaranteed contract year (ids n1..n10),
# matching the "Guaranteed Year" slider (input$n15).
output$tickers <- renderUI({
  year_count <- as.integer(input$n15)
  lapply(seq_len(year_count), function(i) {
    list(textInput(paste0("n", i), label = paste0("Year", i + 2016, " (Avoid entering $ sign)"), value = 0))
  })
})
# Live countdown for the free agent currently selected in the sidebar.
output$timer <- renderText({
if (USER$Logged == TRUE){
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
tbl <- dbReadTable(con,"FA_TABLE",row.names=NULL)
clock <- dbReadTable(con,"clock2",row.names=NULL)
dbDisconnect(con)
# Players whose clock has already expired can no longer be bid on.
ineligible <- as.character(clock$Player[clock$clockend %in% "YES"])
# Keep only the selected player's most recent bid row.
tbl <- tbl[tbl$Player %in% input$FA_players,]
tbl <- tbl[order(tbl$Bid_Num, decreasing = TRUE),]
tbl <- tbl[1,]
if(tbl$Player[1] %in% ineligible)
{
print_it <- paste0("Time is up on ",tbl$Player[1])
}
# A single "bid" row means the clock has not started yet.
if((tbl$Bid_Num[1] == 1) & (!tbl$Player[1] %in% ineligible))
{
tbl$Start_Time[1] <- "Clock hasn't started"
tbl$End_Time[1] <- "Clock hasn't started"
tbl$Time_left[1] <- "Clock hasn't started"
print_it <- tbl$Time_left[1]
}
if((tbl$Bid_Num[1] > 1) & (!tbl$Player[1] %in% ineligible))
{
end_time <- as.POSIXct(tbl$End_Time[1])
start_time <- as.POSIXct(tbl$Start_Time[1])
# Remaining seconds; the +18000 term shifts the stored times by 5 hours
# (EST offset) -- NOTE(review): confirm the stored timestamps' zone.
time_diff <- (as.numeric(as.POSIXct(end_time),units="sec") - as.numeric(as.POSIXct(start_time),units="sec")) - (as.numeric(as.POSIXct(now(tzone="EST")),units="sec") - as.numeric(as.POSIXct(start_time),units="sec")) + 18000
hour <- time_diff %/% 3600
min <- time_diff %% 3600
second <- min %% 60
min <- min %/% 60
second <- floor(second)
print_it <- paste0(hour," hours ",min," mins ",second," seconds to go")
# Re-render every second, but only while a clock is actually running.
invalidateLater(1000, session)
print_it
}
# NOTE(review): if none of the three branches above matched (e.g. an NA
# Bid_Num), print_it would be undefined here and this errors.
print_it
}
})
# PART5
sliderValues <- eventReactive(input$choose,{
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
clock <- dbReadTable(con,"tb_name",row.names=NULL)
dbDisconnect(con)
ineligible <- as.character(clock$Player[clock$clockend %in% "YES"])
finished <- "NO"
if(input$FA_players %in% ineligible)
{
finished <- "YES"
word <- paste0(input$FA_players," already signed FA contract.")
}
if(input$Username == "Tony")
{
word <- "You are not authorized to sign any FA"
word
}
if ((USER$Logged == TRUE) & (input$Username != "Tony") & (finished == "NO")){
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
tbl <- dbReadTable(con,"tb_name",row.names=NULL)
years <- input$n15
illegal_minor <- FALSE
ifelse((length(input$n1) > 0) & (years %in% c(1:10)),assign(paste0("c",1),input$n1),assign(paste0("c",1),0))
ifelse((length(input$n2) > 0) & (years %in% c(2:10)),assign(paste0("c",2),input$n2),assign(paste0("c",2),0))
ifelse((length(input$n3) > 0) & (years %in% c(3:10)),assign(paste0("c",3),input$n3),assign(paste0("c",3),0))
ifelse((length(input$n4) > 0) & (years %in% c(4:10)),assign(paste0("c",4),input$n4),assign(paste0("c",4),0))
ifelse((length(input$n5) > 0) & (years %in% c(5:10)),assign(paste0("c",5),input$n5),assign(paste0("c",5),0))
ifelse((length(input$n6) > 0) & (years %in% c(6:10)),assign(paste0("c",6),input$n6),assign(paste0("c",6),0))
ifelse((length(input$n7) > 0) & (years %in% c(7:10)),assign(paste0("c",7),input$n7),assign(paste0("c",7),0))
ifelse((length(input$n8) > 0) & (years %in% c(8:10)),assign(paste0("c",8),input$n8),assign(paste0("c",8),0))
ifelse((length(input$n9) > 0) & (years %in% c(9:10)),assign(paste0("c",9),input$n9),assign(paste0("c",9),0))
ifelse((length(input$n10) > 0) & (years %in% c(10)),assign(paste0("c",10),input$n10),assign(paste0("c",10),0))
ifelse((length(input$n16) > 0),assign(paste0("c",16),input$n16),assign(paste0("c",16),0))
ifelse((exists("c1") == TRUE) & (years %in% c(1:10)), c1 <- as.numeric(gsub(",", "", c1)), c1 <- 0)
ifelse((exists("c2") == TRUE) & (years %in% c(2:10)), c2 <- as.numeric(gsub(",", "", c2)), c2 <- 0)
ifelse((exists("c3") == TRUE) & (years %in% c(3:10)), c3 <- as.numeric(gsub(",", "", c3)), c3 <- 0)
ifelse((exists("c4") == TRUE) & (years %in% c(4:10)), c4 <- as.numeric(gsub(",", "", c4)), c4 <- 0)
ifelse((exists("c5") == TRUE) & (years %in% c(5:10)), c5 <- as.numeric(gsub(",", "", c5)), c5 <- 0)
ifelse((exists("c6") == TRUE) & (years %in% c(6:10)), c6 <- as.numeric(gsub(",", "", c6)), c6 <- 0)
ifelse((exists("c7") == TRUE) & (years %in% c(7:10)), c7 <- as.numeric(gsub(",", "", c7)), c7 <- 0)
ifelse((exists("c8") == TRUE) & (years %in% c(8:10)), c8 <- as.numeric(gsub(",", "", c8)), c8 <- 0)
ifelse((exists("c9") == TRUE) & (years %in% c(9:10)), c9 <- as.numeric(gsub(",", "", c9)), c9 <- 0)
ifelse((exists("c10") == TRUE) & (years %in% c(10)), c10 <- as.numeric(gsub(",", "", c10)), c10 <- 0)
ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub(",", "", c16)), c16 <- 0)
ifelse((exists("c1") == TRUE), c1 <- as.numeric(gsub("$", "", c1)), c1 <- 0)
ifelse((exists("c2") == TRUE), c2 <- as.numeric(gsub("$", "", c2)), c2 <- 0)
ifelse((exists("c3") == TRUE), c3 <- as.numeric(gsub("$", "", c3)), c3 <- 0)
ifelse((exists("c4") == TRUE), c4 <- as.numeric(gsub("$", "", c4)), c4 <- 0)
ifelse((exists("c5") == TRUE), c5 <- as.numeric(gsub("$", "", c5)), c5 <- 0)
ifelse((exists("c6") == TRUE), c6 <- as.numeric(gsub("$", "", c6)), c6 <- 0)
ifelse((exists("c7") == TRUE), c7 <- as.numeric(gsub("$", "", c7)), c7 <- 0)
ifelse((exists("c8") == TRUE), c8 <- as.numeric(gsub("$", "", c8)), c8 <- 0)
ifelse((exists("c9") == TRUE), c9 <- as.numeric(gsub("$", "", c9)), c9 <- 0)
ifelse((exists("c10") == TRUE), c10 <- as.numeric(gsub("$", "", c10)), c10 <- 0)
ifelse((exists("c16") == TRUE), c16 <- as.numeric(gsub("$", "", c16)), c16 <- 0)
ifelse((c1 > 0) & (c1 < 535000), c1 <- as.numeric(535000),c1 <- c1)
ifelse((c2 > 0) & (c2 < 535000), c2 <- as.numeric(535000),c2 <- c2)
ifelse((c3 > 0) & (c3 < 535000), c3 <- as.numeric(535000),c3 <- c3)
ifelse((c4 > 0) & (c4 < 535000), c4 <- as.numeric(535000),c4 <- c4)
ifelse((c5 > 0) & (c5 < 535000), c5 <- as.numeric(535000),c5 <- c5)
ifelse((c6 > 0) & (c6 < 535000), c6 <- as.numeric(535000),c6 <- c6)
ifelse((c7 > 0) & (c7 < 535000), c7 <- as.numeric(535000),c7 <- c7)
ifelse((c8 > 0) & (c8 < 535000), c8 <- as.numeric(535000),c8 <- c8)
ifelse((c9 > 0) & (c9 < 535000), c9 <- as.numeric(535000),c9 <- c9)
ifelse((c10 > 0) & (c10 < 535000), c10 <- as.numeric(535000),c10 <- c10)
if(input$club_option %in% "YES")
{
option_money <- 0
option_buy_out <- 0
years <- input$n15
all_year <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
option_money <- as.numeric(round_any(as.numeric(all_year) / as.numeric(years) * 1.25,1000))
option_buy_out <- round_any(as.numeric((option_money * 0.1)),100000)
}
if(!input$club_option %in% "YES")
{
option_money <- 0
option_money <- as.numeric(option_money)
option_buy_out <- 0
option_buy_out <- as.numeric(option_buy_out)
}
if(input$vest_option %in% "YES")
{
vest_money <- 0
vest_buy_out <- 0
years <- input$n15
all_year_vest <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10)
vest_money <- as.numeric(round_any(as.numeric(all_year_vest) / as.numeric(years) * 1.25,1000))
vest_buy_out <- round_any(as.numeric((vest_money * 0.1)),100000)
}
if(!input$vest_option %in% "YES")
{
vest_money <- 0
vest_money <- as.numeric(vest_money)
vest_buy_out <- 0
vest_buy_out <- as.numeric(vest_buy_out)
}
years <- input$n15
total <- as.numeric(c1) + as.numeric(c2) + as.numeric(c3) + as.numeric(c4) + as.numeric(c5) + as.numeric(c6) + as.numeric(c7) + as.numeric(c8) + as.numeric(c9) + as.numeric(c10) + as.numeric(option_buy_out) + as.numeric(vest_buy_out) + as.numeric(c16)
AAV <- as.numeric(total) / as.numeric(years)
points <- as.numeric(round_any((as.numeric(total) + (as.numeric(AAV) * 3) + (as.numeric(c1) * 1) - (as.numeric(option_buy_out) * 1) + (as.numeric(vest_buy_out) * 1)) / (1000000) - (as.numeric(years) * 1.5),1))
points <- as.numeric(points)
tbl4 <- tbl[tbl$Player == input$FA_players,]
max_point_player <- max(tbl4$Points)
if(points > max_point_player)
{
success <- TRUE
}
if(points < max_point_player)
{
success <- FALSE
}
if(points == max_point_player)
{
success <- FALSE
}
year <- c("c1","c2","c3","c4","c5","c6","c7","c8","c9","c10")
existing <- vector()
ifelse(c1 >= 535000, existing[1] <- TRUE, existing[1] <- FALSE)
ifelse(c2 >= 535000, existing[2] <- TRUE, existing[2] <- FALSE)
ifelse(c3 >= 535000, existing[3] <- TRUE, existing[3] <- FALSE)
ifelse(c4 >= 535000, existing[4] <- TRUE, existing[4] <- FALSE)
ifelse(c5 >= 535000, existing[5] <- TRUE, existing[5] <- FALSE)
ifelse(c6 >= 535000, existing[6] <- TRUE, existing[6] <- FALSE)
ifelse(c7 >= 535000, existing[7] <- TRUE, existing[7] <- FALSE)
ifelse(c8 >= 535000, existing[8] <- TRUE, existing[8] <- FALSE)
ifelse(c9 >= 535000, existing[9] <- TRUE, existing[9] <- FALSE)
ifelse(c10 >= 535000, existing[10] <- TRUE, existing[10] <- FALSE)
if((length(which(existing == TRUE)) > 1) & (input$n17 == "Minor League Contract"))
{
success <- FALSE
illegal_minor <- TRUE
}
if((success == TRUE) & (input$Username != "Tony") & (illegal_minor == FALSE)){
tbl <- tbl[(tbl$Player == input$FA_players),]
difference <- 0
tbl <- tbl[tbl$Player %in% input$FA_players,]
tbl <- tbl[order(tbl$Bid_Num, decreasing = TRUE),]
tbl <- tbl[1,]
tbl$Club <- as.character(tbl$Club)
#tbl$Club <- as.character(tbl$Club)
# Initial end and start time
end_time <- ymd_hms(tbl$End_Time[1],tz="EST")
start_time <- ymd_hms(tbl$Start_Time[1],tz="EST")
# Time at the bidding
bid_time <- now(tzone = "EST")
# Time difference between at the time of bidding to the deadline
if(tbl$Bid_Num[1] == 1)
{
difference <- 86400 * 10
}
if(tbl$Bid_Num[1] > 1)
{
difference <- as.numeric(end_time - start_time,units="secs") - as.numeric(bid_time - start_time,units="secs")
}
# Max time difference possible. (240 hrs)
max <- (240*3600)
# Time added at the first bidding
if((tbl$Bid_Num[1]+1) == 2)
{
X2nd_bid <- 86400 * 10
difference <- difference + X2nd_bid
}
if((tbl$Bid_Num[1]+1) == 3)
{
X3rd_bid <- 86400 * 5
difference <- difference + X3rd_bid
}
if((tbl$Bid_Num[1]+1) == 4)
{
X4th_bid <- 86400 * 2
difference <- difference + X4th_bid
}
if((tbl$Bid_Num[1]+1) == 5)
{
X5th_bid <- 86400 * 1
difference <- difference + X5th_bid
}
if((tbl$Bid_Num[1]+1) >= 6)
{
X6th_bid <- 86400 * 0.5
difference <- difference + X6th_bid
}
# If "difference" is larger than max, difference equals max
if(difference >= max)
{
difference <- max
end_time <- bid_time + difference
start_time <- bid_time
}
# If "difference" is less than max, difference equals difference
if(difference < max)
{
difference <- difference
end_time <- bid_time + difference
start_time <- bid_time
}
tbl55 <- dbReadTable(con,"tb_name",row.names=NULL)
test <- data.frame(matrix("NA",nrow=1,ncol=27),stringsAsFactors = FALSE)
colnames(test) <- c("row_names","Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020",
"X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting",
"Buyout2","Points","AAV","Total","Bid_Num","Start_Time","End_Time","Contract_Status")
test$row_names[1] <- NA
test$Player[1] <- input$FA_players
test$Club[1] <- tbl55$team[as.character(tbl55$user) %in% as.character(input$Username)]
years <- input$n15
test$Year_Guaranteed[1] <- years
test$Signing_Bonus[1] <- c16
test$Points[1] <- points
ifelse((total %in% c(1,0) & (tbl$Bid_Num[1] %in% c(1))),summary <- paste0("$0M for ",as.numeric(years),"yr(s)"),summary <- paste0("$",round((total) / 1000000,digits=2),"M for ",years,"yr(s)"))
test[1,] <- as.character(c(NA,input$FA_players,as.character(tbl55$team[tbl55$user %in% input$Username]),years,summary,c16,c1,
c2,c3,c4,c5,c6,c7,c8,c9,c10,option_money,option_buy_out,vest_money,vest_buy_out,
points,AAV,total,tbl$Bid_Num[tbl$Player %in% input$FA_players] + 1,strftime(start_time,"%Y-%m-%d %H:%M:%S",tz="EST"),strftime(end_time,"%Y-%m-%d %H:%M:%S",tz="EST"),input$n17))
test <- test[1,]
email <- read.csv("email.csv")
email$email <- as.character(email$email)
email$Team <- as.character(email$Team)
check <- tbl$Points[!is.na(tbl$Points)]
if(all(points > check))
{
max_points <- max(tbl$Points,na.rm=TRUE)
teams_to_send_email <- tbl$Club[which(tbl$Points == max_points)]
teams_to_send_email <- unique(teams_to_send_email)
for(jack in 1:length(teams_to_send_email))
{
recipients <- email$email[email$X %in% teams_to_send_email[jack]]
namer <- unique(email$Team[email$X %in% teams_to_send_email[jack]])
body <- paste("Hello ",namer,", You have been outbidded by an unknown team for: ",input$FA_players,". ",
"An unknown club bidded ",test$Points[1], " points for the lead. Challenge that bid by following this link: https://wsfbdraft.shinyapps.io/free_agents/",
" Thank you.
Best,
James Ryu",sep="")
send.mail(from = "email_address",
to = recipients,
subject = paste("You have been outbidded by somebody for the following FA: ",input$FA_players,sep=""),
body = body,
smtp = list(host.name = "smtp.gmail.com", port = 587, user.name = "username", passwd = "password", tls = TRUE),
authenticate = TRUE,
send = TRUE)
}
}
dbWriteTable(conn=con,"FA_TABLE",test,append=T)
dbWriteTable(conn=con,"FA_TABLE_backup",test,append=T)
}
dbDisconnect(con)
if(success == TRUE)
{
word <- paste("Submitted contracts to ",unlist(strsplit(input$FA_players," "))[2]," ",sub(",","",unlist(strsplit(input$FA_players," "))[1]),sep="")
}
if(success == FALSE)
{
word <- paste0("Your bid to ", input$FA_players, " at ",points," is not higher than ",max_point_player,". Try again.")
}
if((success == FALSE) & (illegal_minor == TRUE))
{
word <- "You entered illegal minor contract. You can't give multi-year minor league contract"
}
word
}
word
})
# Show the values using an HTML table
output$values <- renderPrint({
# Render the bid-submission status message produced by sliderValues()
# (e.g. "Submitted contracts to ..." / "Your bid ... is not higher ...").
# Prints nothing for logged-out sessions.
if (USER$Logged == TRUE){
sliderValues()
}
})
output$table <- renderTable({
  # Bid history for the currently selected free agent, lowest points first.
  # Only available to authenticated users.
  if (USER$Logged == TRUE){
    con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="data",host="host")
    bids <- dbReadTable(con,"FA_TABLE",row.names=NULL)
    dbDisconnect(con)
    # Keep only rows for the selected player that carry a real club.
    bids <- bids[bids$Player %in% input$FA_players,]
    bids <- bids[bids$Club != "NONE",]
    # Restrict to the display columns and sort ascending by points.
    bids <- bids[,c("Player","Year_Guaranteed","Points","Start_Time","Contract_Status","summary")]
    bids[order(bids$Points,decreasing=FALSE),]
  }
})
sliderValues2 <- eventReactive(input$check,{
  # Preview calculator: parses the contract-offer inputs (yearly salaries
  # n1..n10, signing bonus n16, contract length n15, club/vesting options)
  # and returns the bid's point value without submitting anything.
  if (USER$Logged == TRUE){
    years <- input$n15
    # Parse a free-text money field such as "$1,234,567" into a number.
    # Empty inputs count as 0; unparseable text yields NA (propagates into
    # the total, as before).
    # BUGFIX: the previous code stripped "$" with gsub("$", "", x) — "$" is
    # a regex end-of-string anchor, so dollar signs were never removed and
    # any "$"-prefixed input became NA. "[$,]" strips both "$" and commas.
    parse_money <- function(x) {
      if (length(x) == 0) return(0)
      as.numeric(gsub("[$,]", "", x))
    }
    # Year-i salary counts only when the contract runs at least i years;
    # otherwise it is forced to 0 (mirrors the original per-year guards).
    salary <- vapply(1:10, function(i) {
      if (years %in% i:10) parse_money(input[[paste0("n", i)]]) else 0
    }, numeric(1))
    # Signing bonus (n16) has no year restriction.
    signing_bonus <- parse_money(input$n16)
    guaranteed <- sum(salary)
    # Club / vesting options: 125% of the guaranteed AAV (rounded to $1k),
    # each with a 10% buyout (rounded to $100k). Only the buyouts count
    # toward the contract total.
    option_money <- 0
    option_buy_out <- 0
    vest_money <- 0
    vest_buy_out <- 0
    if (input$club_option == "YES") {
      option_money <- as.numeric(round_any(guaranteed / as.numeric(years) * 1.25, 1000))
      option_buy_out <- round_any(as.numeric(option_money * 0.1), 100000)
    }
    if (input$vest_option == "YES") {
      vest_money <- as.numeric(round_any(guaranteed / as.numeric(years) * 1.25, 1000))
      vest_buy_out <- as.numeric(round_any(as.numeric(vest_money * 0.1), 100000))
    }
    # Contract total and the league "points" valuation of the bid
    # (same formula as the submission path).
    total <- guaranteed + as.numeric(option_buy_out) + as.numeric(vest_buy_out) + signing_bonus
    AAV <- as.numeric(total) / as.numeric(years)
    points <- as.numeric(round_any(
      (as.numeric(total) + (as.numeric(AAV) * 3) + (salary[1] * 1) -
         (as.numeric(option_buy_out) * 1) + (as.numeric(vest_buy_out) * 1)) / (1000000) -
        (as.numeric(years) * 1.5), 1))
    points
  }
}
)
output$points <- renderPrint({
# Show the point value computed by sliderValues2() (the "check" preview)
# for logged-in users only.
if (USER$Logged == TRUE){
sliderValues2()
}
})
recent_email <- reactive({
  # Polls the free-agent auction state every 20 seconds: recomputes the time
  # remaining for every player with an active clock, fires 24/12/1/0-hour
  # alerts (via updateStatus) when a threshold is crossed, persists the
  # alert-clock table, and returns the ten most recent bids for display.
  invalidateLater(20000,session)
  # BUGFIX: the connection was previously opened without assigning the
  # result, so `con` was unresolved below (or a connection was leaked that
  # dbDisconnect(con) at the end could never close).
  con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
  # Loads FA_TABLE from mysql
  tbl <- dbReadTable(con,"tb_name",row.names=NULL)
  # Subsets the tbl by desired columns and saves it to tbl7
  tbl7 <- tbl[,c("Player","Bid_Num","Start_Time","End_Time","Points")]
  # Get a list of all FA names
  name_list <- unique(tbl7$Player)
  # Vector that keeps the row of FA with highest bid_number
  retain_row <- vector()
  # For loop that will assign the row to retain to the vector called 'retain_row'
  for(k in 1:length(name_list))
  {
    max_bid <- max(tbl7$Points[tbl7$Player %in% name_list[k]],na.rm = TRUE)
    # [1] guards against several rows sharing the same max points for one
    # player, which would otherwise make this a length>1 assignment error.
    retain_row[k] <- which((tbl7$Points == max_bid) & (tbl7$Player == name_list[k]))[1]
  }
  # Subsets the tbl7 by row number saved on "retain_row"
  tbl7 <- tbl7[retain_row,]
  # Create column called "Time_Left"
  tbl7$Time_Left <- ""
  # If Bid_Num is more than 1, add time left. If Bid_Num is not more than 1, Then add "NA"
  # (the +18000 seconds corrects a 5-hour timezone offset in the stored times)
  for(l in 1:nrow(tbl7))
  {
    ifelse(tbl7$Bid_Num[l] > 1, tbl7$Time_Left[l] <- round(as.numeric(as.POSIXct(tbl7$End_Time[l]),units="sec") - as.numeric(as.POSIXct(now(tzone="EST")),units="sec"),digits=0) + 18000,tbl7$Time_Left[l] <- "NA")
  }
  # Remove row with NA value in Time Left column
  tbl7 <- tbl7[!tbl7$Time_Left %in% c(NA,"NA"),]
  tbl7$Time_Left <- as.numeric(tbl7$Time_Left)
  # Read "clock" table from mysql server
  clock <- dbReadTable(con,"clock2",row.names=NULL)
  clock$send <- as.character(clock$send)
  # 24hr, 12hr, 1hr, and 0hr convert to seconds
  t24 <- 3600 * 24 # 86400
  t12 <- 3600 * 12 # 43200
  t1 <- 3600 # 3600
  t0 <- 0 # 0
  # Checks the clock24, clock12, and clock1 variable. Clock shows "YES" when email already has been
  # sent out at the hours specified. (e.g. 24, 12, or 1 hr). "NO" when email has not been sent out.
  # Here is what this loop does:
  # 1) If email has been sent out at the hours specified (So "YES" is given) but 'time remaining'
  # for specific player from tbl7 is greater than the hours (24,12 or 1), then reset the clock24,
  # clock12, and clock1 of specific player to "yes" from "no", making player eligible for mass email again.
  # 2) If email has not been sent out at the hours specified (So "NO" is given) but 'time remaining'
  # for specific player from tbl7 is less than the hours (24: less than 24 but more than 12,
  # 12: less than 12 but more than 1, or 1: less than 1 but has not been timed out), then send out a
  # mass email about the player.
  for(m in 1:nrow(clock))
  {
    if(length(which(tbl7$Player %in% clock$Player[m])) > 0)
    {
      # If time left for particular player is more than 24 hours and labeled "YES", that means
      # email has been sent out before, but bidding increased time left, making it eligible for email
      # alert again. So switch label to "NO"
      # Run this if time left of particular is more than 24 hours
      if(((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t24) == TRUE)
      {
        # Reset all three stage flags so the player is eligible for alerts again.
        ifelse((clock$clock24[m] == "YES") == TRUE,clock$clock24[m] <- "NO",clock$clock24[m] <- "NO")
        clock$clock12[m] <- "NO"
        clock$clock1[m] <- "NO"
      }
      # Run this if time left of particular player between 12 and 24
      if((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t24) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t12) == TRUE))
      {
        # If email has been sent out 24hr and time remaining is between 12 and 24, keep it "YES". If not email hasn't been sent, keep it "NO" so you can send email.
        ifelse((clock$clock24[m] == "YES") == TRUE, clock$clock24[m] <- "YES", clock$clock24[m] <- "NO")
        # Email has not been sent out, write "24" into "send" form. This is a way to signal the system
        # to send 24-hour warning email.
        if(clock$clock24[m] == "NO")
        {
          clock$send[m] <- 24
          clock$clock24[m] <- "YES"
        }
      }
      ###
      if(((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t12) == TRUE)
      {
        # More than 12 hours left: clear the 12hr and 1hr stage flags.
        ifelse((clock$clock12[m] == "YES") == TRUE,clock$clock12[m] <- "NO",clock$clock12[m] <- "NO")
        clock$clock1[m] <- "NO"
      }
      # Run this if time left of particular between 1 and 12
      if((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t12) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t1) == TRUE))
      {
        ifelse((clock$clock12[m] == "YES") == TRUE, clock$clock12[m] <- "YES", clock$clock12[m] <- "NO")
        if(clock$clock12[m] == "NO")
        {
          clock$send[m] <- 12
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
      ###
      if(((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) >= t1) == TRUE)
      {
        # More than 1 hour left: clear the 1hr stage flag.
        ifelse((clock$clock1[m] == "YES") == TRUE,clock$clock1[m] <- "NO",clock$clock1[m] <- "NO")
      }
      # Run this if time left of particular between 0 and 1
      if((((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) < t1) == TRUE) & (((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) > 0) == TRUE))
      {
        ifelse((clock$clock1[m] == "YES") == TRUE, clock$clock1[m] <- "YES", clock$clock1[m] <- "NO")
        if(clock$clock1[m] == "NO")
        {
          clock$send[m] <- 1
          clock$clock1[m] <- "YES"
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
      # Clock expired: queue the final "signed" announcement (send = 0).
      if(((tbl7$Time_Left[which(tbl7$Player %in% clock$Player[m])]) <= 0) == TRUE)
      {
        ifelse((clock$clockend[m] == "YES") == TRUE,clock$clockend[m] <- "YES",clock$clockend[m] <- "NO")
        if(clock$clockend[m] == "NO")
        {
          clock$send[m] <- 0
          clock$clockend[m] <- "YES"
          clock$clock1[m] <- "YES"
          clock$clock12[m] <- "YES"
          clock$clock24[m] <- "YES"
        }
      }
    }
    if(length(which(tbl7$Player %in% clock$Player[m])) == 0)
    {
      next;
    }
  }
  # Dispatch every queued alert; updateStatus failures are retried once and
  # the queue slot is cleared either way.
  mail_out <- which(clock$send %in% c("0","1","12","24"))
  if(length(mail_out) > 0)
  {
    for(d in 1:length(mail_out))
    {
      which_hour <- clock$send[mail_out[d]]
      if(which_hour %in% c("1","12","24"))
      {
        t <- try(updateStatus(paste0(which_hour,"-hour alert for ",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[2]," ",sub(",","",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[1]),". Make your bid by ", tbl7$End_Time[tbl7$Player %in% clock$Player[mail_out[d]]],"ET")))
      if("try-error" %in% class(t)){
        clock$send[mail_out[d]] <- NA
      }
      if(!("try-error" %in% class(t)))
      {
        updateStatus(paste0(which_hour,"-hour alert for ",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[2]," ",sub(",","",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[1]),". Make your bid by ", tbl7$End_Time[tbl7$Player %in% clock$Player[mail_out[d]]],"ET"))
      }
      }
      if(which_hour %in% c("0",0))
      {
        # Auction is over: announce the winning club and deal summary.
        tbl_highest <- tbl[tbl$Player == clock$Player[mail_out[d]],]
        tbl_highest <- tbl_highest[order(tbl_highest$Points,decreasing = TRUE),]
        tbl_highest <- tbl_highest[1,]
        t <- try(updateStatus(paste0("Going once, going twice, and..SOLD! ",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[2]," ",sub(",","",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[1])," signs with ",tbl_highest$Club[1]," on a deal worth ",tbl_highest$summary[1])))
        if("try-error" %in% class(t)){
          clock$send[mail_out[d]] <- NA
        }
        if(!("try-error" %in% class(t)))
        {
          updateStatus(paste0("Going once, going twice, and..SOLD! ",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[2]," ",sub(",","",unlist(strsplit(as.character(clock$Player[mail_out[d]])," "))[1])," signs with ",tbl_highest$Club[1]," on a deal worth ",tbl_highest$summary[1]))
        }
      }
    }
    clock$send[which(clock$send %in% c(0,1,12,24))] <- NA
  }
  clock <- clock[,c("Player","clock24","clock12","clock1","clockend","send")]
  # Persist the clock state once. (This write previously ran twice
  # back-to-back with identical arguments; the duplicate was removed.)
  dbWriteTable(con,"tb_name",clock,overwrite=TRUE)
  write.csv(clock,"clocker.csv",row.names = FALSE)
  dbDisconnect(con)
  # Return the ten most recent bids (last ten rows of FA_TABLE) for the UI.
  tbl8 <- tbl[(nrow(tbl)):(nrow(tbl)-9),]
  tbl8 <- tbl8[,c("row_names","Player","Year_Guaranteed","summary","End_Time")]
  tbl8$row_names <- c(1:10)
  tbl8
})
output$recent <- renderTable({
# Table of the ten most recent bids. Note that recent_email() also fires
# the scheduled auction alerts and persists the clock table as a side
# effect of producing this value.
recent_email()
})
output$signed <- renderTable({
  # Table of completed signings: for every player whose auction clock has
  # expired, show the single winning (maximum-points) bid. Refreshes every
  # five minutes. (A dead no-op branch `if(nrow > 0) tblss <- tblss` was
  # removed from the original.)
  if(USER$Logged == TRUE){
    invalidateLater(300000,session)
    con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
    clock <- dbReadTable(con,"tb_name",row.names=NULL)
    tbl <- dbReadTable(con,"tb_name",row.names=NULL)
    dbDisconnect(con)
    # Players flagged as finished ("clockend" == YES) in the clock table.
    signed_player <- unique(as.character(clock$Player[clock$clockend == "YES"]))
    tblss <- tbl[(tbl$Player %in% signed_player),]
    if(nrow(tblss) > 0)
    {
      # Per player, keep the first row carrying that player's maximum points.
      test <- aggregate(x=tblss$Points,by=list(tblss$Player),FUN="max")
      retainer <- vector()
      for(v in seq_len(nrow(test)))
      {
        retainer[v] <- which((tblss$Points %in% test$x[v]) & (tblss$Player %in% test$Group.1[v]))[1]
      }
      tblss <- tblss[retainer,]
      tblss <- tblss[tblss$Club != "NONE",]
      tblss <- tblss[,c("Player","Club","Year_Guaranteed","summary","Points","Bid_Num","Contract_Status")]
    }
    # The Club filter above can empty the table again, so re-check here
    # rather than reusing the first nrow() test.
    if(nrow(tblss) == 0)
    {
      tblss <- "No FA has been signed yet"
    }
    tblss
  }
})
output$sort_by_team <- renderTable({
  # For the logged-in user's club: one row per free agent the club has bid
  # on, showing the club's best bid, the current winning bid, and the
  # club's rank among all bids for that player.
  # PERF/IDIOM: the original grew the result with rbind() inside the loop
  # (O(n^2)) after seeding it with an NA row that was re-filtered every
  # iteration; rows are now collected in a preallocated list and bound once.
  if(USER$Logged == TRUE){
    con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
    tbl56 <- dbReadTable(con,"tb_name",row.names=NULL)
    tbl10 <- dbReadTable(con,"tb_name",row.names=NULL)
    dbDisconnect(con)
    # Resolve the user's club name, then every player that club has bid on.
    club_name <- tbl56$team[tbl56$user == input$Username]
    tbl11 <- tbl10[tbl10$Club == tbl56$team[tbl56$user == input$Username],]
    all_bidded <- unique(tbl11$Player)
    if(length(all_bidded) > 0)
    {
      rows <- vector("list", length(all_bidded))
      for(t in seq_along(all_bidded))
      {
        # All real bids for this player, highest points first.
        tbl12 <- tbl10[tbl10$Player %in% all_bidded[t],]
        tbl12 <- tbl12[tbl12$Club != "NONE",]
        tbl12 <- tbl12[order(tbl12$Points,decreasing=TRUE),]
        # The club's best bid and its rank within the sorted bid list.
        max_point <- unique(max(tbl12$Points[tbl12$Club %in% club_name]))
        rank <- which((tbl12$Points == max_point) & (tbl12$Club %in% club_name))[1]
        # All values coerced to character, matching the original row shape.
        rows[[t]] <- data.frame(
          Player = as.character(all_bidded[t]),
          Year_Guaranteed = as.character(tbl12$Year_Guaranteed[rank]),
          summary = as.character(tbl12$summary[rank]),
          Points = as.character(max_point),
          Winning_Bid_Points = as.character(tbl12$Points[1]),
          Your_Rank = as.character(rank),
          stringsAsFactors = FALSE
        )
      }
      tbl14 <- do.call(rbind, rows)
    }
    if(length(all_bidded) == 0)
    {
      tbl14 <- "You have not pursued any FA yet."
    }
    tbl14
  }
})
output$all_results <- renderDataTable({
  # Live auction board: one row per free agent still on the clock, showing
  # the leading bid, its summary, position, and a human-readable countdown.
  if(USER$Logged == TRUE){
    con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
    tbl <- dbReadTable(con,"FA_TABLE",row.names=NULL)
    clock <- dbReadTable(con,"tb_name",row.names=NULL)
    dbDisconnect(con)
    # Get a names of all FA
    all_names <- unique(tbl$Player)
    # Order them by highest to lowest point
    tbl3 <- tbl[order(tbl$Points,decreasing=TRUE),]
    # House highest bidding point for all players
    high_bid <- vector()
    # Assign highest bidding point for all players
    for(i in 1:length(all_names))
    {
      max_point <- max(tbl3$Bid_Num[tbl3$Player %in% all_names[i]],na.rm = TRUE)
      # [1] guards against duplicate rows sharing the same max bid number,
      # which would otherwise make this a length>1 assignment and error.
      # (The sibling loop below already used [1].)
      high_bid[i] <- which((tbl3$Bid_Num %in% max_point) & (tbl3$Player %in% all_names[i]))[1]
    }
    # Retain only row that contains highest bid for each player
    tbl3 <- tbl3[high_bid,]
    # Retain only the columns specified below in tbl2
    tbl2 <- tbl[,c("Player","Points","summary","Start_Time","End_Time","Bid_Num")]
    tbl2$Position <- ""
    # In 'highest' vector, only keep max points from each player
    highest <- vector()
    # Keep only the rows with highest bid
    for(i in 1:length(all_names))
    {
      max_point <- max(tbl2$Points[tbl2$Player %in% all_names[i]],na.rm = TRUE)
      highest[i] <- which((tbl2$Points %in% max_point) & (tbl2$Player %in% all_names[i]))[1]
    }
    tbl2 <- tbl2[highest,]
    tbl2$Time_left <- ""
    Free_agent <- read.csv("FA.csv")
    for(k in 1:nrow(tbl2))
    {
      # Pull the clock times from the highest-bid row and look up position.
      tbl2$Start_Time[k] <- tbl2$Start_Time[tbl3$Player %in% tbl2$Player[k]]
      tbl2$End_Time[k] <- tbl2$End_Time[tbl3$Player %in% tbl2$Player[k]]
      tbl2$Position[k] <- as.character(Free_agent$Position[Free_agent$Name %in% tbl2$Player[k]])
      if((tbl2$Bid_Num[k] == 1))
      {
        tbl2$Start_Time[k] <- "Clock hasn't started"
        tbl2$End_Time[k] <- "Clock hasn't started"
        tbl2$Time_left[k] <- "Clock hasn't started"
        # BUGFIX: this summary reset used to sit OUTSIDE the for loop,
        # where the stale loop index only ever blanked the last row's
        # summary; it belongs here with the other Bid_Num == 1 handling.
        tbl2$summary[k] <- "--"
      }
      if((tbl2$Bid_Num[k] > 1))
      {
        # +18000 seconds corrects a 5-hour timezone offset in stored times.
        tbl2$Time_left[k] <- as.numeric(as.numeric(as.POSIXct(tbl2$End_Time[tbl2$Player %in% tbl3$Player[k]]),units="sec") - as.numeric(as.POSIXct(now(tzone="EST")),units="sec")) + 18000
        tbl2$Time_left[k] <- as.character(seconds_to_period(tbl2$Time_left[k]))
        tbl2$Time_left[k] <- paste(as.character(unlist(strsplit(tbl2$Time_left[k]," "))[1]),as.character(unlist(strsplit(tbl2$Time_left[k]," "))[2]),as.character(unlist(strsplit(tbl2$Time_left[k]," "))[3]),paste(substr(unlist(strsplit(tbl2$Time_left[k]," "))[4],1,4)," S",sep=""),sep=" ")
        tbl2$Time_left[k] <- sub(pattern = "NAS",replacement = "",x = tbl2$Time_left[k])
        tbl2$Time_left[k] <- paste0(tbl2$Time_left[k]," left.")
      }
    }
    # Hide players whose auction already finished, drop the helper column,
    # and hand the result to DT.
    tbl2 <- tbl2[order(tbl2$Player,decreasing=TRUE),]
    ineligibles <- clock$Player[clock$clockend == "YES"]
    tbl2 <- tbl2[!tbl2$Player %in% ineligibles,]
    tbl2$Bid_Num <- NULL
    tbl2 <- DT::datatable(tbl2,options=list(pageLength = 10))
    tbl2
  }
})
output$announce <- renderText({
# Static announcement text shown verbatim in the UI (lists of newly added
# free agents). Edit the string below to change the announcement.
"
Jan-16-2016 2:10 AM: New free agents:
Huff, David
Giavotella, Johnny
Rasmus, Cory
Gentry, Craig
Pena, Brayan
Hamilton, Josh
Fryer, Eric
Freeman, Mike
Lobaton, Jose
Thank you,
JR
--------
Here are the list of just added FAs:
Javy Guerra
Al Albuquerque
Hector Santiago
A.J. Achter
Justin Miller
Brandon Barnes
David Lough
Emmanuel Burris
David Buchanan
Steve Clevenger
Eric Sogard
Matt McBride
Felix Doubront
Jarrod Parker
Collin Cowgill
Michael Kirkman
Tom Wilhelmsen
Brett Lawrie
Avisail Garcia
Daniel Webb
Neil Ramirez
Kyle Gibson
Eduardo Escobar
Oswaldo Arcia
Jake Elmore
Daniel Fields
Chris Bassitt
Tyler Olson
Jabari Blash
Mark Canha
Tyler Ladendorf
Andrew Lambo
Josh Rodriguez
Erasmo Ramirez
Austin Adams
Ben Revere
Charlie Furbush
Dillon Gee
Alexi Amarista
Vicente Campos
Jose Pirela
Christian Friedrich
Blake Wood
Ryan Flaherty
Cesar Ramos
Eric Surkamp
Rene Rivera
Jeff Walters
Eric Campbell
Aaron Brooks
Spencer Patton
Clayton Richard
Cody Ege"
})
# Download handler: exports the winning contract for every finished auction
# to contract_info.csv, and persists the same table back to the database.
output$downloadData <- downloadHandler(
filename = function() {
paste('contract_info.csv', sep='')
},
# NOTE(review): Shiny invokes content() with a single file-path argument,
# so `results` is never supplied by the framework — it is reassigned below
# before first use and effectively acts as a local variable.
content = function(filename,results) {
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
clocked <- dbReadTable(con,"tb_name",row.names=NULL)
# Players whose auction clock has finished ("clockend" flagged YES).
clocked_name <- clocked$Player[clocked$clockend %in% "YES"]
results <- dbReadTable(con,"tb_name",row.names=NULL)
total_dollar_table <- dbReadTable(con,"tb_name",row.names = NULL)
results <- results[,c("Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020","X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting","Buyout2","Points","AAV","Total","Contract_Status","End_Time")]
results <- results[results$Player %in% clocked_name,]
# Per finished player, find the row index of the maximum-points bid;
# when several rows tie, the last matching row is kept.
vecs <- vector()
for(n in 1:length(clocked_name))
{
maximal <- max(results$Points[results$Player %in% clocked_name[n]])[1]
if(length(which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal))) == 1)
{
vecs[n] <- which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal))
}
if(length(which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal))) > 1)
{
vecs[n] <- which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal))[length(which((results$Player %in% clocked_name[n]) & (results$Points %in% maximal)))]
}
}
results <- results[vecs,]
# Format the money/number columns (5:22) with thousands separators.
for(r in 5:22)
{
results[,r] <- prettyNum(results[,r],big.mark = ",",scientific=FALSE)
}
# Side effect: the exported table is also written back to the database.
total_dollar_table <- results
dbWriteTable(conn=con,"tb_name",total_dollar_table,overwrite=TRUE)
dbDisconnect(con)
write.csv(results, filename,row.names = FALSE)
}
)
# Download handler: exports the logged-in club's full bidding history to
# your_bidding_history.csv, ordered by bid start time.
output$downloadData2 <- downloadHandler(
filename = function() {
paste('your_bidding_history.csv', sep='')
},
# NOTE(review): Shiny invokes content() with a single file-path argument,
# so `tables` is never supplied by the framework — it is reassigned below
# before first use and effectively acts as a local variable.
content = function(filename,tables) {
if(USER$Logged == TRUE){
con <- dbConnect(drv = MySQL(),user="userid",password="password",dbname="db_name",host="host_address")
tbl56 <- dbReadTable(con,"tb_name",row.names=NULL)
tables <- dbReadTable(con,"tb_name",row.names=NULL)
dbDisconnect(con)
# Resolve the user's club, then keep only that club's bids.
team_selected <- tbl56$team[as.character(tbl56$user) %in% as.character(input$Username)]
tables <- tables[tables$Club %in% team_selected,]
tables <- tables[order(tables$Start_Time, decreasing=FALSE),]
#tables <- tables[,c(6:24)]
tables <- tables[,c("Player","Club","Year_Guaranteed","summary","Signing_Bonus","X2017","X2018","X2019","X2020","X2021","X2022","X2023","X2024","X2025","X2026","Club_Option","Buyout_1","Vesting","Buyout2","Points","AAV","Total","Contract_Status")]
# Format the money/number columns (5:22) with thousands separators.
for(s in 5:22)
{
tables[,s] <- prettyNum(tables[,s],big.mark = ",",scientific=FALSE)
}
write.csv(tables,filename,row.names = FALSE)
}
}
)
})
# Launch the Shiny application from the `ui` and `server` objects defined above.
shinyApp(ui = ui, server = server)
|
#' cum_usage_plot UI Function
#'
#' @description UI half of the cumulative-usage Shiny module: a wide column
#'   holding the cumulative-usage dygraph and a narrow side column holding
#'   the usage totals block and the savings table, each behind a spinner.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_cum_usage_plot_ui <- function(id){
  ns <- NS(id)
  # Main plot area (10 of 12 grid units).
  plot_col <- column(
    10,
    shinycssloaders::withSpinner(dygraphs::dygraphOutput(ns("cum_usage_plot")))
  )
  # Side panel (2 of 12 grid units): totals block, then savings table.
  side_col <- column(
    2,
    shinycssloaders::withSpinner(uiOutput(ns("cum_usage_info"))),
    shinycssloaders::withSpinner(tableOutput(ns("savings_table")))
  )
  tagList(plot_col, side_col)
}
#' cum_usage_plot Server Function
#'
#' @description Server half of the cumulative-usage module: filters the tidy
#'   energy data by the user's plot controls, accumulates each tariff/fuel
#'   series, and renders a dygraph plus window-dependent totals and a
#'   savings table.
#'
#' @param id Module id, matching the one given to mod_cum_usage_plot_ui().
#' @param tidy_energy Data frame with at least the columns var, seq_id,
#'   fuel, date and value (long format) — assumed; confirm against caller.
#' @param plot1vars List of reactives supplying the plot controls:
#'   var(), tariff(), history(), fuel().
#'
#' @noRd
mod_cum_usage_plot_server <- function(id, tidy_energy, plot1vars){
moduleServer( id, function(input, output, session){
ns <- session$ns
# Cumulative value per series, where a series id is <tariff code>_<fuel
# initial> (e.g. "SP_g"); seq_id 5 maps to an empty code, so the history
# series come out as "_g" / "_e". seq_id 5 is only included when the
# history() control is on.
id_fuel_cumsum <- reactive({
id_codes <- tibble::tribble(
~seq_id, ~code,
1, "SP",
2, "YE",
3, "TE",
4, "GS",
5, ""
)
tidy_energy %>%
dplyr::filter(
var == plot1vars$var(),
seq_id %in% c(plot1vars$tariff(), if(plot1vars$history()) c(5) else c()),
fuel %in% plot1vars$fuel()
) %>%
dplyr::select(seq_id, fuel, date, value) %>%
dplyr::left_join(id_codes, by = "seq_id") %>%
dplyr::mutate(
fuel_code = substr(fuel, 1, 1),
id_fuel = paste0(code, "_", fuel_code)
) %>%
#dplyr::select(id_fuel, date, value) %>%
dplyr::group_by(id_fuel) %>%
dplyr::mutate(
seq_id = dplyr::first(seq_id),
fuel = dplyr::first(fuel),
value = cumsum(value)
) %>%
dplyr::ungroup()
})
# Per-series min/max/diff of the cumulative value inside the date window
# currently selected on the dygraph's range selector (dygraphs exposes it
# as input$<outputId>_date_window).
id_fuel_cumsum_window_summary <- reactive({
date_window <- as.Date(req(input$cum_usage_plot_date_window))
id_fuel_cumsum() %>%
dplyr::filter(
date >= date_window[1],
date <= date_window[2]
) %>%
dplyr::group_by(id_fuel) %>%
dplyr::summarise(
seq_id = dplyr::first(seq_id),
fuel = dplyr::first(fuel),
max_value = max(value),
min_value = min(value),
diff_value = max_value - min_value
)
})
# Usage of the history gas series ("_g") over the selected window.
gas_total <- reactive({
id_fuel_cumsum_window_summary() %>%
dplyr::filter(id_fuel == "_g") %>%
dplyr::pull("diff_value") %>%
round(2)
})
# Usage of the history electricity series ("_e") over the selected window.
electricity_total <- reactive({
id_fuel_cumsum_window_summary() %>%
dplyr::filter(id_fuel == "_e") %>%
dplyr::pull("diff_value") %>%
round(2)
})
# Per-tariff gas usage minus the history gas total (negative = saving).
gas_savings <- reactive({
id_fuel_cumsum_window_summary() %>%
dplyr::filter(fuel == "gas") %>%
dplyr::mutate(
savings = diff_value - gas_total()
)
})
# Per-tariff electricity usage minus the history electricity total.
electricity_savings <- reactive({
id_fuel_cumsum_window_summary() %>%
dplyr::filter(fuel == "electricity") %>%
dplyr::mutate(
savings = diff_value - electricity_total()
)
})
savings <- reactive({
dplyr::bind_rows(gas_savings(), electricity_savings())
})
# One row per tariff with a gas ("g") and electricity ("e") savings
# column, plus a total column ("t") when both fuels are present. The
# history rows ("_g"/"_e") are excluded since their savings are zero by
# construction.
output$savings_table <- renderTable({
savings_data <- savings() %>%
dplyr::filter(id_fuel != "_e") %>%
dplyr::filter(id_fuel != "_g")
result <- tibble::tibble()
if(nrow(savings_data) > 0) {
result <- savings_data %>%
dplyr::select(id_fuel, savings) %>%
tidyr::separate(id_fuel, into = c("id", "fuel")) %>%
tidyr::pivot_wider(names_from = "fuel", values_from = "savings")
}
# NOTE(review): scalar condition — `&&` would be the idiomatic operator.
if("g" %in% colnames(result) & "e" %in% colnames(result)) {
result <- result %>%
dplyr::mutate(
t = g + e
)
}
result
})
# Summary panel: gas, electricity, and combined totals for the window.
output$cum_usage_info <- renderUI({
shinydashboardPlus::boxPad(
color = "green",
shinydashboardPlus::descriptionBlock(
header = gas_total(),
text = "Gas",
rightBorder = FALSE,
marginBottom = TRUE
),
shinydashboardPlus::descriptionBlock(
header = electricity_total(),
text = "Electricity",
rightBorder = FALSE,
marginBottom = TRUE
),
shinydashboardPlus::descriptionBlock(
header = gas_total() + electricity_total(),
text = "Total",
rightBorder = FALSE,
marginBottom = TRUE
)
)
})
# Cumulative-usage dygraph: one line per series, wide-format xts input.
# History series ("_g"/"_e") are drawn dashed in grey tones.
output$cum_usage_plot <- dygraphs::renderDygraph({
q <- id_fuel_cumsum() %>%
dplyr::select(id_fuel, date, value) %>%
tidyr::pivot_wider(names_from = id_fuel, values_from = value)
if(nrow(q) == 0) return()
q <- tibble::as_tibble(q)
xq <- xts::xts(q[,-1], order.by = q$date)
# Axis label follows the plotted variable: money for cost, kWh otherwise.
if(plot1vars$var() == "cost") {
y_axis_label <- "GBP"
} else {
y_axis_label <- "kWh"
}
p <- dygraphs::dygraph(xq, group = "usage") %>%
dygraphs::dyRangeSelector(dateWindow = c("2021-01-01", as.character(Sys.Date()))) %>%
dygraphs::dyOptions(stepPlot = TRUE) %>%
dygraphs::dyLegend(show = "follow") %>%
dygraphs::dyAxis("y", label = y_axis_label) %>%
dygraphs::dyHighlight(highlightSeriesBackgroundAlpha = 0.2)
if("_g" %in% colnames(q)) {
p <- p %>%
dygraphs::dySeries("_g", color = "#BDBDBD", strokePattern = "dashed")
}
if("_e" %in% colnames(q)) {
p <- p %>%
dygraphs::dySeries("_e", color = "#636363", strokePattern = "dashed")
}
p
})
})
}
## To be copied in the UI
# mod_cum_usage_plot_ui("cum_usage_plot_ui_1")
## To be copied in the server
# callModule(mod_cum_usage_plot_server, "cum_usage_plot_ui_1")
|
/R/mod_cum_usage_plot.R
|
permissive
|
MHenderson/energy-use
|
R
| false
| false
| 5,316
|
r
|
#' cum_usage_plot UI Function
#'
#' @description A shiny Module. Lays out a wide column for the
#'   cumulative-usage dygraph and a narrow column for the usage totals and
#'   savings table, each wrapped in a loading spinner.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_cum_usage_plot_ui <- function(id){
ns <- NS(id)
tagList(
# Main plot area (10 of 12 grid units).
column(10,
shinycssloaders::withSpinner(dygraphs::dygraphOutput(ns("cum_usage_plot")))
),
# Side panel (2 of 12 grid units): totals block, then savings table.
column(2,
shinycssloaders::withSpinner(uiOutput(ns("cum_usage_info"))),
shinycssloaders::withSpinner(tableOutput(ns("savings_table")))
)
)
}
#' cum_usage_plot Server Function
#'
#' @noRd
mod_cum_usage_plot_server <- function(id, tidy_energy, plot1vars){
  # Server half of the cumulative-usage module.
  #
  # id          - module id, matching the one given to mod_cum_usage_plot_ui().
  # tidy_energy - long-format usage data; the columns used here are
  #               var, seq_id, fuel, date and value.
  # plot1vars   - list of reactives holding the user's plot settings:
  #               var(), tariff(), history() and fuel().
  moduleServer( id, function(input, output, session){
    ns <- session$ns
    # Cumulative value per tariff-and-fuel series, restricted to the
    # currently selected variable, tariffs and fuels.
    id_fuel_cumsum <- reactive({
      # Map tariff seq_ids to short display codes. seq_id 5 (the historic
      # comparison series) gets an empty code, so its series ids become
      # "_g" / "_e" below.
      id_codes <- tibble::tribble(
        ~seq_id, ~code,
        1, "SP",
        2, "YE",
        3, "TE",
        4, "GS",
        5, ""
      )
      tidy_energy %>%
        dplyr::filter(
          var == plot1vars$var(),
          # seq_id 5 (history) is included only when the history toggle is on
          seq_id %in% c(plot1vars$tariff(), if(plot1vars$history()) c(5) else c()),
          fuel %in% plot1vars$fuel()
        ) %>%
        dplyr::select(seq_id, fuel, date, value) %>%
        dplyr::left_join(id_codes, by = "seq_id") %>%
        dplyr::mutate(
          # series id such as "SP_g" or "_e" (tariff code + first letter of fuel)
          fuel_code = substr(fuel, 1, 1),
          id_fuel = paste0(code, "_", fuel_code)
        ) %>%
        #dplyr::select(id_fuel, date, value) %>%
        dplyr::group_by(id_fuel) %>%
        dplyr::mutate(
          seq_id = dplyr::first(seq_id),
          fuel = dplyr::first(fuel),
          # running total within each series
          value = cumsum(value)
        ) %>%
        dplyr::ungroup()
    })
    # Per-series range (min/max/diff of the cumulative value) within the
    # dygraph's currently visible date window. dygraphs exposes the window
    # as input$<outputId>_date_window.
    id_fuel_cumsum_window_summary <- reactive({
      date_window <- as.Date(req(input$cum_usage_plot_date_window))
      id_fuel_cumsum() %>%
        dplyr::filter(
          date >= date_window[1],
          date <= date_window[2]
        ) %>%
        dplyr::group_by(id_fuel) %>%
        dplyr::summarise(
          seq_id = dplyr::first(seq_id),
          fuel = dplyr::first(fuel),
          max_value = max(value),
          min_value = min(value),
          diff_value = max_value - min_value
        )
    })
    # Actual gas total over the window, taken from the historic "_g" series.
    # NOTE(review): this is numeric(0) when "_g" is absent, which would break
    # the sums in cum_usage_info below - confirm the historic series is
    # always present when this output is shown.
    gas_total <- reactive({
      id_fuel_cumsum_window_summary() %>%
        dplyr::filter(id_fuel == "_g") %>%
        dplyr::pull("diff_value") %>%
        round(2)
    })
    # Actual electricity total over the window (historic "_e" series).
    electricity_total <- reactive({
      id_fuel_cumsum_window_summary() %>%
        dplyr::filter(id_fuel == "_e") %>%
        dplyr::pull("diff_value") %>%
        round(2)
    })
    # Difference between each gas tariff's windowed total and the actual
    # gas total over the same window.
    gas_savings <- reactive({
      id_fuel_cumsum_window_summary() %>%
        dplyr::filter(fuel == "gas") %>%
        dplyr::mutate(
          savings = diff_value - gas_total()
        )
    })
    # As gas_savings, for electricity tariffs.
    electricity_savings <- reactive({
      id_fuel_cumsum_window_summary() %>%
        dplyr::filter(fuel == "electricity") %>%
        dplyr::mutate(
          savings = diff_value - electricity_total()
        )
    })
    # All per-tariff savings rows (both fuels) in one table.
    savings <- reactive({
      dplyr::bind_rows(gas_savings(), electricity_savings())
    })
    # Savings table: one row per tariff code, one column per fuel, plus a
    # total column `t` when both fuels are present. The historic "_e"/"_g"
    # rows are excluded (their savings are zero by construction).
    output$savings_table <- renderTable({
      savings_data <- savings() %>%
        dplyr::filter(id_fuel != "_e") %>%
        dplyr::filter(id_fuel != "_g")
      result <- tibble::tibble()
      if(nrow(savings_data) > 0) {
        result <- savings_data %>%
          dplyr::select(id_fuel, savings) %>%
          tidyr::separate(id_fuel, into = c("id", "fuel")) %>%
          tidyr::pivot_wider(names_from = "fuel", values_from = "savings")
      }
      if("g" %in% colnames(result) & "e" %in% colnames(result)) {
        result <- result %>%
          dplyr::mutate(
            t = g + e
          )
      }
      result
    })
    # Summary panel with the actual gas, electricity and combined totals for
    # the visible window.
    output$cum_usage_info <- renderUI({
      shinydashboardPlus::boxPad(
        color = "green",
        shinydashboardPlus::descriptionBlock(
          header = gas_total(),
          text = "Gas",
          rightBorder = FALSE,
          marginBottom = TRUE
        ),
        shinydashboardPlus::descriptionBlock(
          header = electricity_total(),
          text = "Electricity",
          rightBorder = FALSE,
          marginBottom = TRUE
        ),
        shinydashboardPlus::descriptionBlock(
          header = gas_total() + electricity_total(),
          text = "Total",
          rightBorder = FALSE,
          marginBottom = TRUE
        )
      )
    })
    # Step-plot dygraph of the cumulative series, one line per id_fuel.
    # The historic "_g"/"_e" series are drawn dashed and grey.
    output$cum_usage_plot <- dygraphs::renderDygraph({
      # wide format: one column per series, indexed by date
      q <- id_fuel_cumsum() %>%
        dplyr::select(id_fuel, date, value) %>%
        tidyr::pivot_wider(names_from = id_fuel, values_from = value)
      if(nrow(q) == 0) return()
      q <- tibble::as_tibble(q)
      xq <- xts::xts(q[,-1], order.by = q$date)
      # y-axis unit follows the selected variable
      if(plot1vars$var() == "cost") {
        y_axis_label <- "GBP"
      } else {
        y_axis_label <- "kWh"
      }
      p <- dygraphs::dygraph(xq, group = "usage") %>%
        dygraphs::dyRangeSelector(dateWindow = c("2021-01-01", as.character(Sys.Date()))) %>%
        dygraphs::dyOptions(stepPlot = TRUE) %>%
        dygraphs::dyLegend(show = "follow") %>%
        dygraphs::dyAxis("y", label = y_axis_label) %>%
        dygraphs::dyHighlight(highlightSeriesBackgroundAlpha = 0.2)
      # style the historic comparison series, when present
      if("_g" %in% colnames(q)) {
        p <- p %>%
          dygraphs::dySeries("_g", color = "#BDBDBD", strokePattern = "dashed")
      }
      if("_e" %in% colnames(q)) {
        p <- p %>%
          dygraphs::dySeries("_e", color = "#636363", strokePattern = "dashed")
      }
      p
    })
  })
}
## To be copied in the UI
# mod_cum_usage_plot_ui("cum_usage_plot_ui_1")
## To be copied in the server
# callModule(mod_cum_usage_plot_server, "cum_usage_plot_ui_1")
|
# Bolton geographies map -------------------------------------------------------
# Builds an interactive leaflet map of Bolton's administrative geographies
# (borough, wards, MSOAs, LSOAs and integrated-care neighbourhoods) as
# toggleable overlays, then saves it to geographies_map.html.
# install.packages("readxl")
# install.packages("sf")
# install.packages("stringr")
# install.packages("dplyr")
# install.packages("leaflet")
# install.packages("janitor")
# install.packages("leaflet.extras")
# install.packages("htmlwidgets")
# install.packages("htmltools")
# install.packages("glue")
# install.packages("htmltools")
# install.packages("htmlwidgets")
library(readxl)
library(sf)
library(stringr)
library(dplyr)
library(leaflet)
library(janitor)
library(leaflet.extras)
library(htmlwidgets)
library(htmltools)
library(glue)
library(htmltools)   # NOTE(review): duplicate of the call above (harmless)
library(htmlwidgets) # NOTE(review): duplicate of the call above (harmless)
# geographical data ##################################################################################
# Borough(s) to keep; every boundary layer below is filtered to this.
desired_areas <- "Bolton"
# wards are hard coded as no borough indicator on the clipped to 20m ward boundaries
# LSOA
# LSOA boundaries 2011 (current)
# https://geoportal.statistics.gov.uk/datasets/lower-layer-super-output-areas-december-2011-generalised-clipped-boundaries-in-england-and-wales
lsoas_2011 <- st_read("G:\\Mapping Data\\R\\map\\Lower_Layer_Super_Output_Areas_December_2011_Generalised_Clipped__Boundaries_in_England_and_Wales/Lower_Layer_Super_Output_Areas_December_2011_Generalised_Clipped__Boundaries_in_England_and_Wales.shp")
# add boroughs variable from LSOA name (dropping the trailing 5 characters,
# the " 000A"-style suffix, leaves the borough name)
lsoas_2011 <- lsoas_2011 %>%
  mutate(borough = str_sub(lsoa11nm, 1, nchar(as.character(lsoa11nm))-5)) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter lsoas 2011 Bolton only
lsoas_bolton <- filter(lsoas_2011, borough %in% desired_areas)
# plot(st_geometry(lsoas_bolton)) # check areas look right
rm(lsoas_2011) # remove whole country of lsoas
# # OA - no name!!! so can't filter to bolton only
# # https://geoportal.statistics.gov.uk/datasets/output-areas-december-2001-generalised-clipped-boundaries-in-england-and-wales
# oas_2011 <- st_read("G:\\Mapping Data\\R\\map\\OA/Output_Areas_December_2001_Generalised_Clipped_Boundaries_in_England_and_Wales.shp")
#
# # add boroughs variable from LSOA name - no name!!!!!!
# oas_20112 <- oas_2011 %>%
#   mutate(borough = str_sub(lsoa11nm, 1, nchar(as.character(lsoa11nm))-5)) %>%
#   st_transform(crs = 4326) # transforms to lat/ long from OSGB36
#
# # filter lsoas 2011 Bolton only
# lsoas_bolton <- filter(lsoas_2011, borough %in% desired_areas)
# # plot(st_geometry(lsoas_bolton)) # check areas look right
# rm(lsoas_2011) # remove whole country of lsoas
# neighbourhoods (nine integrated health & social care areas)
# neighbourhoods<- c(st_union(lsoas_bolton[1:3,]),
#                    st_union(lsoas_bolton[4:6,]))
neighbourhoods <- st_read("G:\\Mapping Data\\neighbourhoods 9 areas for integ care\\boundaries/9 Areas 121216.TAB")
# friendly display names for the nine areas
new_names <- data.frame(AreaCode = c("Area 1A", "Area 1B", "Area 1C", "Area 2A", "Area 2B", "Area 2C", "Area 3A", "Area 3B", "Area 3C"),
                        newname = c("Horwich", "Chorley Roads", "Westhoughton", "Rumworth", "Farnworth/Kearsley", "Central/Great Lever", "Crompton/Halliwell", "Breightmet/Little Lever", "Turton")
)
neighbourhoods <- left_join(neighbourhoods, new_names, by = "AreaCode")
# declare OSGB36 / British National Grid on the source file before
# reprojecting to WGS84
st_crs(neighbourhoods) <- "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +datum=OSGB36 +units=m +no_defs"
neighbourhoods <- st_transform(neighbourhoods, crs = 4326)
rm(new_names) # remove new names list as no longer needed
#plot(st_geometry(neighbourhoods)) # check geometry looks ok.
# MSOA
# https://geoportal.statistics.gov.uk/datasets/middle-layer-super-output-areas-december-2011-boundaries-bgc
msoas_2011 <- st_read("G:\\Mapping Data\\R\\map\\MSOA/Middle_Layer_Super_Output_Areas_December_2011_Boundaries_BGC.shp")
# add borough variable from MSOA name (drop the trailing 4-character suffix)
msoas_2011 <- msoas_2011 %>%
  mutate(borough = str_sub(msoa11nm, 1, nchar(as.character(msoa11nm))-4)) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter msoas 2011 Bolton only
msoas_bolton <- filter(msoas_2011, borough %in% desired_areas)
# plot(st_geometry(msoas_bolton)) # check areas look right
rm(msoas_2011) # remove whole country of msoas
# local names for each Bolton MSOA, keyed on MSOA code
msoa_localnames <- data.frame(msoa11cd = c("E02000984","E02000985","E02000986","E02000987","E02000988",
                                           "E02000989","E02000990","E02000991","E02000992","E02000993",
                                           "E02000994","E02000995","E02000996","E02000997","E02000998",
                                           "E02000999","E02001000","E02001001","E02001002","E02001003",
                                           "E02001004","E02001005","E02001006","E02001007","E02001008",
                                           "E02001009","E02001010","E02001011","E02001012","E02001013",
                                           "E02001014","E02001015","E02001016","E02001017","E02001018"),
                              local_name = c("Egerton & Dunscar","Turton","Sharples","Horwich Town","Sweetlove",
                                             "Harwood","Horwich Loco","Smithills N&E","Blackrod","Tonge Moor & Hall i'th' Wood",
                                             "Halliwell Rd","Johnson Fold & Doffcocker","Breightmet N & Withins","Middlebrook & Brazley","Victory",
                                             "Town Centre","Tonge Fold","Heaton","Leverhulme & Darcy Lever","Lostock & Ladybridge",
                                             "Lower Deane & The Willows","Burnden","Daubhill","Little Lever","Lever Edge",
                                             "Deane & Middle Hulton","Moses Gate","Westhoughton East","Townleys","Over Hulton",
                                             "Wingates & Washacre","Central Farnworth","Highfield & New Bury","Central Kearsley","Daisy Hill")
)
# merge in local names
msoas_bolton <- left_join(msoas_bolton, msoa_localnames, by = "msoa11cd")
rm(msoa_localnames) # remove localnames as no longer needed
# # wards
# # https://www.ordnancesurvey.co.uk/business-government/products/boundaryline
# wards2 <- st_read("G:\\Mapping Data\\R\\map\\OS boundary file\\Data\\GB\\district_borough_unitary_ward.TAB")
# wards2 <- wards2 %>%
#   mutate(borough = str_replace_all(File_Name, "_", " "),
#          borough = str_replace_all(borough, "  ", " "),
#          borough = str_remove(borough, " \\(B\\)"), # () are special characters, need to escape them.
#          borough = str_remove(borough, " DISTRICT"),
#          borough = str_to_title(borough)) %>%
#   st_transform(crs = 4326) # transforms to lat/ long from OSGB36
#
# # filter wards bolton only
# wards_bolton <- filter(wards2, borough %in% desired_areas)
# # plot(st_geometry(wards_bolton)) # check areas look right
# rm(wards2) # remove whole country of wards
# wards clipped to 20m
# https://geoportal.statistics.gov.uk/datasets/wards-december-2011-boundaries-ew-bgc
#G:\Mapping Data\R\map\wards BGC
wards <- st_read("G:\\Mapping Data\\R\\map\\wards BGC/Wards_December_2011_Boundaries_EW_BGC.shp")
# filter wards bolton only, no borough column on this file, so done on wardcode number range
# but welsh have same num so only 'E' codes
wards_bolton <- wards %>%
  mutate(wd11cd = as.character(wd11cd),
         wardcode_num = str_sub(wd11cd, 2, nchar(wd11cd)),
         wardcode_num = as.numeric(wardcode_num),
         wardcode_letter = str_sub(wd11cd, 1, 1)) %>%
  filter(between(wardcode_num, 5000650, 5000669),
         wardcode_letter == "E") %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# BUG FIX: removed a leftover `wards_bolton <- filter(wards, borough %in% desired_areas)`
# line here. It overwrote the correctly filtered object built above and would
# error anyway, because this ward file has no `borough` column (see the
# comment above - that is why the filter uses the ward-code number range).
# plot(st_geometry(wards_bolton)) # check areas look right
rm(wards) # remove whole country of wards
# boroughs
boroughs <- st_read("G:\\Mapping Data\\R\\map\\OS boundary file\\Data\\GB\\district_borough_unitary.TAB")
boroughs <- boroughs %>%
  mutate(borough = str_remove(Name, " District \\(B\\)")) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter boroughs Bolton only
boroughs_bolton <- filter(boroughs, borough %in% desired_areas)
# plot(st_geometry(boroughs_bolton)) # check areas look right
rm(boroughs) # remove whole country of boroughs
# map ########################################################################################
my_bbox <- as.vector(st_bbox(boroughs_bolton)) # bounding box around selected areas to get corners for default map view
# popup text for each layer (HTML, interpolated per feature with glue)
borough_labels = (glue("<b>Borough</b><br>
                       Local Authority code: {boroughs_bolton$Census_Code}<br>
                       CCG code: 00T<br>
                       Name: {boroughs_bolton$borough}<br><br>
                       <i>Bolton CCG and Bolton Council use the same boundaries. However the CCG must also consider the needs of people who live outside Bolton but are registered with a Bolton GP.</i>"))
# NOTE(review): `Census_Code` and `Name` look like columns from the OS
# BoundaryLine ward file (the commented-out `wards2` above); the ONS BGC
# ward shapefile read here typically exposes `wd11cd` / `wd11nm` instead -
# verify these columns exist on `wards_bolton`.
ward_labels = (glue("<b>Ward</b><br>
                    Code: {wards_bolton$Census_Code}<br>
                    Name: {str_sub(wards_bolton$Name, 1, nchar(as.character(wards_bolton$Name)) - 5)}"))
msoa_labels = (glue("<b>MSOA</b><br>
                    Code: {msoas_bolton$msoa11cd}<br>
                    Name: {msoas_bolton$msoa11nmw}<br>
                    Local name: {msoas_bolton$local_name}"))
lsoa_labels = (glue("<b>LSOA</b><br>
                    Code: {lsoas_bolton$lsoa11cd}<br>
                    Name: {lsoas_bolton$lsoa11nmw}"))
neighbourhood_labels = (glue("<b>Neighbourhood</b><br>
                             Name: {neighbourhoods$newname}<br><br>
                             <i>Neighbourhoods are local geographies for integrated health & social care, made up of LSOAs</i>"))
# oa_labels = (glue("<b>OA</b><br>
#                   Code:
#                   Name: <br>
#                   Only census data is available at this level as it's so small"))
my_title <- (glue("<h2>Bolton geographies</h2>
                  Click on an area to find out more | Turn layers on and off to compare<br>
                  (Boundaries from <a href=https://geoportal.statistics.gov.uk/ target=_blank>ONS Open Geographies Portal)</a>"))
# # make colour palette
# imd_decile_colours <- colorFactor(
#   palette = c("#B30000", "#418FDE"),
#   levels = c(1, 10),
#   na.color = "white")
# Assemble the map: one addPolygons() overlay per geography, each with a
# transparent fill, a brand colour, hover highlighting and an HTML popup.
geographies_map <-
  leaflet() %>%
  addResetMapButton() %>%
  fitBounds(lng1 = my_bbox[1], lat1 = my_bbox[2], lng2 = my_bbox[3], lat2= my_bbox[4]) %>%
  addProviderTiles("Stamen.TonerLite") %>%
  # Borough boundary - bolton brand darker green #009639
  addPolygons(data = boroughs_bolton, weight = 5, color = "#009639",
              fillColor = "white", fillOpacity = 0, group = "Borough",
              highlight = highlightOptions(weight = 5, color = "#009639", bringToFront = FALSE),
              popup = ~borough_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # ward boundaries - bolton brand lighter blue
  addPolygons(data = wards_bolton, weight = 2, color = "#4FA8FF",
              fillColor = "white", fillOpacity = 0, group = "Wards",
              highlight = highlightOptions(weight = 4, color = "#4FA8FF", bringToFront = TRUE),
              popup = ~ward_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # MSOA boundaries - bolton brand turquoise
  addPolygons(data = msoas_bolton, weight = 1.5, color = "#00C7B1",
              fillColor = "white", fillOpacity = 0, group = "Middle Super Output Areas",
              highlight = highlightOptions(weight = 4, color = "#00C7B1", bringToFront = TRUE),
              popup = ~msoa_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # LSOA boundaries - bolton brand orange
  addPolygons(data = lsoas_bolton, weight = 0.75, color = "#ff6600",
              fillColor = "white", fillOpacity = 0, group = "Lower Super Output Areas",
              highlight = highlightOptions(weight = 4, color = "#ff6600", bringToFront = TRUE),
              popup = ~lsoa_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # neighbourhood boundaries - bolton brand yellow
  addPolygons(data = neighbourhoods, weight = 0.8, color = "#FFB300",
              fillColor = "white", fillOpacity = 0, group = "Neighbourhoods",
              highlight = highlightOptions(weight = 4, color = "#FFB300", bringToFront = TRUE),
              popup = ~neighbourhood_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  addLayersControl(overlayGroups = c("Borough", "Wards", "Middle Super Output Areas", "Lower Super Output Areas", "Neighbourhoods"), position = "topleft") %>%
  addControl(my_title, position = "topright")
# save map
# htmltools::save_html(geographies_map, "geographies_map.html") # smaller file size but in a fixed window size
saveWidget(geographies_map, "geographies_map.html") # larger file size but window size adjusts properly
################################## working copy for making adjustments on below #############
|
/bolton geographies map.R
|
no_license
|
shanwilkinson2/random_leafletmaps
|
R
| false
| false
| 14,161
|
r
|
# Bolton geographies map -------------------------------------------------------
# Builds an interactive leaflet map of Bolton's administrative geographies
# (borough, wards, MSOAs, LSOAs and integrated-care neighbourhoods) as
# toggleable overlays, then saves it to geographies_map.html.
# install.packages("readxl")
# install.packages("sf")
# install.packages("stringr")
# install.packages("dplyr")
# install.packages("leaflet")
# install.packages("janitor")
# install.packages("leaflet.extras")
# install.packages("htmlwidgets")
# install.packages("htmltools")
# install.packages("glue")
# install.packages("htmltools")
# install.packages("htmlwidgets")
library(readxl)
library(sf)
library(stringr)
library(dplyr)
library(leaflet)
library(janitor)
library(leaflet.extras)
library(htmlwidgets)
library(htmltools)
library(glue)
library(htmltools)   # NOTE(review): duplicate of the call above (harmless)
library(htmlwidgets) # NOTE(review): duplicate of the call above (harmless)
# geographical data ##################################################################################
# Borough(s) to keep; every boundary layer below is filtered to this.
desired_areas <- "Bolton"
# wards are hard coded as no borough indicator on the clipped to 20m ward boundaries
# LSOA
# LSOA boundaries 2011 (current)
# https://geoportal.statistics.gov.uk/datasets/lower-layer-super-output-areas-december-2011-generalised-clipped-boundaries-in-england-and-wales
lsoas_2011 <- st_read("G:\\Mapping Data\\R\\map\\Lower_Layer_Super_Output_Areas_December_2011_Generalised_Clipped__Boundaries_in_England_and_Wales/Lower_Layer_Super_Output_Areas_December_2011_Generalised_Clipped__Boundaries_in_England_and_Wales.shp")
# add boroughs variable from LSOA name (dropping the trailing 5 characters,
# the " 000A"-style suffix, leaves the borough name)
lsoas_2011 <- lsoas_2011 %>%
  mutate(borough = str_sub(lsoa11nm, 1, nchar(as.character(lsoa11nm))-5)) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter lsoas 2011 Bolton only
lsoas_bolton <- filter(lsoas_2011, borough %in% desired_areas)
# plot(st_geometry(lsoas_bolton)) # check areas look right
rm(lsoas_2011) # remove whole country of lsoas
# # OA - no name!!! so can't filter to bolton only
# # https://geoportal.statistics.gov.uk/datasets/output-areas-december-2001-generalised-clipped-boundaries-in-england-and-wales
# oas_2011 <- st_read("G:\\Mapping Data\\R\\map\\OA/Output_Areas_December_2001_Generalised_Clipped_Boundaries_in_England_and_Wales.shp")
#
# # add boroughs variable from LSOA name - no name!!!!!!
# oas_20112 <- oas_2011 %>%
#   mutate(borough = str_sub(lsoa11nm, 1, nchar(as.character(lsoa11nm))-5)) %>%
#   st_transform(crs = 4326) # transforms to lat/ long from OSGB36
#
# # filter lsoas 2011 Bolton only
# lsoas_bolton <- filter(lsoas_2011, borough %in% desired_areas)
# # plot(st_geometry(lsoas_bolton)) # check areas look right
# rm(lsoas_2011) # remove whole country of lsoas
# neighbourhoods (nine integrated health & social care areas)
# neighbourhoods<- c(st_union(lsoas_bolton[1:3,]),
#                    st_union(lsoas_bolton[4:6,]))
neighbourhoods <- st_read("G:\\Mapping Data\\neighbourhoods 9 areas for integ care\\boundaries/9 Areas 121216.TAB")
# friendly display names for the nine areas
new_names <- data.frame(AreaCode = c("Area 1A", "Area 1B", "Area 1C", "Area 2A", "Area 2B", "Area 2C", "Area 3A", "Area 3B", "Area 3C"),
                        newname = c("Horwich", "Chorley Roads", "Westhoughton", "Rumworth", "Farnworth/Kearsley", "Central/Great Lever", "Crompton/Halliwell", "Breightmet/Little Lever", "Turton")
)
neighbourhoods <- left_join(neighbourhoods, new_names, by = "AreaCode")
# declare OSGB36 / British National Grid on the source file before
# reprojecting to WGS84
st_crs(neighbourhoods) <- "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +datum=OSGB36 +units=m +no_defs"
neighbourhoods <- st_transform(neighbourhoods, crs = 4326)
rm(new_names) # remove new names list as no longer needed
#plot(st_geometry(neighbourhoods)) # check geometry looks ok.
# MSOA
# https://geoportal.statistics.gov.uk/datasets/middle-layer-super-output-areas-december-2011-boundaries-bgc
msoas_2011 <- st_read("G:\\Mapping Data\\R\\map\\MSOA/Middle_Layer_Super_Output_Areas_December_2011_Boundaries_BGC.shp")
# add borough variable from MSOA name (drop the trailing 4-character suffix)
msoas_2011 <- msoas_2011 %>%
  mutate(borough = str_sub(msoa11nm, 1, nchar(as.character(msoa11nm))-4)) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter msoas 2011 Bolton only
msoas_bolton <- filter(msoas_2011, borough %in% desired_areas)
# plot(st_geometry(msoas_bolton)) # check areas look right
rm(msoas_2011) # remove whole country of msoas
# local names for each Bolton MSOA, keyed on MSOA code
msoa_localnames <- data.frame(msoa11cd = c("E02000984","E02000985","E02000986","E02000987","E02000988",
                                           "E02000989","E02000990","E02000991","E02000992","E02000993",
                                           "E02000994","E02000995","E02000996","E02000997","E02000998",
                                           "E02000999","E02001000","E02001001","E02001002","E02001003",
                                           "E02001004","E02001005","E02001006","E02001007","E02001008",
                                           "E02001009","E02001010","E02001011","E02001012","E02001013",
                                           "E02001014","E02001015","E02001016","E02001017","E02001018"),
                              local_name = c("Egerton & Dunscar","Turton","Sharples","Horwich Town","Sweetlove",
                                             "Harwood","Horwich Loco","Smithills N&E","Blackrod","Tonge Moor & Hall i'th' Wood",
                                             "Halliwell Rd","Johnson Fold & Doffcocker","Breightmet N & Withins","Middlebrook & Brazley","Victory",
                                             "Town Centre","Tonge Fold","Heaton","Leverhulme & Darcy Lever","Lostock & Ladybridge",
                                             "Lower Deane & The Willows","Burnden","Daubhill","Little Lever","Lever Edge",
                                             "Deane & Middle Hulton","Moses Gate","Westhoughton East","Townleys","Over Hulton",
                                             "Wingates & Washacre","Central Farnworth","Highfield & New Bury","Central Kearsley","Daisy Hill")
)
# merge in local names
msoas_bolton <- left_join(msoas_bolton, msoa_localnames, by = "msoa11cd")
rm(msoa_localnames) # remove localnames as no longer needed
# # wards
# # https://www.ordnancesurvey.co.uk/business-government/products/boundaryline
# wards2 <- st_read("G:\\Mapping Data\\R\\map\\OS boundary file\\Data\\GB\\district_borough_unitary_ward.TAB")
# wards2 <- wards2 %>%
#   mutate(borough = str_replace_all(File_Name, "_", " "),
#          borough = str_replace_all(borough, "  ", " "),
#          borough = str_remove(borough, " \\(B\\)"), # () are special characters, need to escape them.
#          borough = str_remove(borough, " DISTRICT"),
#          borough = str_to_title(borough)) %>%
#   st_transform(crs = 4326) # transforms to lat/ long from OSGB36
#
# # filter wards bolton only
# wards_bolton <- filter(wards2, borough %in% desired_areas)
# # plot(st_geometry(wards_bolton)) # check areas look right
# rm(wards2) # remove whole country of wards
# wards clipped to 20m
# https://geoportal.statistics.gov.uk/datasets/wards-december-2011-boundaries-ew-bgc
#G:\Mapping Data\R\map\wards BGC
wards <- st_read("G:\\Mapping Data\\R\\map\\wards BGC/Wards_December_2011_Boundaries_EW_BGC.shp")
# filter wards bolton only, no borough column on this file, so done on wardcode number range
# but welsh have same num so only 'E' codes
wards_bolton <- wards %>%
  mutate(wd11cd = as.character(wd11cd),
         wardcode_num = str_sub(wd11cd, 2, nchar(wd11cd)),
         wardcode_num = as.numeric(wardcode_num),
         wardcode_letter = str_sub(wd11cd, 1, 1)) %>%
  filter(between(wardcode_num, 5000650, 5000669),
         wardcode_letter == "E") %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# BUG FIX: removed a leftover `wards_bolton <- filter(wards, borough %in% desired_areas)`
# line here. It overwrote the correctly filtered object built above and would
# error anyway, because this ward file has no `borough` column (see the
# comment above - that is why the filter uses the ward-code number range).
# plot(st_geometry(wards_bolton)) # check areas look right
rm(wards) # remove whole country of wards
# boroughs
boroughs <- st_read("G:\\Mapping Data\\R\\map\\OS boundary file\\Data\\GB\\district_borough_unitary.TAB")
boroughs <- boroughs %>%
  mutate(borough = str_remove(Name, " District \\(B\\)")) %>%
  st_transform(crs = 4326) # transforms to lat/ long from OSGB36
# filter boroughs Bolton only
boroughs_bolton <- filter(boroughs, borough %in% desired_areas)
# plot(st_geometry(boroughs_bolton)) # check areas look right
rm(boroughs) # remove whole country of boroughs
# map ########################################################################################
my_bbox <- as.vector(st_bbox(boroughs_bolton)) # bounding box around selected areas to get corners for default map view
# popup text for each layer (HTML, interpolated per feature with glue)
borough_labels = (glue("<b>Borough</b><br>
                       Local Authority code: {boroughs_bolton$Census_Code}<br>
                       CCG code: 00T<br>
                       Name: {boroughs_bolton$borough}<br><br>
                       <i>Bolton CCG and Bolton Council use the same boundaries. However the CCG must also consider the needs of people who live outside Bolton but are registered with a Bolton GP.</i>"))
# NOTE(review): `Census_Code` and `Name` look like columns from the OS
# BoundaryLine ward file (the commented-out `wards2` above); the ONS BGC
# ward shapefile read here typically exposes `wd11cd` / `wd11nm` instead -
# verify these columns exist on `wards_bolton`.
ward_labels = (glue("<b>Ward</b><br>
                    Code: {wards_bolton$Census_Code}<br>
                    Name: {str_sub(wards_bolton$Name, 1, nchar(as.character(wards_bolton$Name)) - 5)}"))
msoa_labels = (glue("<b>MSOA</b><br>
                    Code: {msoas_bolton$msoa11cd}<br>
                    Name: {msoas_bolton$msoa11nmw}<br>
                    Local name: {msoas_bolton$local_name}"))
lsoa_labels = (glue("<b>LSOA</b><br>
                    Code: {lsoas_bolton$lsoa11cd}<br>
                    Name: {lsoas_bolton$lsoa11nmw}"))
neighbourhood_labels = (glue("<b>Neighbourhood</b><br>
                             Name: {neighbourhoods$newname}<br><br>
                             <i>Neighbourhoods are local geographies for integrated health & social care, made up of LSOAs</i>"))
# oa_labels = (glue("<b>OA</b><br>
#                   Code:
#                   Name: <br>
#                   Only census data is available at this level as it's so small"))
my_title <- (glue("<h2>Bolton geographies</h2>
                  Click on an area to find out more | Turn layers on and off to compare<br>
                  (Boundaries from <a href=https://geoportal.statistics.gov.uk/ target=_blank>ONS Open Geographies Portal)</a>"))
# # make colour palette
# imd_decile_colours <- colorFactor(
#   palette = c("#B30000", "#418FDE"),
#   levels = c(1, 10),
#   na.color = "white")
# Assemble the map: one addPolygons() overlay per geography, each with a
# transparent fill, a brand colour, hover highlighting and an HTML popup.
geographies_map <-
  leaflet() %>%
  addResetMapButton() %>%
  fitBounds(lng1 = my_bbox[1], lat1 = my_bbox[2], lng2 = my_bbox[3], lat2= my_bbox[4]) %>%
  addProviderTiles("Stamen.TonerLite") %>%
  # Borough boundary - bolton brand darker green #009639
  addPolygons(data = boroughs_bolton, weight = 5, color = "#009639",
              fillColor = "white", fillOpacity = 0, group = "Borough",
              highlight = highlightOptions(weight = 5, color = "#009639", bringToFront = FALSE),
              popup = ~borough_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # ward boundaries - bolton brand lighter blue
  addPolygons(data = wards_bolton, weight = 2, color = "#4FA8FF",
              fillColor = "white", fillOpacity = 0, group = "Wards",
              highlight = highlightOptions(weight = 4, color = "#4FA8FF", bringToFront = TRUE),
              popup = ~ward_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # MSOA boundaries - bolton brand turquoise
  addPolygons(data = msoas_bolton, weight = 1.5, color = "#00C7B1",
              fillColor = "white", fillOpacity = 0, group = "Middle Super Output Areas",
              highlight = highlightOptions(weight = 4, color = "#00C7B1", bringToFront = TRUE),
              popup = ~msoa_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # LSOA boundaries - bolton brand orange
  addPolygons(data = lsoas_bolton, weight = 0.75, color = "#ff6600",
              fillColor = "white", fillOpacity = 0, group = "Lower Super Output Areas",
              highlight = highlightOptions(weight = 4, color = "#ff6600", bringToFront = TRUE),
              popup = ~lsoa_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  # neighbourhood boundaries - bolton brand yellow
  addPolygons(data = neighbourhoods, weight = 0.8, color = "#FFB300",
              fillColor = "white", fillOpacity = 0, group = "Neighbourhoods",
              highlight = highlightOptions(weight = 4, color = "#FFB300", bringToFront = TRUE),
              popup = ~neighbourhood_labels,
              labelOptions = labelOptions(
                style = list("font-weight" = "normal", padding = "3px 8px"),
                textsize = "15px",
                direction = "auto")) %>%
  addLayersControl(overlayGroups = c("Borough", "Wards", "Middle Super Output Areas", "Lower Super Output Areas", "Neighbourhoods"), position = "topleft") %>%
  addControl(my_title, position = "topright")
# save map
# htmltools::save_html(geographies_map, "geographies_map.html") # smaller file size but in a fixed window size
saveWidget(geographies_map, "geographies_map.html") # larger file size but window size adjusts properly
################################## working copy for making adjustments on below #############
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databasemigrationservice_operations.R
\name{databasemigrationservice_modify_replication_instance}
\alias{databasemigrationservice_modify_replication_instance}
\title{Modifies the replication instance to apply new settings}
\usage{
databasemigrationservice_modify_replication_instance(
ReplicationInstanceArn, AllocatedStorage, ApplyImmediately,
ReplicationInstanceClass, VpcSecurityGroupIds,
PreferredMaintenanceWindow, MultiAZ, EngineVersion,
AllowMajorVersionUpgrade, AutoMinorVersionUpgrade,
ReplicationInstanceIdentifier)
}
\arguments{
\item{ReplicationInstanceArn}{[required] The Amazon Resource Name (ARN) of the replication instance.}
\item{AllocatedStorage}{The amount of storage (in gigabytes) to be allocated for the replication
instance.}
\item{ApplyImmediately}{Indicates whether the changes should be applied immediately or during
the next maintenance window.}
\item{ReplicationInstanceClass}{The compute and memory capacity of the replication instance as defined
for the specified replication instance class. For example, to specify the
instance class dms.c4.large, set this parameter to \code{"dms.c4.large"}.
For more information on the settings and capacities for the available
replication instance classes, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth}{Selecting the right AWS DMS replication instance for your migration}.}
\item{VpcSecurityGroupIds}{Specifies the VPC security group to be used with the replication
instance. The VPC security group must work with the VPC containing the
replication instance.}
\item{PreferredMaintenanceWindow}{The weekly time range (in UTC) during which system maintenance can
occur, which might result in an outage. Changing this parameter does not
result in an outage, except in the following situation, and the change
is asynchronously applied as soon as possible. If moving this window to
the current time, there must be at least 30 minutes between the current
time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes}
\item{MultiAZ}{Specifies whether the replication instance is a Multi-AZ deployment. You
can't set the \code{AvailabilityZone} parameter if the Multi-AZ parameter is
set to \code{true}.}
\item{EngineVersion}{The engine version number of the replication instance.
When modifying a major engine version of an instance, also set
\code{AllowMajorVersionUpgrade} to \code{true}.}
\item{AllowMajorVersionUpgrade}{Indicates that major version upgrades are allowed. Changing this
parameter does not result in an outage, and the change is asynchronously
applied as soon as possible.
This parameter must be set to \code{true} when specifying a value for the
\code{EngineVersion} parameter that is a different major version than the
replication instance's current version.}
\item{AutoMinorVersionUpgrade}{A value that indicates that minor version upgrades are applied
automatically to the replication instance during the maintenance window.
Changing this parameter doesn't result in an outage, except in the case
described following. The change is asynchronously applied as soon as
possible.
An outage does result if these factors apply:
\itemize{
\item This parameter is set to \code{true} during the maintenance window.
\item A newer minor version is available.
\item AWS DMS has enabled automatic patching for the given engine version.
}}
\item{ReplicationInstanceIdentifier}{The replication instance identifier. This parameter is stored as a
lowercase string.}
}
\value{
A list with the following syntax:\preformatted{list(
ReplicationInstance = list(
ReplicationInstanceIdentifier = "string",
ReplicationInstanceClass = "string",
ReplicationInstanceStatus = "string",
AllocatedStorage = 123,
InstanceCreateTime = as.POSIXct(
"2015-01-01"
),
VpcSecurityGroups = list(
list(
VpcSecurityGroupId = "string",
Status = "string"
)
),
AvailabilityZone = "string",
ReplicationSubnetGroup = list(
ReplicationSubnetGroupIdentifier = "string",
ReplicationSubnetGroupDescription = "string",
VpcId = "string",
SubnetGroupStatus = "string",
Subnets = list(
list(
SubnetIdentifier = "string",
SubnetAvailabilityZone = list(
Name = "string"
),
SubnetStatus = "string"
)
)
),
PreferredMaintenanceWindow = "string",
PendingModifiedValues = list(
ReplicationInstanceClass = "string",
AllocatedStorage = 123,
MultiAZ = TRUE|FALSE,
EngineVersion = "string"
),
MultiAZ = TRUE|FALSE,
EngineVersion = "string",
AutoMinorVersionUpgrade = TRUE|FALSE,
KmsKeyId = "string",
ReplicationInstanceArn = "string",
ReplicationInstancePublicIpAddress = "string",
ReplicationInstancePrivateIpAddress = "string",
ReplicationInstancePublicIpAddresses = list(
"string"
),
ReplicationInstancePrivateIpAddresses = list(
"string"
),
PubliclyAccessible = TRUE|FALSE,
SecondaryAvailabilityZone = "string",
FreeUntil = as.POSIXct(
"2015-01-01"
),
DnsNameServers = "string"
)
)
}
}
\description{
Modifies the replication instance to apply new settings. You can change
one or more parameters by specifying these parameters and the new values
in the request.
Some settings are applied during the maintenance window.
}
\section{Request syntax}{
\preformatted{svc$modify_replication_instance(
ReplicationInstanceArn = "string",
AllocatedStorage = 123,
ApplyImmediately = TRUE|FALSE,
ReplicationInstanceClass = "string",
VpcSecurityGroupIds = list(
"string"
),
PreferredMaintenanceWindow = "string",
MultiAZ = TRUE|FALSE,
EngineVersion = "string",
AllowMajorVersionUpgrade = TRUE|FALSE,
AutoMinorVersionUpgrade = TRUE|FALSE,
ReplicationInstanceIdentifier = "string"
)
}
}
\examples{
\dontrun{
# Modifies the replication instance to apply new settings. You can change
# one or more parameters by specifying these parameters and the new values
# in the request. Some settings are applied during the maintenance window.
svc$modify_replication_instance(
AllocatedStorage = 123L,
AllowMajorVersionUpgrade = TRUE,
ApplyImmediately = TRUE,
AutoMinorVersionUpgrade = TRUE,
EngineVersion = "1.5.0",
MultiAZ = TRUE,
PreferredMaintenanceWindow = "sun:06:00-sun:14:00",
ReplicationInstanceArn = "arn:aws:dms:us-east-1:123456789012:rep:6UTDJGBOUS3VI3SUWA66XFJCJQ",
ReplicationInstanceClass = "dms.t2.micro",
ReplicationInstanceIdentifier = "test-rep-1",
VpcSecurityGroupIds = list()
)
}
}
\keyword{internal}
|
/cran/paws.migration/man/databasemigrationservice_modify_replication_instance.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 6,915
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databasemigrationservice_operations.R
\name{databasemigrationservice_modify_replication_instance}
\alias{databasemigrationservice_modify_replication_instance}
\title{Modifies the replication instance to apply new settings}
\usage{
databasemigrationservice_modify_replication_instance(
ReplicationInstanceArn, AllocatedStorage, ApplyImmediately,
ReplicationInstanceClass, VpcSecurityGroupIds,
PreferredMaintenanceWindow, MultiAZ, EngineVersion,
AllowMajorVersionUpgrade, AutoMinorVersionUpgrade,
ReplicationInstanceIdentifier)
}
\arguments{
\item{ReplicationInstanceArn}{[required] The Amazon Resource Name (ARN) of the replication instance.}
\item{AllocatedStorage}{The amount of storage (in gigabytes) to be allocated for the replication
instance.}
\item{ApplyImmediately}{Indicates whether the changes should be applied immediately or during
the next maintenance window.}
\item{ReplicationInstanceClass}{The compute and memory capacity of the replication instance as defined
for the specified replication instance class. For example to specify the
instance class dms.c4.large, set this parameter to \code{"dms.c4.large"}.
For more information on the settings and capacities for the available
replication instance classes, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth}{Selecting the right AWS DMS replication instance for your migration}.}
\item{VpcSecurityGroupIds}{Specifies the VPC security group to be used with the replication
instance. The VPC security group must work with the VPC containing the
replication instance.}
\item{PreferredMaintenanceWindow}{The weekly time range (in UTC) during which system maintenance can
occur, which might result in an outage. Changing this parameter does not
result in an outage, except in the following situation, and the change
is asynchronously applied as soon as possible. If moving this window to
the current time, there must be at least 30 minutes between the current
time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes}
\item{MultiAZ}{Specifies whether the replication instance is a Multi-AZ deployment. You
can't set the \code{AvailabilityZone} parameter if the Multi-AZ parameter is
set to \code{true}.}
\item{EngineVersion}{The engine version number of the replication instance.
When modifying a major engine version of an instance, also set
\code{AllowMajorVersionUpgrade} to \code{true}.}
\item{AllowMajorVersionUpgrade}{Indicates that major version upgrades are allowed. Changing this
parameter does not result in an outage, and the change is asynchronously
applied as soon as possible.
This parameter must be set to \code{true} when specifying a value for the
\code{EngineVersion} parameter that is a different major version than the
replication instance's current version.}
\item{AutoMinorVersionUpgrade}{A value that indicates that minor version upgrades are applied
automatically to the replication instance during the maintenance window.
Changing this parameter doesn't result in an outage, except in the case
described following. The change is asynchronously applied as soon as
possible.
An outage does result if these factors apply:
\itemize{
\item This parameter is set to \code{true} during the maintenance window.
\item A newer minor version is available.
\item AWS DMS has enabled automatic patching for the given engine version.
}}
\item{ReplicationInstanceIdentifier}{The replication instance identifier. This parameter is stored as a
lowercase string.}
}
\value{
A list with the following syntax:\preformatted{list(
ReplicationInstance = list(
ReplicationInstanceIdentifier = "string",
ReplicationInstanceClass = "string",
ReplicationInstanceStatus = "string",
AllocatedStorage = 123,
InstanceCreateTime = as.POSIXct(
"2015-01-01"
),
VpcSecurityGroups = list(
list(
VpcSecurityGroupId = "string",
Status = "string"
)
),
AvailabilityZone = "string",
ReplicationSubnetGroup = list(
ReplicationSubnetGroupIdentifier = "string",
ReplicationSubnetGroupDescription = "string",
VpcId = "string",
SubnetGroupStatus = "string",
Subnets = list(
list(
SubnetIdentifier = "string",
SubnetAvailabilityZone = list(
Name = "string"
),
SubnetStatus = "string"
)
)
),
PreferredMaintenanceWindow = "string",
PendingModifiedValues = list(
ReplicationInstanceClass = "string",
AllocatedStorage = 123,
MultiAZ = TRUE|FALSE,
EngineVersion = "string"
),
MultiAZ = TRUE|FALSE,
EngineVersion = "string",
AutoMinorVersionUpgrade = TRUE|FALSE,
KmsKeyId = "string",
ReplicationInstanceArn = "string",
ReplicationInstancePublicIpAddress = "string",
ReplicationInstancePrivateIpAddress = "string",
ReplicationInstancePublicIpAddresses = list(
"string"
),
ReplicationInstancePrivateIpAddresses = list(
"string"
),
PubliclyAccessible = TRUE|FALSE,
SecondaryAvailabilityZone = "string",
FreeUntil = as.POSIXct(
"2015-01-01"
),
DnsNameServers = "string"
)
)
}
}
\description{
Modifies the replication instance to apply new settings. You can change
one or more parameters by specifying these parameters and the new values
in the request.
Some settings are applied during the maintenance window.
}
\section{Request syntax}{
\preformatted{svc$modify_replication_instance(
ReplicationInstanceArn = "string",
AllocatedStorage = 123,
ApplyImmediately = TRUE|FALSE,
ReplicationInstanceClass = "string",
VpcSecurityGroupIds = list(
"string"
),
PreferredMaintenanceWindow = "string",
MultiAZ = TRUE|FALSE,
EngineVersion = "string",
AllowMajorVersionUpgrade = TRUE|FALSE,
AutoMinorVersionUpgrade = TRUE|FALSE,
ReplicationInstanceIdentifier = "string"
)
}
}
\examples{
\dontrun{
# Modifies the replication instance to apply new settings. You can change
# one or more parameters by specifying these parameters and the new values
# in the request. Some settings are applied during the maintenance window.
svc$modify_replication_instance(
AllocatedStorage = 123L,
AllowMajorVersionUpgrade = TRUE,
ApplyImmediately = TRUE,
AutoMinorVersionUpgrade = TRUE,
EngineVersion = "1.5.0",
MultiAZ = TRUE,
PreferredMaintenanceWindow = "sun:06:00-sun:14:00",
ReplicationInstanceArn = "arn:aws:dms:us-east-1:123456789012:rep:6UTDJGBOUS3VI3SUWA66XFJCJQ",
ReplicationInstanceClass = "dms.t2.micro",
ReplicationInstanceIdentifier = "test-rep-1",
VpcSecurityGroupIds = list()
)
}
}
\keyword{internal}
|
\name{multComb}
\alias{multComb}
\title{
Combinations of the first n integers in k groups
}
\description{
This is a function, used for generating the permutations used for the Exact distribution of many of the statistical procedures in Hollander, Wolfe, Chicken - Nonparametric Statistical Methods Third Edition, to generate possible combinations of the first n=n1+n2+...+nk integers within k groups.
}
\usage{
multComb(n.vec)
}
\arguments{
\item{n.vec}{Contains the group sizes n1,n2,...,nk}
}
\details{
The computations involved get very time consuming very quickly, so be careful not to use it for too many large groups.
}
\value{
Returns a matrix of n!/(n1!*n2!*...*nk!) rows, where each row represents one possible combination.
}
\author{
Grant Schneider
}
\examples{
##What are the ways that we can group 1,2,3,4,5 into groups of 2, 2, and 1?
multComb(c(2,2,1))
##Another example, with four groups
multComb(c(2,2,3,2))
}
\keyword{Combinations}
\keyword{k groups}
|
/man/multComb.Rd
|
no_license
|
cran/NSM3
|
R
| false
| false
| 1,004
|
rd
|
\name{multComb}
\alias{multComb}
\title{
Combinations of the first n integers in k groups
}
\description{
This is a function, used for generating the permutations used for the Exact distribution of many of the statistical procedures in Hollander, Wolfe, Chicken - Nonparametric Statistical Methods Third Edition, to generate possible combinations of the first n=n1+n2+...+nk integers within k groups.
}
\usage{
multComb(n.vec)
}
\arguments{
\item{n.vec}{Contains the group sizes n1,n2,...,nk}
}
\details{
The computations involved get very time consuming very quickly, so be careful not to use it for too many large groups.
}
\value{
Returns a matrix of n!/(n1!*n2!*...*nk!) rows, where each row represents one possible combination.
}
\author{
Grant Schneider
}
\examples{
##What are the ways that we can group 1,2,3,4,5 into groups of 2, 2, and 1?
multComb(c(2,2,1))
##Another example, with four groups
multComb(c(2,2,3,2))
}
\keyword{Combinations}
\keyword{k groups}
|
######################################################################
## Convert StringTie/Ensembl exon-block tables to 6-column BED files,
## translating Ensembl chromosome names to their UCSC equivalents.
## Runs once per species; reads from pathStringTie, writes .bed beside
## the input .txt.
path="LncEvoDevo/"
pathStringTie=paste(path,"results/stringtie_assembly/",sep="")
pathEnsembl=paste(path, "data/ensembl_annotations/",sep="")
pathUCSC=paste(path, "data/UCSC_sequences/",sep="")
release=94
options(scipen=999) ## remove scientific notation ## options(scipen=0) to get it back
######################################################################
for(sp in c("Mouse", "Rat", "Chicken")){
## Ensembl -> UCSC chromosome name correspondence, indexed by Ensembl name
corresp=read.table(paste(pathUCSC,sp,"/chromosomes_Ensembl_UCSC.txt",sep=""), h=T, stringsAsFactors=F, sep="\t")
rownames(corresp)=corresp[,1]
###### only StringTie assembly
print(paste(sp, "StringTie"))
## exon blocks: keep columns 3-6 = chr, start, end, strand
st=read.table(paste(pathStringTie,sp,"/combined/ExonBlocks_FilteredTranscripts_StringTie_Ensembl", release,".txt", sep=""), h=F, stringsAsFactors=F, sep="\t", quote="")
st=st[,c(3,4,5,6)]
colnames(st)=c("chr", "start", "end", "strand")
## unique block identifier used for the BED "name" column and for dedup
st$id=paste(st$chr, st$start, st$end, st$strand, sep=",")
print(dim(st)[1])
## sanity check: every chromosome must have a UCSC equivalent
print(all(st$chr%in%rownames(corresp)))
st$chr=corresp[st$chr,2]
print(dim(st)[1])
st$score=rep("1000", dim(st)[1])
st$start=st$start-1 ## bed format
## drop duplicated blocks (same chr/start/end/strand)
dupli=which(duplicated(st$id))
if(length(dupli)>0){
st=st[-dupli,]
print(paste("removed", length(dupli), "duplicated lines"))
}
## translate Ensembl strand codes (1 / -1) to BED symbols (+ / -);
## anything else stays NA, and the NA count is printed as a check
st$outstrand=rep(NA, dim(st)[1])
st$outstrand[which(st$strand=="1")]="+"
st$outstrand[which(st$strand=="-1")]="-"
print(length(which(is.na(st$outstrand))))
write.table(st[,c("chr", "start", "end", "id", "score", "outstrand")], file=paste(pathStringTie,sp,"/combined/ExonBlocks_FilteredTranscripts_StringTie_Ensembl",release,".bed", sep=""), row.names=F, col.names=F, sep="\t", quote=F)
}
######################################################################
|
/ortho_genes/exon_projections/format.exon.blocks.bed.R
|
no_license
|
anecsulea/LncEvoDevo
|
R
| false
| false
| 1,810
|
r
|
######################################################################
## Reformat StringTie/Ensembl exon blocks into 6-column BED files,
## mapping Ensembl chromosome names onto their UCSC equivalents.
path <- "LncEvoDevo/"
pathStringTie <- paste0(path, "results/stringtie_assembly/")
pathEnsembl <- paste0(path, "data/ensembl_annotations/")
pathUCSC <- paste0(path, "data/UCSC_sequences/")
release <- 94
options(scipen=999) ## no scientific notation; options(scipen=0) restores the default
######################################################################
for (sp in c("Mouse", "Rat", "Chicken")) {
  ## Ensembl -> UCSC chromosome name table, indexed by the Ensembl name
  chrmap <- read.table(paste0(pathUCSC, sp, "/chromosomes_Ensembl_UCSC.txt"), h=T, stringsAsFactors=F, sep="\t")
  rownames(chrmap) <- chrmap[, 1]

  ###### only StringTie assembly
  print(paste(sp, "StringTie"))

  ## exon blocks: keep chr / start / end / strand (input columns 3-6)
  blocks <- read.table(paste0(pathStringTie, sp, "/combined/ExonBlocks_FilteredTranscripts_StringTie_Ensembl", release, ".txt"), h=F, stringsAsFactors=F, sep="\t", quote="")
  blocks <- blocks[, c(3, 4, 5, 6)]
  colnames(blocks) <- c("chr", "start", "end", "strand")

  ## unique identifier per block; becomes the BED name column
  blocks$id <- paste(blocks$chr, blocks$start, blocks$end, blocks$strand, sep=",")
  print(nrow(blocks))

  ## every chromosome must have a UCSC equivalent before renaming
  print(all(blocks$chr %in% rownames(chrmap)))
  blocks$chr <- chrmap[blocks$chr, 2]
  print(nrow(blocks))

  blocks$score <- rep("1000", nrow(blocks))
  blocks$start <- blocks$start - 1 ## bed format

  ## discard duplicated blocks (identical chr/start/end/strand)
  dupli <- which(duplicated(blocks$id))
  if (length(dupli) > 0) {
    blocks <- blocks[-dupli, ]
    print(paste("removed", length(dupli), "duplicated lines"))
  }

  ## Ensembl strand codes (1 / -1) become BED symbols (+ / -);
  ## other values remain NA and the NA count is printed as a check
  blocks$outstrand <- rep(NA, nrow(blocks))
  blocks$outstrand[which(blocks$strand == "1")] <- "+"
  blocks$outstrand[which(blocks$strand == "-1")] <- "-"
  print(length(which(is.na(blocks$outstrand))))

  write.table(blocks[, c("chr", "start", "end", "id", "score", "outstrand")], file=paste0(pathStringTie, sp, "/combined/ExonBlocks_FilteredTranscripts_StringTie_Ensembl", release, ".bed"), row.names=F, col.names=F, sep="\t", quote=F)
}
######################################################################
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Server: renders an uploaded CSV as a table, honoring the header /
# separator / quote options and the head-vs-all display toggle from the UI.
shinyServer(function(input, output) {
output$contents <- renderTable({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, head of that data file by default,
# or all rows if selected, will be shown.
# req() suspends rendering until a file has actually been uploaded.
req(input$file1)
# when reading semicolon separated files,
# having a comma separator causes `read.csv` to error
tryCatch(
{
df <- read.csv(input$file1$datapath,
header = input$header,
sep = input$sep,
quote = input$quote)
},
error = function(e) {
# return a safeError if a parsing error occurs
# (safeError messages are safe to display to the end user)
stop(safeError(e))
}
)
# Display either a preview (first rows) or the full data frame.
if(input$disp == "head") {
return(head(df))
}
else {
return(df)
}
})
})
|
/upload/server.R
|
no_license
|
divensambhwani/R-Shiny
|
R
| false
| false
| 1,013
|
r
|
# Server logic for a Shiny web application.
# See http://shiny.rstudio.com for an introduction to Shiny apps.
library(shiny)

shinyServer(function(input, output) {

  # Render the uploaded CSV as a table. Honors the header / separator /
  # quote options and the head-vs-all display toggle chosen in the UI.
  output$contents <- renderTable({
    # input$file1 is NULL until the user uploads a file; req() pauses
    # rendering until then.
    req(input$file1)

    # when reading semicolon separated files,
    # having a comma separator causes `read.csv` to error
    tryCatch(
      {
        df <- read.csv(input$file1$datapath,
                       header = input$header,
                       sep = input$sep,
                       quote = input$quote)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )

    # Preview (first rows) or the full data frame, per the UI toggle.
    if (input$disp == "head") {
      head(df)
    } else {
      df
    }
  })

})
|
#=====================================================================================================================
# Analysis of A2058 response in PLX4720
#=====================================================================================================================
# Set the working directory, read the file, and pre-process the data
dd <- "/Users/paudelbb/Paudel_et_al_2016/LongTerm"
setwd(dd)
# library() errors loudly if a package is missing (require() only warns)
library(gplots)
library(ggplot2)
output <- "/Users/paudelbb/Paudel_et_al_2016/LongTerm/Figs"
#=====================================================================================================================
data <- read.csv("20150227 A2058_PLX4720_Processed.csv", header=T, sep=",")
cell <- unique(data$CellLine)
dr   <- "PLX4720"
s1   <- data
cnc  <- "8"
#=====================================================================================================================
# Build the growth-curve plot. Columns are referenced bare inside aes()
# (not as s1$Time / s1$nl2) so ggplot evaluates them in the `data` argument.
p <- ggplot(data = s1, aes(x = Time, y = nl2, col = Date)) +
  theme_bw() + geom_smooth(span=.25, aes(group=1), method = "loess", size=.5, alpha=0.6, col="blue") +
  scale_colour_manual(values="blue") + ylim(-3,3) + xlim(0,360) +
  theme(legend.position="none") + theme(axis.text=element_text(size=12)) +
  theme(text = element_text(size=12)) + ggtitle(paste0()) + labs(x="", y="")
# BUG FIX: ggsave() was previously chained onto the plot with `+`. ggsave()
# is not a plot layer -- it only "worked" as a side effect of evaluating the
# expression and saved last_plot(). Save explicitly, passing the plot object.
ggsave(paste0(cell, " + ", cnc, "μΜ.pdf"), plot = p, path = output, width = 3, height = 3)
#=====================================================================================================================
|
/LongTerm/A2058_PLX4720_Response.R
|
no_license
|
paudelbb/Paudel_et_al_2016
|
R
| false
| false
| 1,457
|
r
|
#=====================================================================================================================
# Analysis of A2058 response in PLX4720
#=====================================================================================================================
# Set the working directory, read the file, and pre-process the data
dd <- "/Users/paudelbb/Paudel_et_al_2016/LongTerm"
setwd(dd)
require(gplots)
require(ggplot2)
# Directory where the figure PDF is written
output <- "/Users/paudelbb/Paudel_et_al_2016/LongTerm/Figs"
#=====================================================================================================================
data <- read.csv("20150227 A2058_PLX4720_Processed.csv", header=T, sep=",")
cell <- unique(data$CellLine);
dr <- "PLX4720"
s1 <- data
# Drug concentration label used in the output filename
cnc = "8"
#=====================================================================================================================
# Smoothed (loess) growth curve of normalized log2 cell counts over time.
# NOTE(review): chaining `+ ggsave(...)` onto a ggplot is not a valid layer;
# it saves last_plot() as a side effect of building the expression. Prefer
# assigning the plot and calling ggsave(plot = ...) separately -- confirm.
ggplot(data = s1, aes(x=s1$Time, y=s1$nl2, col=Date))+
theme_bw()+geom_smooth(span=.25, aes(group=1), method = "loess", size=.5, alpha=0.6, col="blue")+
scale_colour_manual(values="blue") + ylim(-3,3)+ xlim(0,360)+
theme(legend.position="none") + theme(axis.text=element_text(size=12)) +
theme(text = element_text(size=12)) + ggtitle(paste0()) + labs(x="", y="") +
ggsave(paste0(cell, " + ", cnc, "μΜ.pdf"), path=output, width=3, height=3)
#=====================================================================================================================
|
library(shiny)
library(dygraphs)
library(shinydashboard)
###UI
# Dashboard layout: purple-skinned page with a wide sidebar of plot
# controls and a body containing the dygraph plot plus two info tabs.
ui <- dashboardPage(skin = "purple",
dashboardHeader(title = "Visualization of eddy-covariance data",titleWidth = 350),
dashboardSidebar(
# Inline JS: stretch the sidebar to full height and scroll on overflow.
tags$head( tags$script(type="text/javascript",'$(document).ready(function(){
$(".main-sidebar").css("height","100%");
$(".main-sidebar .sidebar").css({"position":"relative","max-height": "100%","overflow": "auto"})
})')),
width = 350,
div(style= "margin : 10px", h4("Data from three different ecosystem stations for year 2016 are available in four temporal resolutions")),
# Station picker; the dependent widgets below are rendered server-side
# (uiOutput placeholders filled by the server).
selectInput("stationType", label = "Select ecosystem station",
choices = c("Agroecosystem at Křešín u Pacova with crops harvested during the growing season",
"Evergreen needleleaf forest at Rájec-Jestřebí representing monoculture of Norway spruce",
"Deciduous broadleaf forests at Štítná nad Vláří representing monoculture of European beech")),
uiOutput("station"),
radioButtons("graphType","Graph type", c("XY", "Time series")),
uiOutput("barGraphChoiceUI"),
uiOutput("col1"),
uiOutput("col2"),
hr(),
# The single-axis option only applies to time-series graphs.
conditionalPanel(
condition = "input.graphType == 'Time series'",
checkboxInput("single_axis", label = "Display on single y-axis", value = FALSE)
),
uiOutput("showTimeDateSelect"),
# NOTE(review): "Highlit" in the two labels below looks like a typo for
# "Highlight" -- confirm and fix the user-facing strings.
checkboxInput("show_label", label = "Highlit y-axis value", value = FALSE),
conditionalPanel(
condition = "input.show_label == 1",
numericInput("y_axis_label", label = "Value", value = NULL)
),
checkboxInput("show_Xlabel", label = "Highlit x-axis value", value = FALSE),
conditionalPanel(
condition = "input.show_Xlabel == 1",
uiOutput("xLabel")
),
### advanced filtering
# Three equal-width dynamic inputs plus Add/Remove buttons for building
# filter conditions (contents rendered server-side).
div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("allInputs")),
div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("center")),
div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("right")),
div(style= "display:inline-block;text-align: center;",actionButton("appendInput", "Add")),
div(style= "display:inline-block;text-align: center;",actionButton("removeInput", "Remove"))
),
dashboardBody(
fluidRow(
box(status = "primary", width = 10000,
dygraphOutput("plot")
),
tabBox(
tabPanel("Axis Information", htmlOutput("point", inline = TRUE)),
tabPanel("Locality Information", htmlOutput("localityInfo", inline = TRUE))
)
)
)
)
|
/ui.R
|
no_license
|
MarekBernhauser/ShinyMeteo
|
R
| false
| false
| 2,735
|
r
|
library(shiny)
library(dygraphs)
library(shinydashboard)
###UI
# Dashboard layout: purple-skinned page with a wide sidebar of plot
# controls and a body containing the dygraph plot plus two info tabs.
ui <- dashboardPage(skin = "purple",
  dashboardHeader(title = "Visualization of eddy-covariance data", titleWidth = 350),
  dashboardSidebar(
    # Inline JS: stretch the sidebar to full height and scroll on overflow.
    tags$head( tags$script(type="text/javascript",'$(document).ready(function(){
$(".main-sidebar").css("height","100%");
$(".main-sidebar .sidebar").css({"position":"relative","max-height": "100%","overflow": "auto"})
})')),
    width = 350,
    div(style= "margin : 10px", h4("Data from three different ecosystem stations for year 2016 are available in four temporal resolutions")),
    # Station picker; the dependent widgets below are rendered server-side.
    selectInput("stationType", label = "Select ecosystem station",
      choices = c("Agroecosystem at Křešín u Pacova with crops harvested during the growing season",
                  "Evergreen needleleaf forest at Rájec-Jestřebí representing monoculture of Norway spruce",
                  "Deciduous broadleaf forests at Štítná nad Vláří representing monoculture of European beech")),
    uiOutput("station"),
    radioButtons("graphType","Graph type", c("XY", "Time series")),
    uiOutput("barGraphChoiceUI"),
    uiOutput("col1"),
    uiOutput("col2"),
    hr(),
    # The single-axis option only applies to time-series graphs.
    conditionalPanel(
      condition = "input.graphType == 'Time series'",
      checkboxInput("single_axis", label = "Display on single y-axis", value = FALSE)
    ),
    uiOutput("showTimeDateSelect"),
    # BUG FIX: user-facing labels read "Highlit"; corrected to "Highlight".
    checkboxInput("show_label", label = "Highlight y-axis value", value = FALSE),
    conditionalPanel(
      condition = "input.show_label == 1",
      numericInput("y_axis_label", label = "Value", value = NULL)
    ),
    checkboxInput("show_Xlabel", label = "Highlight x-axis value", value = FALSE),
    conditionalPanel(
      condition = "input.show_Xlabel == 1",
      uiOutput("xLabel")
    ),
    ### advanced filtering
    # Three equal-width dynamic inputs plus Add/Remove buttons for building
    # filter conditions (contents rendered server-side).
    div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("allInputs")),
    div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("center")),
    div(style= "display:inline-block;width:32%;margin:0px;padding:0px;",uiOutput("right")),
    div(style= "display:inline-block;text-align: center;",actionButton("appendInput", "Add")),
    div(style= "display:inline-block;text-align: center;",actionButton("removeInput", "Remove"))
  ),
  dashboardBody(
    fluidRow(
      box(status = "primary", width = 10000,
        dygraphOutput("plot")
      ),
      tabBox(
        tabPanel("Axis Information", htmlOutput("point", inline = TRUE)),
        tabPanel("Locality Information", htmlOutput("localityInfo", inline = TRUE))
      )
    )
  )
)
|
#### 09-1 ####
## -------------------------------------------------------------------- ##
install.packages("foreign") # foreign 패키지 설치
library(foreign) # SPSS 파일 로드
library(dplyr) # 전처리
library(ggplot2) # 시각화
library(readxl) # 엑셀 파일 불러오기
# 데이터 불러오기
raw_welfare <- read.spss(file = "Koweps_hpc10_2015_beta1.sav",
to.data.frame = T)
# 복사본 만들기
welfare <- raw_welfare
head(welfare)
tail(welfare)
View(welfare)
dim(welfare)
str(welfare)
summary(welfare)
welfare <- rename(welfare,
sex = h10_g3, # 성별
birth = h10_g4, # 태어난 연도
marriage = h10_g10, # 혼인 상태
religion = h10_g11, # 종교
income = p1002_8aq1, # 월급
code_job = h10_eco9, # 직종 코드
code_region = h10_reg7) # 지역 코드
#### 09-2 ####
## -------------------------------------------------------------------- ##
class(welfare$sex)
table(welfare$sex)
# 이상치 확인
table(welfare$sex)
# 이상치 결측 처리
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
# 결측치 확인
table(is.na(welfare$sex))
# 성별 항목 이름 부여
welfare$sex <- ifelse(welfare$sex == 1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)
## -------------------------------------------------------------------- ##
class(welfare$income)
summary(welfare$income)
qplot(welfare$income)
qplot(welfare$income) + xlim(0, 1000)
# 이상치 확인
summary(welfare$income)
# 이상치 결측 처리
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
# 결측치 확인
table(is.na(welfare$income))
## -------------------------------------------------------------------- ##
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mean_income = mean(income))
sex_income
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
#### 09-3 ####
## -------------------------------------------------------------------- ##
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# 이상치 확인
summary(welfare$birth)
# 결측치 확인
table(is.na(welfare$birth))
# 이상치 결측 처리
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
## -------------------------------------------------------------------- ##
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)
## -------------------------------------------------------------------- ##
age_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x = age, y = mean_income)) + geom_line()
#### 09-4 ####
## -------------------------------------------------------------------- ##
welfare <- welfare %>%
mutate(ageg = ifelse(age < 30, "young",
ifelse(age <= 59, "middle", "old")))
table(welfare$ageg)
qplot(welfare$ageg)
## -------------------------------------------------------------------- ##
ageg_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg) %>%
summarise(mean_income = mean(income))
ageg_income
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) + geom_col()
## -------------------------------------------------------------------- ##
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
#### 09-5 ####
## -------------------------------------------------------------------- ##
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg, sex) %>%
summarise(mean_income = mean(income))
sex_income
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col(position = "dodge") +
scale_x_discrete(limits = c("young", "middle", "old"))
## -------------------------------------------------------------------- ##
# 성별 연령별 월급 평균표 만들기
sex_age <- welfare %>%
filter(!is.na(income)) %>%
group_by(age, sex) %>%
summarise(mean_income = mean(income))
head(sex_age)
# 그래프 만들기
ggplot(data = sex_age, aes(x = age, y = mean_income, col = sex)) + geom_line()
#### 09-6 ####
## -------------------------------------------------------------------- ##
class(welfare$code_job)
table(welfare$code_job)
library(readxl)
list_job <- read_excel("Koweps_Codebook.xlsx", col_names = T, sheet = 2)
head(list_job)
dim(list_job)
welfare <- left_join(welfare, list_job, id = "code_job")
welfare %>%
filter(!is.na(code_job)) %>%
select(code_job, job) %>%
head(10)
## -------------------------------------------------------------------- ##
job_income <- welfare %>%
filter(!is.na(job) & !is.na(income)) %>%
group_by(job) %>%
summarise(mean_income = mean(income))
head(job_income)
top10 <- job_income %>%
arrange(desc(mean_income)) %>%
head(10)
top10
ggplot(data = top10, aes(x = reorder(job, mean_income), y = mean_income)) +
geom_col() +
coord_flip()
# 하위 10위 추출
bottom10 <- job_income %>%
arrange(mean_income) %>%
head(10)
bottom10
# 그래프 만들기
ggplot(data = bottom10, aes(x = reorder(job, -mean_income),
y = mean_income)) +
geom_col() +
coord_flip() +
ylim(0, 850)
#### 09-7 ####
## -------------------------------------------------------------------- ##
# 남성 직업 빈도 상위 10개 추출
job_male <- welfare %>%
filter(!is.na(job) & sex == "male") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_male
# 여성 직업 빈도 상위 10개 추출
job_female <- welfare %>%
filter(!is.na(job) & sex == "female") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_female
# 남성 직업 빈도 상위 10개 직업
ggplot(data = job_male, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
# 여성 직업 빈도 상위 10개 직업
ggplot(data = job_female, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
#### 09-8 ####
## -------------------------------------------------------------------- ##
class(welfare$religion)
table(welfare$religion)
# 종교 유무 이름 부여
welfare$religion <- ifelse(welfare$religion == 1, "yes", "no")
table(welfare$religion)
qplot(welfare$religion)
## -------------------------------------------------------------------- ##
class(welfare$marriage)
table(welfare$marriage)
# 이혼 여부 변수 만들기
welfare$group_marriage <- ifelse(welfare$marriage == 1, "marriage",
ifelse(welfare$marriage == 3, "divorce", NA))
table(welfare$group_marriage)
table(is.na(welfare$group_marriage))
qplot(welfare$group_marriage)
## -------------------------------------------------------------------- ##
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
religion_marriage
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
count(religion, group_marriage) %>%
group_by(religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
# 이혼 추출
divorce <- religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(religion, pct)
divorce
ggplot(data = divorce, aes(x = religion, y = pct)) + geom_col()
## -------------------------------------------------------------------- ##
ageg_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(ageg, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_marriage
ageg_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
count(ageg, group_marriage) %>%
group_by(ageg) %>%
mutate(pct = round(n/sum(n)*100, 1))
# 초년 제외, 이혼 추출
ageg_divorce <- ageg_marriage %>%
filter(ageg != "young" & group_marriage == "divorce") %>%
select(ageg, pct)
ageg_divorce
# 그래프 만들기
ggplot(data = ageg_divorce, aes(x = ageg, y = pct)) + geom_col()
## -------------------------------------------------------------------- ##
# Proportion table by age group, religion and marital status
# (verbose version, then the equivalent count() version).
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
group_by(ageg, religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_religion_marriage
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
count(ageg, religion, group_marriage) %>%
group_by(ageg, religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
# Divorce-rate table by age group and religion
df_divorce <- ageg_religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(ageg, religion, pct)
df_divorce
# Side-by-side bars: divorce rate with/without religion, per age group
ggplot(data = df_divorce, aes(x = ageg, y = pct, fill = religion )) +
geom_col(position = "dodge")
#### 09-9 ####
## -------------------------------------------------------------------- ##
# Regional analysis: inspect the raw region code first.
class(welfare$code_region)
table(welfare$code_region)
# Build the region-code lookup table (codes 1-7 -> region names; the
# names are data values, so they stay in Korean).
list_region <- data.frame(code_region = c(1:7),
region = c("서울",
"수도권(인천/경기)",
"부산/경남/울산",
"대구/경북",
"대전/충남",
"강원/충북",
"광주/전남/전북/제주도"))
list_region
# Attach the region name to each respondent.
# BUG FIX: left_join() has no `id` argument; the original `id = "code_region"`
# was silently swallowed by `...` and dplyr fell back to joining on all
# shared columns. Make the join key explicit with `by`.
welfare <- left_join(welfare, list_region, by = "code_region")
welfare %>%
select(code_region, region) %>%
head
## -------------------------------------------------------------------- ##
# Age-group composition per region (verbose version, then count() version).
region_ageg <- welfare %>%
group_by(region, ageg) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 2))
head(region_ageg)
region_ageg <- welfare %>%
count(region, ageg) %>%
group_by(region) %>%
mutate(pct = round(n/sum(n)*100, 2))
# Stacked horizontal bars: age-group share per region
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip()
## -------------------------------------------------------------------- ##
# Sort regions by the share of the "old" group, ascending
list_order_old <- region_ageg %>%
filter(ageg == "old") %>%
arrange(pct)
list_order_old
# Vector of region names in that order, used to order the x axis below.
# NOTE(review): this shadows base::order for the rest of the script.
order <- list_order_old$region
order
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
class(region_ageg$ageg)
levels(region_ageg$ageg)
# Order the age-group factor so stacked bars show old / middle / young.
# BUG FIX: the argument name is `levels`, not `level` -- the original only
# worked through partial argument matching, which lintr flags and which
# breaks silently if factor() ever gains another argument starting "level".
region_ageg$ageg <- factor(region_ageg$ageg,
levels = c("old", "middle", "young"))
class(region_ageg$ageg)
levels(region_ageg$ageg)
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
|
/Script_Part09.r
|
no_license
|
CheonYoonHan/BigData_R
|
R
| false
| false
| 11,400
|
r
|
#### 09-1 ####
## -------------------------------------------------------------------- ##
# NOTE(review): this re-installs the package on every run; the install
# line is normally run once and then commented out.
install.packages("foreign") # install the foreign package
library(foreign) # read SPSS files
library(dplyr) # data wrangling
library(ggplot2) # visualisation
library(readxl) # read Excel files
# Load the Korea Welfare Panel data (2015 wave) from SPSS format
raw_welfare <- read.spss(file = "Koweps_hpc10_2015_beta1.sav",
to.data.frame = T)
# Work on a copy so the raw data stays untouched
welfare <- raw_welfare
head(welfare)
tail(welfare)
View(welfare)
dim(welfare)
str(welfare)
summary(welfare)
# Rename the cryptic survey codes to readable variable names
welfare <- rename(welfare,
sex = h10_g3, # sex
birth = h10_g4, # birth year
marriage = h10_g10, # marital status
religion = h10_g11, # religion
income = p1002_8aq1, # monthly income
code_job = h10_eco9, # job code
code_region = h10_reg7) # region code
#### 09-2 ####
## -------------------------------------------------------------------- ##
# Sex is coded numerically; 9 marks a missing answer (recoded below).
class(welfare$sex)
table(welfare$sex)
# Check for out-of-range values
table(welfare$sex)
# Recode the non-response code 9 as missing
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
# Check missing values
table(is.na(welfare$sex))
# Attach readable labels to the sex categories
welfare$sex <- ifelse(welfare$sex == 1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)
## -------------------------------------------------------------------- ##
# Monthly income; 0 and 9999 are non-response codes (recoded below).
class(welfare$income)
summary(welfare$income)
qplot(welfare$income)
qplot(welfare$income) + xlim(0, 1000)
# Check for out-of-range values
summary(welfare$income)
# Recode the non-response codes as missing
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
# Check missing values
table(is.na(welfare$income))
## -------------------------------------------------------------------- ##
# Mean monthly income by sex
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mean_income = mean(income))
sex_income
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
#### 09-3 ####
## -------------------------------------------------------------------- ##
# Birth year; 9999 marks a missing answer (recoded below).
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# Check for out-of-range values
summary(welfare$birth)
# Check missing values
table(is.na(welfare$birth))
# Recode the non-response code as missing
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
## -------------------------------------------------------------------- ##
# Derive age at survey time (2015; the +1 counts the birth year itself)
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)
## -------------------------------------------------------------------- ##
# Mean monthly income at each single year of age
age_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x = age, y = mean_income)) + geom_line()
#### 09-4 ####
## -------------------------------------------------------------------- ##
# Bucket age into three groups: <30 young, 30-59 middle, 60+ old
welfare <- welfare %>%
mutate(ageg = ifelse(age < 30, "young",
ifelse(age <= 59, "middle", "old")))
table(welfare$ageg)
qplot(welfare$ageg)
## -------------------------------------------------------------------- ##
# Mean monthly income per age group
ageg_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg) %>%
summarise(mean_income = mean(income))
ageg_income
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) + geom_col()
## -------------------------------------------------------------------- ##
# Same plot, with the age groups in their natural order
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
#### 09-5 ####
## -------------------------------------------------------------------- ##
# Mean monthly income by age group and sex
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg, sex) %>%
summarise(mean_income = mean(income))
sex_income
# Stacked bars
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
# Side-by-side bars
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col(position = "dodge") +
scale_x_discrete(limits = c("young", "middle", "old"))
## -------------------------------------------------------------------- ##
# Mean monthly income by single year of age and sex
sex_age <- welfare %>%
filter(!is.na(income)) %>%
group_by(age, sex) %>%
summarise(mean_income = mean(income))
head(sex_age)
# Build the plot
ggplot(data = sex_age, aes(x = age, y = mean_income, col = sex)) + geom_line()
#### 09-6 ####
## -------------------------------------------------------------------- ##
# Inspect the raw job-code variable.
class(welfare$code_job)
table(welfare$code_job)
library(readxl)
# Sheet 2 of the codebook maps job codes to job names.
# Spell out TRUE: T is a reassignable binding, not a keyword.
list_job <- read_excel("Koweps_Codebook.xlsx", col_names = TRUE, sheet = 2)
head(list_job)
dim(list_job)
# BUG FIX: left_join() has no `id` argument; the original `id = "code_job"`
# was silently swallowed by `...` and dplyr joined on all shared columns.
# Make the join key explicit with `by`.
welfare <- left_join(welfare, list_job, by = "code_job")
welfare %>%
filter(!is.na(code_job)) %>%
select(code_job, job) %>%
head(10)
## -------------------------------------------------------------------- ##
# Mean monthly income per job
job_income <- welfare %>%
filter(!is.na(job) & !is.na(income)) %>%
group_by(job) %>%
summarise(mean_income = mean(income))
head(job_income)
# Top 10 best-paid jobs
top10 <- job_income %>%
arrange(desc(mean_income)) %>%
head(10)
top10
ggplot(data = top10, aes(x = reorder(job, mean_income), y = mean_income)) +
geom_col() +
coord_flip()
# Extract the bottom 10
bottom10 <- job_income %>%
arrange(mean_income) %>%
head(10)
bottom10
# Build the plot
ggplot(data = bottom10, aes(x = reorder(job, -mean_income),
y = mean_income)) +
geom_col() +
coord_flip() +
ylim(0, 850)
#### 09-7 ####
## -------------------------------------------------------------------- ##
# Top 10 most common jobs among men
job_male <- welfare %>%
filter(!is.na(job) & sex == "male") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_male
# Top 10 most common jobs among women
job_female <- welfare %>%
filter(!is.na(job) & sex == "female") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_female
# Plot: top 10 jobs among men
ggplot(data = job_male, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
# Plot: top 10 jobs among women
ggplot(data = job_female, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
#### 09-8 ####
## -------------------------------------------------------------------- ##
# Religion: recode the numeric answer into yes/no labels.
class(welfare$religion)
table(welfare$religion)
# Label the religion variable (1 -> "yes", everything else -> "no")
welfare$religion <- ifelse(welfare$religion == 1, "yes", "no")
table(welfare$religion)
qplot(welfare$religion)
## -------------------------------------------------------------------- ##
class(welfare$marriage)
table(welfare$marriage)
# Derive a marriage-status variable: 1 -> "marriage", 3 -> "divorce",
# every other code -> NA (those rows are filtered out below)
welfare$group_marriage <- ifelse(welfare$marriage == 1, "marriage",
ifelse(welfare$marriage == 3, "divorce", NA))
table(welfare$group_marriage)
table(is.na(welfare$group_marriage))
qplot(welfare$group_marriage)
## -------------------------------------------------------------------- ##
# Marriage/divorce proportions within each religion group (verbose
# group_by/summarise version first, then the equivalent count() version).
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
religion_marriage
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
count(religion, group_marriage) %>%
group_by(religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
# Keep only the divorce rows
divorce <- religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(religion, pct)
divorce
ggplot(data = divorce, aes(x = religion, y = pct)) + geom_col()
## -------------------------------------------------------------------- ##
# Marriage/divorce proportions by age group (verbose version, then the
# equivalent count() version).
ageg_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(ageg, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_marriage
ageg_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
count(ageg, group_marriage) %>%
group_by(ageg) %>%
mutate(pct = round(n/sum(n)*100, 1))
# Drop the "young" group, keep only the divorce rows
ageg_divorce <- ageg_marriage %>%
filter(ageg != "young" & group_marriage == "divorce") %>%
select(ageg, pct)
ageg_divorce
# Build the plot
ggplot(data = ageg_divorce, aes(x = ageg, y = pct)) + geom_col()
## -------------------------------------------------------------------- ##
# Proportion table by age group, religion and marital status
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
group_by(ageg, religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_religion_marriage
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
count(ageg, religion, group_marriage) %>%
group_by(ageg, religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
# Divorce-rate table by age group and religion
df_divorce <- ageg_religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(ageg, religion, pct)
df_divorce
ggplot(data = df_divorce, aes(x = ageg, y = pct, fill = religion )) +
geom_col(position = "dodge")
#### 09-9 ####
## -------------------------------------------------------------------- ##
# Regional analysis: inspect the raw region code first.
class(welfare$code_region)
table(welfare$code_region)
# Build the region-code lookup table (codes 1-7 -> region names; the
# names are data values, so they stay in Korean).
list_region <- data.frame(code_region = 1:7,
region = c("서울",
"수도권(인천/경기)",
"부산/경남/울산",
"대구/경북",
"대전/충남",
"강원/충북",
"광주/전남/전북/제주도"))
list_region
# Attach the region name to each respondent.
# BUG FIX: left_join() has no `id` argument; the original `id = "code_region"`
# was silently swallowed by `...` and dplyr joined on all shared columns.
# Make the join key explicit with `by`.
welfare <- left_join(welfare, list_region, by = "code_region")
welfare %>%
select(code_region, region) %>%
head
## -------------------------------------------------------------------- ##
# Age-group composition per region (verbose version, then count() version).
region_ageg <- welfare %>%
group_by(region, ageg) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 2))
head(region_ageg)
region_ageg <- welfare %>%
count(region, ageg) %>%
group_by(region) %>%
mutate(pct = round(n/sum(n)*100, 2))
# Stacked horizontal bars: age-group share per region
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip()
## -------------------------------------------------------------------- ##
# Sort regions by the share of the "old" group, ascending
list_order_old <- region_ageg %>%
filter(ageg == "old") %>%
arrange(pct)
list_order_old
# Vector of region names in that order, used to order the x axis below.
# NOTE(review): this shadows base::order for the rest of the script.
order <- list_order_old$region
order
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
class(region_ageg$ageg)
levels(region_ageg$ageg)
# Order the age-group factor so stacked bars show old / middle / young.
# BUG FIX: the argument name is `levels`, not `level` -- the original only
# worked through partial argument matching, which lintr flags and which
# breaks silently if factor() ever gains another argument starting "level".
region_ageg$ageg <- factor(region_ageg$ageg,
levels = c("old", "middle", "young"))
class(region_ageg$ageg)
levels(region_ageg$ageg)
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
|
# Dependencies: ape (read.dna / write.dna) and phyloch (write.phy,
# fillEndsWithN, delete.empty.cells), plus two local helper scripts.
library(ape)
library(phyloch)
source("dstats.R")
source("fastConc.R")
setwd("/1_Aligns/1_raw")
# Sequence metadata; columns 2/3/7 carry the terminal/clade info used later.
# Spell out FALSE: T/F are ordinary (reassignable) bindings in R.
meta <- read.csv("Seqs_metadata.csv", stringsAsFactors = FALSE)
clades <- unique(meta[, c(2, 3, 7)])
in.dir <- "/1_Aligns/2_singles_aligned"
out.dir <- "/1_Aligns/3_singles_filtered"
###################################################
### Read singles
###################################################
setwd(in.dir)
# BUG FIX: escape + anchor the extension. The original pattern ".fas"
# treats '.' as a regex wildcard and is unanchored, so any file whose
# name merely contains "fas" (e.g. "Xfasta.txt") would also match.
files <- list.files(pattern = "\\.fas$")
labels <- sub("\\.fas$", "", files)
# lapply (not sapply) so the result is always a list, even for 0/1 files.
aligns <- lapply(files, function(x) read.dna(x, "fasta"))
names(aligns) <- labels
setwd(out.dir)
###################################################
### Smart concatenation
###################################################
### Merge samples when possible; exclude duplicates
###################################################
### Initial markers table: one row per terminal, one 0/1 column per marker.
all.labels <- data.frame(table(unlist(lapply(aligns, rownames))))
seqs.tab <- data.frame(
  spp = unlist(lapply(strsplit(as.character(all.labels[, 1]), "-"), "[", 1)),
  terminal = as.character(all.labels[, 1]),
  markers = all.labels[, 2])
# Sort terminals by marker count so the best-sampled terminal of each
# species comes first (the merge step below keeps row 1 of each species).
# Spell out TRUE: T is a reassignable binding, not a keyword.
seqs.tab <- seqs.tab[order(seqs.tab[, 3], decreasing = TRUE), ]
spp <- as.character(unique(seqs.tab$spp))
terminals <- as.character(seqs.tab$terminal)
markers.tab <- matrix(0, nrow = nrow(seqs.tab), ncol = length(aligns))
rownames(markers.tab) <- seqs.tab$terminal
colnames(markers.tab) <- names(aligns)
# seq_len() is safe for an empty table (1:nrow would yield c(1, 0)).
for (i in seq_len(nrow(markers.tab))) {
  t0 <- terminals[i]
  # match() gives a position (>= 1) when the terminal is in the alignment,
  # NA otherwise -- convert to a 0/1 presence flag per marker.
  hits <- unlist(lapply(aligns, function(x) match(t0, rownames(x))))
  markers.tab[i, ] <- as.integer(!is.na(hits))
}
markers.tab <- cbind(seqs.tab, markers.tab)
markers.tab$Clade <- clades$Clade[match(markers.tab$terminal, clades$Terminal)]
write.csv(markers.tab, "0 Markers_table_initial.csv", row.names = FALSE)
### Merge then exclude duplicates
# For each species keep the best-sampled terminal (row 1 after the sort
# above), pull missing markers from its other terminals into that terminal,
# then drop the duplicate terminals from both the table and the alignments.
markers.tab$merged <- ""
markers.tab$excluded <- ""
for (i in 1:length(spp)) {
spp[i] -> sp0
which(markers.tab$spp == sp0) -> x
markers.tab[x,] -> tab0
# tab1: just the 0/1 marker columns for this species' terminals
tab0[,-c(1:3)] -> tab1
if (nrow(tab0) > 1) {
merged <- vector()
for (k in 2:(nrow(tab0))) {
# markers still missing from the primary terminal
which(tab1[1,] == 0) -> miss0
if (length(miss0) > 0) {
# markers the k-th terminal can contribute
miss0[which(tab1[k,miss0] == 1)] -> m0
if (length(m0) > 0) {
tab1[1,m0] <- 1
c(merged,as.character(tab0[k,2])) -> merged
# rename the donor sequence to the primary terminal's name
for (j in 1:length(m0)) {
aligns[[m0[j]]] -> a0
rownames(a0)[match(as.character(tab0[k,2]), rownames(a0))] <- as.character(tab0[1,2])
a0 -> aligns[[m0[j]]]
}
}
}
}
if (length(merged) > 0) {
paste(merged, collapse=", ") -> markers.tab$merged[x[1]]
}
# record the duplicate terminals, then drop their rows from the table
paste(as.character(markers.tab$terminal[x[2:length(x)]]), collapse=", ") -> markers.tab$excluded[x[1]]
markers.tab[-x[2:length(x)],] -> markers.tab
as.character(tab0[2:nrow(tab0),2]) -> duplis
# remove the duplicate terminals from every alignment
for (w in 1:length(aligns)) {
aligns[[w]] -> a0
which(is.na(match(rownames(a0), duplis)) == F) -> rem
if (length(rem) > 0) {
a0[-rem,] -> a0
}
a0 -> aligns[[w]]
}
}
}
# Sanity check: should print TRUE (exactly one row per species left)
nrow(markers.tab) == length(unique(markers.tab$spp))
### Final markers table
# Rebuild the presence/absence table after merging, now one row per species;
# carry the merged/excluded bookkeeping columns over from the merge step.
markers.tab <- markers.tab[order(markers.tab$spp), ]
merged <- markers.tab$merged
excluded <- markers.tab$excluded
spp.temp <- markers.tab$spp
all.labels <- data.frame(table(unlist(lapply(aligns, rownames))))
seqs.tab <- data.frame(
  spp = unlist(lapply(strsplit(as.character(all.labels[, 1]), "-"), "[", 1)),
  terminal = as.character(all.labels[, 1]),
  markers = all.labels[, 2])
seqs.tab <- seqs.tab[order(seqs.tab$spp), ]
spp <- as.character(unique(seqs.tab$spp))
# Sanity check: row order must match the merged table so the merged/excluded
# columns line up (prints one TRUE/FALSE per species).
spp == spp.temp
terminals <- as.character(seqs.tab$terminal)
markers.tab <- matrix(0, nrow = nrow(seqs.tab), ncol = length(aligns))
rownames(markers.tab) <- seqs.tab$terminal
colnames(markers.tab) <- names(aligns)
# seq_len() is safe for an empty table (1:nrow would yield c(1, 0)).
for (i in seq_len(nrow(markers.tab))) {
  t0 <- terminals[i]
  hits <- unlist(lapply(aligns, function(x) match(t0, rownames(x))))
  markers.tab[i, ] <- as.integer(!is.na(hits))
}
Clade <- clades$Clade[match(seqs.tab$terminal, clades$Terminal)]
seqs.tab <- cbind(Clade, seqs.tab)
markers.tab <- cbind(seqs.tab, markers.tab, merged, excluded)
write.csv(markers.tab, "1 Markers_table_final.csv", row.names = FALSE)
### Genbank table
# Accession-number table: same rows as markers.tab, but marker cells hold
# GenBank accessions (from meta) instead of 0/1 presence flags.
genbank.tab <- markers.tab[,-c(3,18,19)]
genbank.tab[,4:ncol(genbank.tab)] <- NA
for (i in 1:nrow(genbank.tab)) {
as.character(markers.tab$terminal[i]) -> p0
# terminals that were merged into this row (comma-separated string)
as.character(markers.tab$merged[i]) -> p1
unlist(strsplit(p1, ", ")) -> p1
# accessions contributed by the primary terminal
meta[which(meta$Terminal == p0),] -> m0
genbank.tab[i,match(m0$Marker, colnames(genbank.tab))] <- m0$Accession
# fill remaining gaps from the merged terminals (skipped when none merged)
if (is.na(match("", p1))) {
for (k in 1:length(p1)) {
# markers still lacking an accession in this row
colnames(genbank.tab)[which(is.na(genbank.tab[i,]))] -> keep
meta[which(meta$Terminal == p1[k]),] -> m0
m0[which(is.na(match(m0$Marker, keep)) == F),] -> m0
if (nrow(m0) > 0) {
genbank.tab[i,match(m0$Marker, colnames(genbank.tab))] <- m0$Accession
}
}
}
}
# prettify species names and replace NA cells with "-"
sub("_", " ", genbank.tab$spp) -> genbank.tab$spp
as.matrix(genbank.tab) -> genbank.tab
genbank.tab[is.na(genbank.tab[])] <- "-"
data.frame(genbank.tab) -> genbank.tab
write.csv(genbank.tab, "1 Markers_table_final_genbank.csv", row.names=F)
### Export new alignments
# Re-align every marker, trim terminal gaps, collapse terminal names to
# species names, then write FASTA + PHYLIP copies.
# NOTE(review): AlignSeqs()/AdjustAlignment()/readDNAStringSet()/
# writeXStringSet() come from Biostrings/DECIPHER, which this script never
# attaches -- confirm they are pulled in via phyloch's dependencies.
files <- paste0(labels, ".fas")
# seq_along() is safe for an empty list (1:length would yield c(1, 0)).
for (i in seq_along(aligns)) {
  a0 <- aligns[[i]]
  a0 <- delete.empty.cells(a0)
  a0 <- del.gaps(a0)
  # Round-trip through a temp FASTA to convert DNAbin -> DNAStringSet.
  write.dna(a0, file = "temp.fas", "fasta")
  a0 <- readDNAStringSet("temp.fas", format = "fasta")
  unlink("temp.fas")
  a0 <- AlignSeqs(a0, iterations = 50, refinements = 50, processors = 6)
  a0 <- AdjustAlignment(a0, processors = 6)
  writeXStringSet(a0, filepath = files[i], format = "fasta")
  a0 <- read.dna(files[i], "fasta")
  a0 <- fillEndsWithN(a0)
  a0 <- delete.empty.cells(a0)
  # Drop the collector suffix: "Genus_species-123" -> "Genus_species".
  rownames(a0) <- unlist(lapply(strsplit(rownames(a0), "-"), "[", 1))
  write.dna(a0, file = files[i], "fasta")
  write.phy(a0, file = paste0(labels[i], ".phy"))
  aligns[[i]] <- a0
}
# Concatenate all markers into one supermatrix (missing data gap-filled)
fastConc(aligns, fill.with.gaps = T, map = T) -> conc
conc$map -> map
conc$align -> conc
# report any duplicated species names (should print character(0))
rownames(conc) -> spp
spp[which(duplicated(spp))]
write.phy(conc, "concatenated.phy")
write.csv(map, "concatenated_map.csv")
# quick visual check of the supermatrix
jpeg("conc.jpg")
image(conc)
dev.off()
### Stats
# per-marker (plus concatenated) alignment statistics via local dstats()
aligns$concatenated <- conc
lapply(aligns, dstats, missing.char = "n") -> stats.out
do.call(rbind, stats.out) -> stats.out
unlist(lapply(aligns, nrow)) -> n
stats.out[,c(1:5)] -> stats.out
data.frame(Terminals=n, stats.out) -> stats.out
colnames(stats.out)[2] <- "Aligned bp"
write.csv(stats.out, "DNA_stats.csv")
|
/1 Concatenate.R
|
no_license
|
mreginato/Melastomataceae_dispersal_mode_scripts
|
R
| false
| false
| 6,891
|
r
|
# Dependencies: ape (read.dna / write.dna) and phyloch (write.phy,
# fillEndsWithN, delete.empty.cells), plus two local helper scripts.
library(ape)
library(phyloch)
source("dstats.R")
source("fastConc.R")
setwd("/1_Aligns/1_raw")
# Sequence metadata; columns 2/3/7 carry the terminal/clade info used later.
# Spell out FALSE: T/F are ordinary (reassignable) bindings in R.
meta <- read.csv("Seqs_metadata.csv", stringsAsFactors = FALSE)
clades <- unique(meta[, c(2, 3, 7)])
in.dir <- "/1_Aligns/2_singles_aligned"
out.dir <- "/1_Aligns/3_singles_filtered"
###################################################
### Read singles
###################################################
setwd(in.dir)
# BUG FIX: escape + anchor the extension. The original pattern ".fas"
# treats '.' as a regex wildcard and is unanchored, so any file whose
# name merely contains "fas" (e.g. "Xfasta.txt") would also match.
files <- list.files(pattern = "\\.fas$")
labels <- sub("\\.fas$", "", files)
# lapply (not sapply) so the result is always a list, even for 0/1 files.
aligns <- lapply(files, function(x) read.dna(x, "fasta"))
names(aligns) <- labels
setwd(out.dir)
###################################################
### Smart concatenation
###################################################
### Merge samples when possible; exclude duplicates
###################################################
### Initial markers table: one row per terminal, one 0/1 column per marker.
all.labels <- data.frame(table(unlist(lapply(aligns, rownames))))
seqs.tab <- data.frame(
  spp = unlist(lapply(strsplit(as.character(all.labels[, 1]), "-"), "[", 1)),
  terminal = as.character(all.labels[, 1]),
  markers = all.labels[, 2])
# Sort terminals by marker count so the best-sampled terminal of each
# species comes first (the merge step below keeps row 1 of each species).
# Spell out TRUE: T is a reassignable binding, not a keyword.
seqs.tab <- seqs.tab[order(seqs.tab[, 3], decreasing = TRUE), ]
spp <- as.character(unique(seqs.tab$spp))
terminals <- as.character(seqs.tab$terminal)
markers.tab <- matrix(0, nrow = nrow(seqs.tab), ncol = length(aligns))
rownames(markers.tab) <- seqs.tab$terminal
colnames(markers.tab) <- names(aligns)
# seq_len() is safe for an empty table (1:nrow would yield c(1, 0)).
for (i in seq_len(nrow(markers.tab))) {
  t0 <- terminals[i]
  # match() gives a position (>= 1) when the terminal is in the alignment,
  # NA otherwise -- convert to a 0/1 presence flag per marker.
  hits <- unlist(lapply(aligns, function(x) match(t0, rownames(x))))
  markers.tab[i, ] <- as.integer(!is.na(hits))
}
markers.tab <- cbind(seqs.tab, markers.tab)
markers.tab$Clade <- clades$Clade[match(markers.tab$terminal, clades$Terminal)]
write.csv(markers.tab, "0 Markers_table_initial.csv", row.names = FALSE)
### Merge then exclude duplicates
# For each species keep the best-sampled terminal (row 1 after the sort
# above), pull missing markers from its other terminals into that terminal,
# then drop the duplicate terminals from both the table and the alignments.
markers.tab$merged <- ""
markers.tab$excluded <- ""
for (i in 1:length(spp)) {
spp[i] -> sp0
which(markers.tab$spp == sp0) -> x
markers.tab[x,] -> tab0
# tab1: just the 0/1 marker columns for this species' terminals
tab0[,-c(1:3)] -> tab1
if (nrow(tab0) > 1) {
merged <- vector()
for (k in 2:(nrow(tab0))) {
# markers still missing from the primary terminal
which(tab1[1,] == 0) -> miss0
if (length(miss0) > 0) {
# markers the k-th terminal can contribute
miss0[which(tab1[k,miss0] == 1)] -> m0
if (length(m0) > 0) {
tab1[1,m0] <- 1
c(merged,as.character(tab0[k,2])) -> merged
# rename the donor sequence to the primary terminal's name
for (j in 1:length(m0)) {
aligns[[m0[j]]] -> a0
rownames(a0)[match(as.character(tab0[k,2]), rownames(a0))] <- as.character(tab0[1,2])
a0 -> aligns[[m0[j]]]
}
}
}
}
if (length(merged) > 0) {
paste(merged, collapse=", ") -> markers.tab$merged[x[1]]
}
# record the duplicate terminals, then drop their rows from the table
paste(as.character(markers.tab$terminal[x[2:length(x)]]), collapse=", ") -> markers.tab$excluded[x[1]]
markers.tab[-x[2:length(x)],] -> markers.tab
as.character(tab0[2:nrow(tab0),2]) -> duplis
# remove the duplicate terminals from every alignment
for (w in 1:length(aligns)) {
aligns[[w]] -> a0
which(is.na(match(rownames(a0), duplis)) == F) -> rem
if (length(rem) > 0) {
a0[-rem,] -> a0
}
a0 -> aligns[[w]]
}
}
}
# Sanity check: should print TRUE (exactly one row per species left)
nrow(markers.tab) == length(unique(markers.tab$spp))
### Final markers table
# Rebuild the presence/absence table after merging, now one row per species;
# carry the merged/excluded bookkeeping columns over from the merge step.
markers.tab <- markers.tab[order(markers.tab$spp), ]
merged <- markers.tab$merged
excluded <- markers.tab$excluded
spp.temp <- markers.tab$spp
all.labels <- data.frame(table(unlist(lapply(aligns, rownames))))
seqs.tab <- data.frame(
  spp = unlist(lapply(strsplit(as.character(all.labels[, 1]), "-"), "[", 1)),
  terminal = as.character(all.labels[, 1]),
  markers = all.labels[, 2])
seqs.tab <- seqs.tab[order(seqs.tab$spp), ]
spp <- as.character(unique(seqs.tab$spp))
# Sanity check: row order must match the merged table so the merged/excluded
# columns line up (prints one TRUE/FALSE per species).
spp == spp.temp
terminals <- as.character(seqs.tab$terminal)
markers.tab <- matrix(0, nrow = nrow(seqs.tab), ncol = length(aligns))
rownames(markers.tab) <- seqs.tab$terminal
colnames(markers.tab) <- names(aligns)
# seq_len() is safe for an empty table (1:nrow would yield c(1, 0)).
for (i in seq_len(nrow(markers.tab))) {
  t0 <- terminals[i]
  hits <- unlist(lapply(aligns, function(x) match(t0, rownames(x))))
  markers.tab[i, ] <- as.integer(!is.na(hits))
}
Clade <- clades$Clade[match(seqs.tab$terminal, clades$Terminal)]
seqs.tab <- cbind(Clade, seqs.tab)
markers.tab <- cbind(seqs.tab, markers.tab, merged, excluded)
write.csv(markers.tab, "1 Markers_table_final.csv", row.names = FALSE)
### Genbank table
# Accession-number table: same rows as markers.tab, but marker cells hold
# GenBank accessions (from meta) instead of 0/1 presence flags.
genbank.tab <- markers.tab[,-c(3,18,19)]
genbank.tab[,4:ncol(genbank.tab)] <- NA
for (i in 1:nrow(genbank.tab)) {
as.character(markers.tab$terminal[i]) -> p0
# terminals that were merged into this row (comma-separated string)
as.character(markers.tab$merged[i]) -> p1
unlist(strsplit(p1, ", ")) -> p1
# accessions contributed by the primary terminal
meta[which(meta$Terminal == p0),] -> m0
genbank.tab[i,match(m0$Marker, colnames(genbank.tab))] <- m0$Accession
# fill remaining gaps from the merged terminals (skipped when none merged)
if (is.na(match("", p1))) {
for (k in 1:length(p1)) {
# markers still lacking an accession in this row
colnames(genbank.tab)[which(is.na(genbank.tab[i,]))] -> keep
meta[which(meta$Terminal == p1[k]),] -> m0
m0[which(is.na(match(m0$Marker, keep)) == F),] -> m0
if (nrow(m0) > 0) {
genbank.tab[i,match(m0$Marker, colnames(genbank.tab))] <- m0$Accession
}
}
}
}
# prettify species names and replace NA cells with "-"
sub("_", " ", genbank.tab$spp) -> genbank.tab$spp
as.matrix(genbank.tab) -> genbank.tab
genbank.tab[is.na(genbank.tab[])] <- "-"
data.frame(genbank.tab) -> genbank.tab
write.csv(genbank.tab, "1 Markers_table_final_genbank.csv", row.names=F)
### Export new alignments
# Re-align every marker, trim terminal gaps, collapse terminal names to
# species names, then write FASTA + PHYLIP copies.
# NOTE(review): AlignSeqs()/AdjustAlignment()/readDNAStringSet()/
# writeXStringSet() come from Biostrings/DECIPHER, which this script never
# attaches -- confirm they are pulled in via phyloch's dependencies.
files <- paste0(labels, ".fas")
# seq_along() is safe for an empty list (1:length would yield c(1, 0)).
for (i in seq_along(aligns)) {
  a0 <- aligns[[i]]
  a0 <- delete.empty.cells(a0)
  a0 <- del.gaps(a0)
  # Round-trip through a temp FASTA to convert DNAbin -> DNAStringSet.
  write.dna(a0, file = "temp.fas", "fasta")
  a0 <- readDNAStringSet("temp.fas", format = "fasta")
  unlink("temp.fas")
  a0 <- AlignSeqs(a0, iterations = 50, refinements = 50, processors = 6)
  a0 <- AdjustAlignment(a0, processors = 6)
  writeXStringSet(a0, filepath = files[i], format = "fasta")
  a0 <- read.dna(files[i], "fasta")
  a0 <- fillEndsWithN(a0)
  a0 <- delete.empty.cells(a0)
  # Drop the collector suffix: "Genus_species-123" -> "Genus_species".
  rownames(a0) <- unlist(lapply(strsplit(rownames(a0), "-"), "[", 1))
  write.dna(a0, file = files[i], "fasta")
  write.phy(a0, file = paste0(labels[i], ".phy"))
  aligns[[i]] <- a0
}
# Concatenate all markers into one supermatrix (missing data gap-filled)
fastConc(aligns, fill.with.gaps = T, map = T) -> conc
conc$map -> map
conc$align -> conc
# report any duplicated species names (should print character(0))
rownames(conc) -> spp
spp[which(duplicated(spp))]
write.phy(conc, "concatenated.phy")
write.csv(map, "concatenated_map.csv")
# quick visual check of the supermatrix
jpeg("conc.jpg")
image(conc)
dev.off()
### Stats
# per-marker (plus concatenated) alignment statistics via local dstats()
aligns$concatenated <- conc
lapply(aligns, dstats, missing.char = "n") -> stats.out
do.call(rbind, stats.out) -> stats.out
unlist(lapply(aligns, nrow)) -> n
stats.out[,c(1:5)] -> stats.out
data.frame(Terminals=n, stats.out) -> stats.out
colnames(stats.out)[2] <- "Aligned bp"
write.csv(stats.out, "DNA_stats.csv")
|
# Install (if necessary) and attach all required packages.
needed_packages <- c("dplyr", "data.table", "sf", "tibble")
for (package in needed_packages) {
  # BUG FIX: install.packages() takes a character vector directly and has
  # no `character.only` argument (that argument belongs to require() /
  # library()); the bogus argument was silently passed through `...`.
  # requireNamespace() checks availability without attaching.
  if (!requireNamespace(package, quietly = TRUE)) {
    install.packages(package)
  }
  library(package, character.only = TRUE)
}
rm("needed_packages", "package")
# Set working directory to the one where the file is located
# This works when run directly
# setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# This works when sourced
# NOTE(review): sys.frame(1)$ofile only exists when the file is source()d;
# running this line interactively will fail.
setwd(dirname(sys.frame(1)$ofile))
# Load shapefiles
berlin = sf::st_read(file.path(getwd(), "Data",
"Berlin-Ortsteile-polygon.shp", fsep="/"))
# Object with the neighbourhoods (and respective district)
berlin_neighbourhood_sf = berlin %>%
dplyr::rename(id = Name,
group = BEZNAME) %>%
dplyr::select(id, group, geometry) %>%
dplyr::arrange(group)
# Buckow is composed of two separate parts, so we need to join them
# (sf's summarise has a do_union argument that unions geometries per group)
berlin_neighbourhood_singlebuckow_sf = berlin_neighbourhood_sf %>%
dplyr::group_by(id, group) %>%
dplyr::summarize(do_union = TRUE)
# Object with the districts: union all neighbourhood polygons per district
berlin_district_sf = berlin_neighbourhood_sf %>%
dplyr::group_by(group) %>%
dplyr::summarize(do_union = TRUE) %>%
dplyr::mutate(id = group)
# Create dataframes with the names for plotting
# (polygon centroid coordinates + a label string for each area; hyphenated
# names get an HTML line break so long labels wrap)
# Neighbourhoods
berlin_neighbourhoods_names = berlin_neighbourhood_sf %>%
sf::st_centroid() %>%
sf::st_coordinates() %>%
base::as.data.frame() %>%
dplyr::rename(long = X,
lat = Y) %>%
dplyr::mutate(id = berlin_neighbourhood_sf$id,
group = berlin_neighbourhood_sf$group,
name = gsub("-", "-<br>", berlin_neighbourhood_sf$id))
# Districts
berlin_districts_names = berlin_district_sf %>%
sf::st_centroid() %>%
sf::st_coordinates() %>%
base::as.data.frame() %>%
dplyr::rename(long = X,
lat = Y) %>%
dplyr::mutate(id = berlin_district_sf$id,
group = berlin_district_sf$group,
name = gsub("-", "-<br>", berlin_district_sf$id))
# Remove not needed data
rm("berlin")
|
/SPL_Berlin_Districts_Neighbourhoods/berlin_districts_neighbourhoods.R
|
no_license
|
silvia-ventoruzzo/SPL-WISE-2018
|
R
| false
| false
| 2,157
|
r
|
# Install (if necessary) and attach all required packages.
needed_packages <- c("dplyr", "data.table", "sf", "tibble")
for (package in needed_packages) {
  # BUG FIX: install.packages() takes a character vector directly and has
  # no `character.only` argument (that argument belongs to require() /
  # library()); the bogus argument was silently passed through `...`.
  # requireNamespace() checks availability without attaching.
  if (!requireNamespace(package, quietly = TRUE)) {
    install.packages(package)
  }
  library(package, character.only = TRUE)
}
rm("needed_packages", "package")
# Set working directory to the one where the file is located
# This works when run directly
# setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# This works when sourced
# NOTE(review): sys.frame(1)$ofile only exists when the file is source()d;
# running this line interactively will fail.
setwd(dirname(sys.frame(1)$ofile))
# Load shapefiles
berlin = sf::st_read(file.path(getwd(), "Data",
"Berlin-Ortsteile-polygon.shp", fsep="/"))
# Object with the neighbourhoods (and respective district)
berlin_neighbourhood_sf = berlin %>%
dplyr::rename(id = Name,
group = BEZNAME) %>%
dplyr::select(id, group, geometry) %>%
dplyr::arrange(group)
# Buckow is composed of two separate parts, so we need to join them
# (sf's summarise has a do_union argument that unions geometries per group)
berlin_neighbourhood_singlebuckow_sf = berlin_neighbourhood_sf %>%
dplyr::group_by(id, group) %>%
dplyr::summarize(do_union = TRUE)
# Object with the districts: union all neighbourhood polygons per district
berlin_district_sf = berlin_neighbourhood_sf %>%
dplyr::group_by(group) %>%
dplyr::summarize(do_union = TRUE) %>%
dplyr::mutate(id = group)
# Create dataframes with the names for plotting
# (polygon centroid coordinates + a label string for each area; hyphenated
# names get an HTML line break so long labels wrap)
# Neighbourhoods
berlin_neighbourhoods_names = berlin_neighbourhood_sf %>%
sf::st_centroid() %>%
sf::st_coordinates() %>%
base::as.data.frame() %>%
dplyr::rename(long = X,
lat = Y) %>%
dplyr::mutate(id = berlin_neighbourhood_sf$id,
group = berlin_neighbourhood_sf$group,
name = gsub("-", "-<br>", berlin_neighbourhood_sf$id))
# Districts
berlin_districts_names = berlin_district_sf %>%
sf::st_centroid() %>%
sf::st_coordinates() %>%
base::as.data.frame() %>%
dplyr::rename(long = X,
lat = Y) %>%
dplyr::mutate(id = berlin_district_sf$id,
group = berlin_district_sf$group,
name = gsub("-", "-<br>", berlin_district_sf$id))
# Remove not needed data
rm("berlin")
|
# Source: PLS Path Modeling with R
# by Gaston Sanchez
# www.gastonsanchez.com
# Chapter 7: Moderating Effects (moderating variables)
# NOTE(review): rm(list=ls()) and a hard-coded setwd() make the script
# non-portable; consider removing both when reusing it.
rm(list=ls())
setwd("~/R/Structural Equation Modeling")
# Case Study: Simplified Customer Satisfaction
# load package 'plspm'
library("plspm")
# Step 1: load the plspm library and fetch the satisfaction data
# get data satisfaction
data(satisfaction)
# duplicate satisfaction as satisfaction1
satisfaction1 = satisfaction
# how many columns in satisfaction1?
ncol(satisfaction1)
# create product indicator terms between Image and Satisfaction
# (all 3 x 3 combinations of the imag* and sat* indicators)
satisfaction1$inter1 = satisfaction$imag1 * satisfaction$sat1
satisfaction1$inter2 = satisfaction$imag1 * satisfaction$sat2
satisfaction1$inter3 = satisfaction$imag1 * satisfaction$sat3
satisfaction1$inter4 = satisfaction$imag2 * satisfaction$sat1
satisfaction1$inter5 = satisfaction$imag2 * satisfaction$sat2
satisfaction1$inter6 = satisfaction$imag2 * satisfaction$sat3
satisfaction1$inter7 = satisfaction$imag3 * satisfaction$sat1
satisfaction1$inter8 = satisfaction$imag3 * satisfaction$sat2
satisfaction1$inter9 = satisfaction$imag3 * satisfaction$sat3
# check again the number of columns in satisfaction1
ncol(satisfaction1)
# Step 2: build the PLS inner model, define the blocks for the outer model
# list, and run the path analysis with the plspm function.
# create path matrix for the inner model
r1 = c(0, 0, 0, 0)
r2 = c(0, 0, 0, 0)
r3 = c(0, 0, 0, 0)
r4 = c(1, 1, 1, 0)
prod_path = rbind(r1, r2, r3, r4)
rownames(prod_path) = c("Image", "Inter", "Satisfaction", "Loyalty")
colnames(prod_path) = c("Image", "Inter", "Satisfaction", "Loyalty")
# define outer model list (column indices of each latent variable's block)
prod_blocks = list(1:3, 29:37, 20:22, 24:26)
# define reflective indicators (mode "A" for all four blocks)
prod_modes = rep("A", 4)
# run plspm analysis with bootstrap validation
prod_pls = plspm(satisfaction1, prod_path, prod_blocks, modes = prod_modes,
boot.val = TRUE, br = 200)
# Step 3: after running plspm, inspect the path coefficients.
# check path coefficients
prod_pls$path_coefs
# plot inner model
plot(prod_pls)
# Use the bootstrap results to judge the significance of the coefficients.
# check bootstrapped path coefficients
prod_pls$boot$paths
# Even though Inter has a negative effect on Loyalty, its associated bootstrap confidence
# interval contains the zero, having a non-significant effect. This means that the moderating
# effect of Inter on the relation between Satisfaction and Loyalty is not significant.
|
/PLS04.R
|
no_license
|
Joshuariver/SEM
|
R
| false
| false
| 2,602
|
r
|
# Source: PLS Path Modeling with R
# by Gaston Sanchez
# www.gastonsanchez.com
# Chapter 7: Moderating Effects (moderating variables)
# NOTE(review): rm(list=ls()) and a hard-coded setwd() make the script
# non-portable; consider removing both when reusing it.
rm(list=ls())
setwd("~/R/Structural Equation Modeling")
# Case Study: Simplified Customer Satisfaction
# load package 'plspm'
library("plspm")
# Step 1: load the plspm library and fetch the satisfaction data
# get data satisfaction
data(satisfaction)
# duplicate satisfaction as satisfaction1
satisfaction1 = satisfaction
# how many columns in satisfaction1?
ncol(satisfaction1)
# create product indicator terms between Image and Satisfaction
# (all 3 x 3 combinations of the imag* and sat* indicators)
satisfaction1$inter1 = satisfaction$imag1 * satisfaction$sat1
satisfaction1$inter2 = satisfaction$imag1 * satisfaction$sat2
satisfaction1$inter3 = satisfaction$imag1 * satisfaction$sat3
satisfaction1$inter4 = satisfaction$imag2 * satisfaction$sat1
satisfaction1$inter5 = satisfaction$imag2 * satisfaction$sat2
satisfaction1$inter6 = satisfaction$imag2 * satisfaction$sat3
satisfaction1$inter7 = satisfaction$imag3 * satisfaction$sat1
satisfaction1$inter8 = satisfaction$imag3 * satisfaction$sat2
satisfaction1$inter9 = satisfaction$imag3 * satisfaction$sat3
# check again the number of columns in satisfaction1
ncol(satisfaction1)
# Step 2: build the PLS inner model, define the blocks for the outer model
# list, and run the path analysis with the plspm function.
# create path matrix for the inner model
r1 = c(0, 0, 0, 0)
r2 = c(0, 0, 0, 0)
r3 = c(0, 0, 0, 0)
r4 = c(1, 1, 1, 0)
prod_path = rbind(r1, r2, r3, r4)
rownames(prod_path) = c("Image", "Inter", "Satisfaction", "Loyalty")
colnames(prod_path) = c("Image", "Inter", "Satisfaction", "Loyalty")
# define outer model list (column indices of each latent variable's block)
prod_blocks = list(1:3, 29:37, 20:22, 24:26)
# define reflective indicators (mode "A" for all four blocks)
prod_modes = rep("A", 4)
# run plspm analysis with bootstrap validation
prod_pls = plspm(satisfaction1, prod_path, prod_blocks, modes = prod_modes,
boot.val = TRUE, br = 200)
# Step 3: after running plspm, inspect the path coefficients.
# check path coefficients
prod_pls$path_coefs
# plot inner model
plot(prod_pls)
# Use the bootstrap results to judge the significance of the coefficients.
# check bootstrapped path coefficients
prod_pls$boot$paths
# Even though Inter has a negative effect on Loyalty, its associated bootstrap confidence
# interval contains the zero, having a non-significant effect. This means that the moderating
# effect of Inter on the relation between Satisfaction and Loyalty is not significant.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_formula.R
\name{get_formula}
\alias{get_formula}
\title{Get formula of models.}
\usage{
get_formula(x, ...)
}
\arguments{
\item{x}{Object.}
\item{...}{Arguments passed to or from other methods.}
}
\description{
Get formula of models. Implemented for:
\itemize{
\item{analyze.merModLmerTest}
\item{analyze.glmerMod}
\item{analyze.lm}
\item{analyze.glm}
\item{analyze.stanreg}
}
}
\examples{
library(psycho)
library(lme4)
fit <- lme4::glmer(vs ~ wt + (1|gear), data=mtcars, family="binomial")
fit <- lm(hp ~ wt, data=mtcars)
get_formula(fit)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
/man/get_formula.Rd
|
permissive
|
HugoNjb/psycho.R
|
R
| false
| true
| 710
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_formula.R
\name{get_formula}
\alias{get_formula}
\title{Get formula of models.}
\usage{
get_formula(x, ...)
}
\arguments{
\item{x}{Object.}
\item{...}{Arguments passed to or from other methods.}
}
\description{
Get formula of models. Implemented for:
\itemize{
\item{analyze.merModLmerTest}
\item{analyze.glmerMod}
\item{analyze.lm}
\item{analyze.glm}
\item{analyze.stanreg}
}
}
\examples{
library(psycho)
library(lme4)
fit <- lme4::glmer(vs ~ wt + (1|gear), data=mtcars, family="binomial")
fit <- lm(hp ~ wt, data=mtcars)
get_formula(fit)
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{howard7}
\alias{howard7}
\title{Production, imports, exports, and consumption of hardwood products, by major product, 1965-1999.}
\format{A data frame with 56 observations on 31 variables:
\describe{
\item{AllProduction.Prod}{All products production}
\item{AllProduct.Consump}{All products consumption}
\item{Ind.RW.Tot.Prod}{Industrial roundwood uses total production}
\item{Ind.RW.Tot.Imports}{Industrial roundwood uses total imports}
\item{Ind.RW.Tot.Exports}{Industrial roundwood uses total exports}
\item{Ind.RW.Tot.Consump}{Industrial roundwood uses total consumption}
\item{Ind.RW.Lum.Prod}{Industrial roundwood uses lumber production}
\item{Ind.RW.Lum.Imports}{Industrial roundwood uses lumber imports}
\item{Ind.RW.Lum.Exports}{Industrial roundwood uses lumber exports}
\item{Ind.RW.Lum.Consump}{Industrial roundwood uses lumber consumption}
\item{Ind.RW.PlyandVen.Prod}{Industrial roundwood uses plywood and veneer production}
\item{Ind.RW.PlyandVen.Imports}{Industrial roundwood uses plywood and veneer imports}
\item{Ind.RW.PlyandVen.Exports}{Industrial roundwood uses plywood and veneer exports}
\item{Ind.RW.PlyandVen.Consump}{Industrial roundwood uses plywood and veneer consumption}
\item{Ind.RW.Pulp.Prod}{Industrial roundwood uses pulp-based products production}
\item{Ind.RW.Pulp.Imports}{Industrial roundwood uses pulp-based products imports}
\item{Ind.RW.Pulp.Exports}{Industrial roundwood uses pulp-based products exports}
\item{Ind.RW.Pulp.Consump}{Industrial roundwood uses pulp-based products consumption}
\item{Ind.RW.OtherIndustrial.ProdAndConsump}{Other industrial product production and consumption}
\item{Ind.RW.Logs.Imports}{Industrial roundwood uses log imports}
\item{Ind.RW.Logs.Exports}{Industrial roundwood uses log exports}
\item{Ind.RW.Pulpchip.Imports}{Industrial roundwood uses pulpwood chip imports}
\item{Ind.RW.Pulpchip.Exports}{Industrial roundwood uses pulpwood chip exports}
\item{FuelWood.ProdAndConsumption}{Fuelwood production and consumption}
\item{UnNamed1}{}
\item{UnNamed2}{}
\item{UnNamed3}{}
\item{UnNamed4}{}
\item{UnNamed5}{}
\item{UnNamed6}{}
\item{UnNamed7}{}
}}
\source{
USFS Estimations and Bureau of Census Data
}
\usage{
howard7
}
\description{
All units are in million cubic feet except when specified otherwise.
}
\keyword{datasets}
|
/man/howard7.Rd
|
no_license
|
alanarnholt/WOODCARB3R
|
R
| false
| true
| 2,405
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{howard7}
\alias{howard7}
\title{Production, imports, exports, and consumption of hardwood products, by major product, 1965-1999.}
\format{A data frame with 56 observations on 31 variables:
\describe{
\item{AllProduction.Prod}{All products production}
\item{AllProduct.Consump}{All products consumption}
\item{Ind.RW.Tot.Prod}{Industrial roundwood uses total production}
\item{Ind.RW.Tot.Imports}{Industrial roundwood uses total imports}
\item{Ind.RW.Tot.Exports}{Industrial roundwood uses total exports}
\item{Ind.RW.Tot.Consump}{Industrial roundwood uses total consumption}
\item{Ind.RW.Lum.Prod}{Industrial roundwood uses lumber production}
\item{Ind.RW.Lum.Imports}{Industrial roundwood uses lumber imports}
\item{Ind.RW.Lum.Exports}{Industrial roundwood uses lumber exports}
\item{Ind.RW.Lum.Consump}{Industrial roundwood uses lumber consumption}
\item{Ind.RW.PlyandVen.Prod}{Industrial roundwood uses plywood and veneer production}
\item{Ind.RW.PlyandVen.Imports}{Industrial roundwood uses plywood and veneer imports}
\item{Ind.RW.PlyandVen.Exports}{Industrial roundwood uses plywood and veneer exports}
\item{Ind.RW.PlyandVen.Consump}{Industrial roundwood uses plywood and veneer consumption}
\item{Ind.RW.Pulp.Prod}{Industrial roundwood uses pulp-based products production}
\item{Ind.RW.Pulp.Imports}{Industrial roundwood uses pulp-based products imports}
\item{Ind.RW.Pulp.Exports}{Industrial roundwood uses pulp-based products exports}
\item{Ind.RW.Pulp.Consump}{Industrial roundwood uses pulp-based products consumption}
\item{Ind.RW.OtherIndustrial.ProdAndConsump}{Other industrial product production and consumption}
\item{Ind.RW.Logs.Imports}{Industrial roundwood uses log imports}
\item{Ind.RW.Logs.Exports}{Industrial roundwood uses log exports}
\item{Ind.RW.Pulpchip.Imports}{Industrial roundwood uses pulpwood chip imports}
\item{Ind.RW.Pulpchip.Exports}{Industrial roundwood uses pulpwood chip exports}
\item{FuelWood.ProdAndConsumption}{Fuelwood production and consumption}
\item{UnNamed1}{}
\item{UnNamed2}{}
\item{UnNamed3}{}
\item{UnNamed4}{}
\item{UnNamed5}{}
\item{UnNamed6}{}
\item{UnNamed7}{}
}}
\source{
USFS Estimations and Bureau of Census Data
}
\usage{
howard7
}
\description{
All units are in million cubic feet except when specified otherwise.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stata.R
\name{scale_linetype_stata}
\alias{scale_linetype_stata}
\title{Stata linetype palette (discrete)}
\usage{
scale_linetype_stata(...)
}
\arguments{
\item{...}{Arguments passed on to \code{discrete_scale}
\describe{
\item{breaks}{One of:
\itemize{
\item \code{NULL} for no breaks
\item \code{waiver()} for the default breaks computed by the
transformation object
\item A character vector of breaks
\item A function that takes the limits as input and returns breaks
as output
}}
\item{limits}{A character vector that defines possible values of the scale
and their order.}
\item{drop}{Should unused factor levels be omitted from the scale?
The default, \code{TRUE}, uses the levels that appear in the data;
\code{FALSE} uses all the levels in the factor.}
\item{na.translate}{Unlike continuous scales, discrete scales can easily show
missing values, and do so by default. If you want to remove missing values
from a discrete scale, specify \code{na.translate = FALSE}.}
\item{na.value}{If \code{na.translate = TRUE}, what aesthetic value
should the missing values be displayed as? Does not apply to position
scales where \code{NA} is always placed at the far right.}
\item{aesthetics}{The names of the aesthetics that this scale works with}
\item{scale_name}{The name of the scale}
\item{palette}{A palette function that when called with a single integer
argument (the number of levels in the scale) returns the values that
they should take}
\item{name}{The name of the scale. Used as axis or legend title. If
\code{waiver()}, the default, the name of the scale is taken from the first
mapping used for that aesthetic. If \code{NULL}, the legend title will be
omitted.}
\item{labels}{One of:
\itemize{
\item \code{NULL} for no labels
\item \code{waiver()} for the default labels computed by the
transformation object
\item A character vector giving labels (must be same length as \code{breaks})
\item A function that takes the breaks as input and returns labels
as output
}}
\item{guide}{A function used to create a guide or its name. See
\code{\link[=guides]{guides()}} for more info.}
\item{super}{The super class to use for the constructed scale}
}}
}
\description{
See \code{\link{stata_linetype_pal}} for details.
}
\examples{
require("dplyr")
require("tidyr")
require("ggplot2")
rescale01 <- function(x) {
(x - min(x)) / diff(range(x))
}
gather(economics, variable, value, -date) \%>\%
group_by(variable) \%>\%
mutate(value = rescale01(value)) \%>\%
ggplot(aes(x = date, y = value, linetype = variable)) +
geom_line() +
scale_linetype_stata()
}
\seealso{
Other linetype stata: \code{\link{stata_linetype_pal}}
}
|
/man/scale_linetype_stata.Rd
|
no_license
|
Mababyak/ggthemes
|
R
| false
| true
| 2,733
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stata.R
\name{scale_linetype_stata}
\alias{scale_linetype_stata}
\title{Stata linetype palette (discrete)}
\usage{
scale_linetype_stata(...)
}
\arguments{
\item{...}{Arguments passed on to \code{discrete_scale}
\describe{
\item{breaks}{One of:
\itemize{
\item \code{NULL} for no breaks
\item \code{waiver()} for the default breaks computed by the
transformation object
\item A character vector of breaks
\item A function that takes the limits as input and returns breaks
as output
}}
\item{limits}{A character vector that defines possible values of the scale
and their order.}
\item{drop}{Should unused factor levels be omitted from the scale?
The default, \code{TRUE}, uses the levels that appear in the data;
\code{FALSE} uses all the levels in the factor.}
\item{na.translate}{Unlike continuous scales, discrete scales can easily show
missing values, and do so by default. If you want to remove missing values
from a discrete scale, specify \code{na.translate = FALSE}.}
\item{na.value}{If \code{na.translate = TRUE}, what aesthetic value
should the missing values be displayed as? Does not apply to position
scales where \code{NA} is always placed at the far right.}
\item{aesthetics}{The names of the aesthetics that this scale works with}
\item{scale_name}{The name of the scale}
\item{palette}{A palette function that when called with a single integer
argument (the number of levels in the scale) returns the values that
they should take}
\item{name}{The name of the scale. Used as axis or legend title. If
\code{waiver()}, the default, the name of the scale is taken from the first
mapping used for that aesthetic. If \code{NULL}, the legend title will be
omitted.}
\item{labels}{One of:
\itemize{
\item \code{NULL} for no labels
\item \code{waiver()} for the default labels computed by the
transformation object
\item A character vector giving labels (must be same length as \code{breaks})
\item A function that takes the breaks as input and returns labels
as output
}}
\item{guide}{A function used to create a guide or its name. See
\code{\link[=guides]{guides()}} for more info.}
\item{super}{The super class to use for the constructed scale}
}}
}
\description{
See \code{\link{stata_linetype_pal}} for details.
}
\examples{
require("dplyr")
require("tidyr")
require("ggplot2")
rescale01 <- function(x) {
(x - min(x)) / diff(range(x))
}
gather(economics, variable, value, -date) \%>\%
group_by(variable) \%>\%
mutate(value = rescale01(value)) \%>\%
ggplot(aes(x = date, y = value, linetype = variable)) +
geom_line() +
scale_linetype_stata()
}
\seealso{
Other linetype stata: \code{\link{stata_linetype_pal}}
}
|
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
#setwd('/Users/gzchen/Documents/GitHub/thesis')
source('MyFuns.R')
##########################
######generate tree#######
set.seed(1012)
p <- 50
phy <- rtree(p)
phy$tip.label <- 1:p
######Define clusters & plot tree########
#define the associated clusters by the highest internal node
CLS <- c(56,75)
asso_tips <- plot_tree(phy, cls = CLS)
############################################
################generate data###############
ln_par <- ln_param(phy)
# generate parameters used for generating OTUs
gma <- gen_gma(ln_par = ln_par, p1 = 0.02, p2 = 0.05, tree = phy, cls = CLS)
# generate coefficients with given effect size
beta_true <- true_beta(phy, CLS, gma)
# recover full vector
#######generate penalty matrix & Data prep#######
# generate penalty matrix
DW <- gen_D(phy, m = 2, weight = 'max', type = 'wang1')
# data prep. for pen. reg.
ref <- 1
################check selective type I error#####
NN <- 300
correct <- 0
# P_val.norm <- matrix(nrow = NN, ncol = ncol(X_new1))
P_val.chi1 <- NULL
P_val.chi2 <- NULL
P_val.norm.zero1 <- NULL
P_val.norm.zero2 <- NULL
P_val.norm.nonzero1 <- NULL
P_val.norm.nonzero2 <- NULL
for (i in 1:NN) {
Data <- gen_dat(n = 500, ln_par = ln_par, gma = gma, tree = phy, cls = CLS, sig = 1)
model.data <- data_prep(Data, DW, ref, alp = 0.26, normlz = F)
G_cen <- model.data$G_cen
y_cen <- model.data$y_cen
X_cen1 <- model.data$X_cen1
D1 <- model.data$D1
# model
res <- gen_select(y_cen, X_cen1, D1, btol = 1e-6)
# model result and assessment
beta_esti <- esti_beta(res$beta[,res$stop.index], ref)
plot_beta_bic(beta_true, beta_esti, res$bic)
fuse_ass <- assess_fuse(phy, beta_esti, beta_true)
sparse_ass <- assess_sparse(beta_esti, beta_true)
if (!fuse_ass$nFPR & !sparse_ass$FPR) {
rdig <- 6
approx_beta <- round(beta_esti, rdig)
# different values of beta exist
level_beta <- unique(approx_beta)
# how many OTUs correspond to the values in level_beta
# num_ele_level <- sapply(level_beta, FUN = function(x) sum(approx_beta == x))
ref_new <- which(level_beta == approx_beta[ref])
# form new covariates
X_new <- sapply(level_beta, FUN = function(x) rowSums(model.data$X_cen[,approx_beta == x,drop = F]))
X_new1 <- cbind(G_cen, X_new[,-ref_new])
# assumed that ref_new == 1
# H_0 is false
beta_nonzero <- beta_true != 0
beta_nonzero_level <- unique(approx_beta[beta_nonzero])
index_nonzero <- sapply(beta_nonzero_level, function(x) which(level_beta == x))
# H_0 is true
beta_zero <- !beta_nonzero
beta_zero_level <- unique(approx_beta[beta_zero])
index_zero <- setdiff(sapply(beta_zero_level, function(x) which(level_beta == x)), ref_new)
for (j in 2:ncol(X_new1)){
eta <- ginv(X_new1)[j,]
if (j %in% index_zero) {
P_val.norm.zero1 <- c(P_val.norm.zero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2))
P_val.norm.zero2 <- c(P_val.norm.zero2, test_norm2(y_cen, eta, res$sig2))
} else {
P_val.norm.nonzero1 <- c(P_val.norm.nonzero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2))
P_val.norm.nonzero2 <- c(P_val.norm.nonzero2, test_norm2(y_cen, eta, res$sig2))
# if (length(P_val.norm.nonzero1) != length(P_val.norm.nonzero2)) break
}
}
# interaction terms
Intact <- G_cen * X_new[,-ref_new]
Intact_cen <- t(t(Intact) - colMeans(Intact))
P <- proj.mat(Intact_cen) %*% (diag(length(y_cen)) - proj.mat(X_new1))
P_val.chi1 <- c(P_val.chi1, test_chi(y_cen, res$gama, res$dd, P, res$sig2))
P_val.chi2 <- c(P_val.chi2, test_chi2(y_cen, P, res$sig2))
}
print(paste0('i=',i,'done'))
if (i == 10) break
}
# save.image('type1err.RData')
###########################################
# # testing for single covariate
# etas <- ginv(X_new1)
# for (j in 2:ncol(X_new1)){
# eta <- etas[j,]
# if (beta_true[new_res$fused_OTU[[j]][1]] == 0) {
# P_val.norm.zero1 <- c(P_val.norm.zero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2, btol = 0))
# P_val.norm.zero2 <- c(P_val.norm.zero2, test_norm2(y_cen, eta, res$sig2))
# } else {
# P_val.norm.nonzero1 <- c(P_val.norm.nonzero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2, btol = 0))
# P_val.norm.nonzero2 <- c(P_val.norm.nonzero2, test_norm2(y_cen, eta, res$sig2))
# }
# }
#
# # testing for interaction terms
# Intact <- G_cen * X_new
# Intact_cen <- t(t(Intact) - colMeans(Intact))
# P <- proj.mat(Intact_cen) %*% (diag(length(y_cen)) - proj.mat(X_new1))
#
# P_val.chi1 <- c(P_val.chi1, test_chi(y_cen, res$gama, res$dd, P, res$sig2, btol = 0))
# P_val.chi2 <- c(P_val.chi2, test_chi2(y_cen, P, res$sig2))
# }
#############################################
|
/type1err.R
|
no_license
|
gz-chen/thesis
|
R
| false
| false
| 4,808
|
r
|
# Simulation study: empirical check of the selective type-I error of
# post-selection tests after tree-guided penalized regression on simulated
# OTU data.  All modelling helpers used below (plot_tree, ln_param,
# gen_gma, true_beta, gen_D, gen_dat, data_prep, gen_select, esti_beta,
# assess_fuse, assess_sparse, test_norm*, test_chi*, proj.mat, ...) are
# defined in MyFuns.R.
# NOTE(review): rtree() and ginv() are presumably attached via MyFuns.R
# (ape / MASS) -- confirm before running standalone.
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
#setwd('/Users/gzchen/Documents/GitHub/thesis')
source('MyFuns.R')
##########################
######generate tree#######
# Fixed seed so the simulated tree (and everything downstream) is reproducible.
set.seed(1012)
p <- 50
phy <- rtree(p)
phy$tip.label <- 1:p
######Define clusters & plot tree########
#define the associated clusters by the highest internal node
CLS <- c(56,75)
asso_tips <- plot_tree(phy, cls = CLS)
############################################
################generate data###############
ln_par <- ln_param(phy)
# generate parameters used for generating OTUs
gma <- gen_gma(ln_par = ln_par, p1 = 0.02, p2 = 0.05, tree = phy, cls = CLS)
# generate coefficients with given effect size
beta_true <- true_beta(phy, CLS, gma)
# recover full vector
#######generate penalty matrix & Data prep#######
# generate penalty matrix (order m = 2, 'max' weighting, 'wang1' type)
DW <- gen_D(phy, m = 2, weight = 'max', type = 'wang1')
# data prep. for pen. reg.
# reference OTU index used in data_prep -- NOTE(review): assumed to remove
# the compositional constraint; verify in data_prep()
ref <- 1
################check selective type I error#####
NN <- 300
correct <- 0
# P-value accumulators.  "zero" = null hypothesis true (true beta == 0),
# "nonzero" = null false.  Suffix 1 = test that conditions on the selection
# quantities (res$gama, res$dd); suffix 2 = the variant that does not
# (test_norm2 / test_chi2).
# P_val.norm <- matrix(nrow = NN, ncol = ncol(X_new1))
P_val.chi1 <- NULL
P_val.chi2 <- NULL
P_val.norm.zero1 <- NULL
P_val.norm.zero2 <- NULL
P_val.norm.nonzero1 <- NULL
P_val.norm.nonzero2 <- NULL
for (i in 1:NN) {
Data <- gen_dat(n = 500, ln_par = ln_par, gma = gma, tree = phy, cls = CLS, sig = 1)
model.data <- data_prep(Data, DW, ref, alp = 0.26, normlz = F)
G_cen <- model.data$G_cen
y_cen <- model.data$y_cen
X_cen1 <- model.data$X_cen1
D1 <- model.data$D1
# model
res <- gen_select(y_cen, X_cen1, D1, btol = 1e-6)
# model result and assessment
beta_esti <- esti_beta(res$beta[,res$stop.index], ref)
plot_beta_bic(beta_true, beta_esti, res$bic)
fuse_ass <- assess_fuse(phy, beta_esti, beta_true)
sparse_ass <- assess_sparse(beta_esti, beta_true)
# only keep replicates with no fusion/sparsity false positives, i.e.
# replicates where the selected model structure is correct
if (!fuse_ass$nFPR & !sparse_ass$FPR) {
# collapse numerically equal coefficients (to 6 decimals) into groups
rdig <- 6
approx_beta <- round(beta_esti, rdig)
# different values of beta exist
level_beta <- unique(approx_beta)
# how many OTUs correspond to the values in level_beta
# num_ele_level <- sapply(level_beta, FUN = function(x) sum(approx_beta == x))
ref_new <- which(level_beta == approx_beta[ref])
# form new covariates: sum the centered design columns within each
# fused coefficient group
X_new <- sapply(level_beta, FUN = function(x) rowSums(model.data$X_cen[,approx_beta == x,drop = F]))
X_new1 <- cbind(G_cen, X_new[,-ref_new])
# assumed that ref_new == 1
# H_0 is false
beta_nonzero <- beta_true != 0
beta_nonzero_level <- unique(approx_beta[beta_nonzero])
index_nonzero <- sapply(beta_nonzero_level, function(x) which(level_beta == x))
# H_0 is true
beta_zero <- !beta_nonzero
beta_zero_level <- unique(approx_beta[beta_zero])
index_zero <- setdiff(sapply(beta_zero_level, function(x) which(level_beta == x)), ref_new)
# test each collapsed covariate (skipping the intercept-like column 1)
# NOTE(review): ginv(X_new1) is recomputed for every j; could be hoisted
for (j in 2:ncol(X_new1)){
eta <- ginv(X_new1)[j,]
if (j %in% index_zero) {
P_val.norm.zero1 <- c(P_val.norm.zero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2))
P_val.norm.zero2 <- c(P_val.norm.zero2, test_norm2(y_cen, eta, res$sig2))
} else {
P_val.norm.nonzero1 <- c(P_val.norm.nonzero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2))
P_val.norm.nonzero2 <- c(P_val.norm.nonzero2, test_norm2(y_cen, eta, res$sig2))
# if (length(P_val.norm.nonzero1) != length(P_val.norm.nonzero2)) break
}
}
# interaction terms: chi-square tests on the projection of the centered
# G x group interactions orthogonal to the main-effect design X_new1
Intact <- G_cen * X_new[,-ref_new]
Intact_cen <- t(t(Intact) - colMeans(Intact))
P <- proj.mat(Intact_cen) %*% (diag(length(y_cen)) - proj.mat(X_new1))
P_val.chi1 <- c(P_val.chi1, test_chi(y_cen, res$gama, res$dd, P, res$sig2))
P_val.chi2 <- c(P_val.chi2, test_chi2(y_cen, P, res$sig2))
}
print(paste0('i=',i,'done'))
# early exit after 10 replicates (debug cap; NN = 300 is never reached)
if (i == 10) break
}
# save.image('type1err.RData')
###########################################
# # testing for single covariate
# etas <- ginv(X_new1)
# for (j in 2:ncol(X_new1)){
#   eta <- etas[j,]
#   if (beta_true[new_res$fused_OTU[[j]][1]] == 0) {
#     P_val.norm.zero1 <- c(P_val.norm.zero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2, btol = 0))
#     P_val.norm.zero2 <- c(P_val.norm.zero2, test_norm2(y_cen, eta, res$sig2))
#   } else {
#     P_val.norm.nonzero1 <- c(P_val.norm.nonzero1, test_norm(y_cen, res$gama, res$dd, eta, res$sig2, btol = 0))
#     P_val.norm.nonzero2 <- c(P_val.norm.nonzero2, test_norm2(y_cen, eta, res$sig2))
#   }
# }
#
# # testing for interaction terms
# Intact <- G_cen * X_new
# Intact_cen <- t(t(Intact) - colMeans(Intact))
# P <- proj.mat(Intact_cen) %*% (diag(length(y_cen)) - proj.mat(X_new1))
#
# P_val.chi1 <- c(P_val.chi1, test_chi(y_cen, res$gama, res$dd, P, res$sig2, btol = 0))
# P_val.chi2 <- c(P_val.chi2, test_chi2(y_cen, P, res$sig2))
# }
#############################################
|
library(testthat)
source("tools/TestsTools.R")
test_that("Test Gramm_matrix(not_matrix)",{
expect_error(Gramm_matrix(c(NaN, NaN)), "Not appropriate input format")
expect_error(Gramm_matrix(data.frame(NaN, NaN)), "Not appropriate input format")
expect_error(Gramm_matrix(list(NaN, NaN)), "Not appropriate input format")
expect_error(Gramm_matrix(factor(NaN, NaN)), "Not appropriate input format")
expect_error(Gramm_matrix("String"), "Not appropriate input format")
})
test_that("Test output type of Gramm_matrix", {
w1 <- wcoeff_two_components(1)
w2 <- wcoeff_two_components(2)
w4 <- wcoeff_three_components(4)
expect_is(Gramm_matrix(w1), "matrix")
expect_is(Gramm_matrix(w2), "matrix")
expect_is(Gramm_matrix(w4), "matrix")
})
test_that("Test Gramm_matrix(one_component_mixture)",{
expect_error(Gramm_matrix(rbind(0, 1)), "Not correct mixture")
})
test_that("Test Gramm_matrix(two_component_mixture)",{
w1 <- wcoeff_two_components(1)
w2 <- wcoeff_two_components(2)
w4 <- wcoeff_two_components(4)
expect_equal(Gramm_matrix(w1), rbind(c(1, 0), c(0, 0)))
expect_equal(Gramm_matrix(w2), cbind(c(0.625, 0.125), c(0.125, 0.125)))
expect_equal(Gramm_matrix(w4), cbind(c(0.46875, 0.15625), c(0.15625, 0.21875)))
})
test_that("Test Gramm_matrix(three_component_mixture)",{
w1 <- wcoeff_three_components(1)
w2 <- wcoeff_three_components(2)
w4 <- wcoeff_three_components(4)
expect_equal(Gramm_matrix(w1), rbind(c(1, 0, 0), c(0, 0, 0), c(0, 0, 0)))
expect_equal(Gramm_matrix(w2), cbind(c(0.625, 0.0625, 0.0625), c(0.0625, 0.03125, 0.03125), c(0.0625, 0.03125, 0.03125)))
expect_equal(Gramm_matrix(w4), cbind(c(0.46875, 0.078125, 0.078125), c(0.078125, 0.0546875, 0.0546875), c(0.078125, 0.0546875, 0.0546875)))
})
test_that("Test minor(not_matrix)",{
expect_error(minor(c(NaN, NaN), 1, 1), "Not appropriate input format")
expect_error(minor(data.frame(NaN, NaN), 1, 1), "Not appropriate input format")
expect_error(minor(list(NaN, NaN), 1, 1), "Not appropriate input format")
expect_error(minor(factor(NaN, NaN), 1, 1), "Not appropriate input format")
expect_error(minor("String", 1, 1), "Not appropriate input format")
})
test_that("Test minor(matrix, not_correct_i_j)",{
M <- matrix(NaN, nrow = 2, ncol = 2)
expect_error(minor(M, 0, 0), "Not correct i,j")
expect_error(minor(M, 1, 0), "Not correct i,j")
expect_error(minor(M, 0, 1), "Not correct i,j")
expect_error(minor(M, -1, 0), "Not correct i,j")
expect_error(minor(M, 0, -1), "Not correct i,j")
expect_error(minor(M, -1, -1), "Not correct i,j")
expect_error(minor(M, -1, 1), "Not correct i,j")
expect_error(minor(M, 1, -1), "Not correct i,j")
})
test_that("Test minor(from_different_parameters)",{
M1 <- matrix(1:4, ncol = 2, nrow = 2)
M2 <- matrix(1:9, ncol = 3, nrow = 3)
expect_equal(minor(M1, 1, 1), 4)
expect_equal(minor(M2, 2, 3), -6)
})
test_that("Test output type of minor",{
M1 <- matrix(1:4, ncol = 2, nrow = 2)
M2 <- matrix(1:9, ncol = 3, nrow = 3)
expect_true(is.numeric(minor(M1, 1, 1)))
expect_true(is.numeric(minor(M2, 2, 3)))
expect_true(is.vector(minor(M1, 1, 1)))
expect_true(is.vector(minor(M2, 2, 3)))
})
test_that("Test all_matrix_minors(not_matrix)",{
expect_error(all_matrix_minors(c(NaN, NaN)), "Not appropriate input format")
expect_error(all_matrix_minors(data.frame(NaN, NaN)), "Not appropriate input format")
expect_error(all_matrix_minors(list(NaN, NaN)), "Not appropriate input format")
expect_error(all_matrix_minors(factor(NaN, NaN)), "Not appropriate input format")
expect_error(all_matrix_minors("String"), "Not appropriate input format")
})
test_that("Test all_matrix_minors(not_correct_matrix)",{
M1 <- matrix(NA, ncol = 1, nrow = 1)
expect_error(all_matrix_minors(M1), "Not correct dimension of input matrix")
})
test_that("Test all_matrix_minors(different_matrices)",{
M1 <- matrix(1:4, ncol = 2, nrow = 2)
M2 <- matrix(1:9, ncol = 3, nrow = 3)
expect_equal(all_matrix_minors(M1), cbind(c(4, 3), c(2, 1)))
expect_equal(all_matrix_minors(M2), cbind(c(-3, -6, -3), c(-6, -12, -6), c(-3, -6, -3)))
})
test_that("Test output type of all_matrix_minors",{
M1 <- matrix(1:4, ncol = 2, nrow = 2)
M2 <- matrix(1:9, ncol = 3, nrow = 3)
expect_true(is.matrix(all_matrix_minors(M1)))
expect_true(is.matrix(all_matrix_minors(M2)))
})
test_that("Test minus_one(not_correct_value)",{
expect_error(minus_one(1), "Not correct number of components")
})
test_that("Test minus_one(different_values)",{
expect_equal(minus_one(2), cbind(c(1, -1), c(-1, 1)))
expect_equal(minus_one(3), cbind(c(1, -1, 1), c(-1, 1, -1), c(1, -1, 1)))
})
test_that("Test output type of minus_one",{
expect_true(is.matrix(minus_one(2)))
expect_true(is.matrix(minus_one(3)))
})
test_that("Test acoeff(not_correct_value)",{
w <- cbind(c(1, 1), c(0, 0))
expect_error(acoeff(w), "Devision by zero")
})
test_that("Test acoeff(different_values)",{
w1 <- wcoeff_two_components(2)
w2 <- wcoeff_two_components(4)
w3 <- rbind(c(0.8, 0.1, 0.1),
c(0.05, 0.90, 0.05),
c(0.2, 0.1, 0.7))
expect_equal(acoeff(w1), cbind(c(0, 2), c(4, -2)))
expect_equal(acoeff(w2), cbind(c(-0.8, 0.4, 1.6, 2.8), c(4, 2, 0, -2)))
expect_equal(acoeff(w3), cbind(c(3.90625, -0.37500, -0.53125),
c(-0.15625, 3.37500, -0.21875),
c(-1.09375, -0.37500, 4.46875)))
})
test_that("Test output type of acoeff",{
w1 <- wcoeff_two_components(2)
w2 <- wcoeff_two_components(4)
w3 <- rbind(c(0.8, 0.1, 0.1),
c(0.05, 0.90, 0.05),
c(0.2, 0.1, 0.7))
expect_true(is.matrix(acoeff(w1)))
expect_true(is.matrix(acoeff(w2)))
expect_true(is.matrix(acoeff(w3)))
})
|
/tests/test_MVCweights.R
|
no_license
|
h-dychko/zno_analysis
|
R
| false
| false
| 5,778
|
r
|
# Unit tests for the mixture-weight helpers (Gramm_matrix, minor,
# all_matrix_minors, minus_one, acoeff) defined via tools/TestsTools.R.
library(testthat)
source("tools/TestsTools.R")

# Inputs that every matrix-taking helper must reject.
non_matrix_inputs <- function() {
  list(
    c(NaN, NaN),
    data.frame(NaN, NaN),
    list(NaN, NaN),
    factor(NaN, NaN),
    "String"
  )
}

# Gramm_matrix -----------------------------------------------------------

test_that("Test Gramm_matrix(not_matrix)", {
  for (bad in non_matrix_inputs()) {
    expect_error(Gramm_matrix(bad), "Not appropriate input format")
  }
})

test_that("Test output type of Gramm_matrix", {
  mixtures <- list(
    wcoeff_two_components(1),
    wcoeff_two_components(2),
    wcoeff_three_components(4)
  )
  for (w in mixtures) {
    expect_is(Gramm_matrix(w), "matrix")
  }
})

test_that("Test Gramm_matrix(one_component_mixture)", {
  expect_error(Gramm_matrix(rbind(0, 1)), "Not correct mixture")
})

test_that("Test Gramm_matrix(two_component_mixture)", {
  expect_equal(Gramm_matrix(wcoeff_two_components(1)),
               rbind(c(1, 0), c(0, 0)))
  expect_equal(Gramm_matrix(wcoeff_two_components(2)),
               cbind(c(0.625, 0.125), c(0.125, 0.125)))
  expect_equal(Gramm_matrix(wcoeff_two_components(4)),
               cbind(c(0.46875, 0.15625), c(0.15625, 0.21875)))
})

test_that("Test Gramm_matrix(three_component_mixture)", {
  expect_equal(Gramm_matrix(wcoeff_three_components(1)),
               rbind(c(1, 0, 0), c(0, 0, 0), c(0, 0, 0)))
  expect_equal(Gramm_matrix(wcoeff_three_components(2)),
               cbind(c(0.625, 0.0625, 0.0625),
                     c(0.0625, 0.03125, 0.03125),
                     c(0.0625, 0.03125, 0.03125)))
  expect_equal(Gramm_matrix(wcoeff_three_components(4)),
               cbind(c(0.46875, 0.078125, 0.078125),
                     c(0.078125, 0.0546875, 0.0546875),
                     c(0.078125, 0.0546875, 0.0546875)))
})

# minor ------------------------------------------------------------------

test_that("Test minor(not_matrix)", {
  for (bad in non_matrix_inputs()) {
    expect_error(minor(bad, 1, 1), "Not appropriate input format")
  }
})

test_that("Test minor(matrix, not_correct_i_j)", {
  M <- matrix(NaN, nrow = 2, ncol = 2)
  bad_indices <- list(
    c(0, 0), c(1, 0), c(0, 1), c(-1, 0),
    c(0, -1), c(-1, -1), c(-1, 1), c(1, -1)
  )
  for (ij in bad_indices) {
    expect_error(minor(M, ij[1], ij[2]), "Not correct i,j")
  }
})

test_that("Test minor(from_different_parameters)", {
  expect_equal(minor(matrix(1:4, ncol = 2, nrow = 2), 1, 1), 4)
  expect_equal(minor(matrix(1:9, ncol = 3, nrow = 3), 2, 3), -6)
})

test_that("Test output type of minor", {
  results <- list(
    minor(matrix(1:4, ncol = 2, nrow = 2), 1, 1),
    minor(matrix(1:9, ncol = 3, nrow = 3), 2, 3)
  )
  for (res in results) {
    expect_true(is.numeric(res))
    expect_true(is.vector(res))
  }
})

# all_matrix_minors ------------------------------------------------------

test_that("Test all_matrix_minors(not_matrix)", {
  for (bad in non_matrix_inputs()) {
    expect_error(all_matrix_minors(bad), "Not appropriate input format")
  }
})

test_that("Test all_matrix_minors(not_correct_matrix)", {
  expect_error(all_matrix_minors(matrix(NA, ncol = 1, nrow = 1)),
               "Not correct dimension of input matrix")
})

test_that("Test all_matrix_minors(different_matrices)", {
  expect_equal(all_matrix_minors(matrix(1:4, ncol = 2, nrow = 2)),
               cbind(c(4, 3), c(2, 1)))
  expect_equal(all_matrix_minors(matrix(1:9, ncol = 3, nrow = 3)),
               cbind(c(-3, -6, -3), c(-6, -12, -6), c(-3, -6, -3)))
})

test_that("Test output type of all_matrix_minors", {
  expect_true(is.matrix(all_matrix_minors(matrix(1:4, ncol = 2, nrow = 2))))
  expect_true(is.matrix(all_matrix_minors(matrix(1:9, ncol = 3, nrow = 3))))
})

# minus_one --------------------------------------------------------------

test_that("Test minus_one(not_correct_value)", {
  expect_error(minus_one(1), "Not correct number of components")
})

test_that("Test minus_one(different_values)", {
  expect_equal(minus_one(2), cbind(c(1, -1), c(-1, 1)))
  expect_equal(minus_one(3), cbind(c(1, -1, 1), c(-1, 1, -1), c(1, -1, 1)))
})

test_that("Test output type of minus_one", {
  expect_true(is.matrix(minus_one(2)))
  expect_true(is.matrix(minus_one(3)))
})

# acoeff -----------------------------------------------------------------

test_that("Test acoeff(not_correct_value)", {
  # "Devision" deliberately matches the (misspelled) message thrown by acoeff;
  # keep the pattern in sync with the implementation.
  expect_error(acoeff(cbind(c(1, 1), c(0, 0))), "Devision by zero")
})

test_that("Test acoeff(different_values)", {
  three_comp <- rbind(c(0.8, 0.1, 0.1),
                      c(0.05, 0.90, 0.05),
                      c(0.2, 0.1, 0.7))
  expect_equal(acoeff(wcoeff_two_components(2)),
               cbind(c(0, 2), c(4, -2)))
  expect_equal(acoeff(wcoeff_two_components(4)),
               cbind(c(-0.8, 0.4, 1.6, 2.8), c(4, 2, 0, -2)))
  expect_equal(acoeff(three_comp),
               cbind(c(3.90625, -0.37500, -0.53125),
                     c(-0.15625, 3.37500, -0.21875),
                     c(-1.09375, -0.37500, 4.46875)))
})

test_that("Test output type of acoeff", {
  three_comp <- rbind(c(0.8, 0.1, 0.1),
                      c(0.05, 0.90, 0.05),
                      c(0.2, 0.1, 0.7))
  for (w in list(wcoeff_two_components(2), wcoeff_two_components(4), three_comp)) {
    expect_true(is.matrix(acoeff(w)))
  }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_missing.R
\name{add_missing}
\alias{add_missing}
\title{Add missing values to a vector given a MCAR, MAR, or MNAR scheme}
\usage{
add_missing(y, fun = function(y, rate = 0.1, ...) rep(rate, length(y)), ...)
}
\arguments{
\item{y}{an input vector that should contain missing data in the form of \code{NA}'s}
\item{fun}{a user defined function indicating the missing data mechanism for each element in \code{y}.
Function must return a vector of probability values with the length equal to the length of \code{y}.
Each value in the returned vector indicates the probability that
the respective element in y will be replaced with \code{NA}.
Function must contain the argument \code{y}, representing the
input vector, however any number of additional arguments can be included}
\item{...}{additional arguments to be passed to \code{fun}}
}
\value{
the input vector \code{y} with the sampled \code{NA} values
(according to the \code{fun} scheme)
}
\description{
Given an input vector, replace elements of this vector with missing values according to some scheme.
Default method replaces input values with a MCAR scheme (where on average 10\% of the values will be
replaced with \code{NA}s). MAR and MNAR are supported by replacing the default \code{FUN} argument.
}
\details{
Given an input vector y, and other relevant variables
inside (X) and outside (Z) the data-set, the three types of missingness are:
\describe{
\item{MCAR}{Missing completely at random (MCAR). This is realized by randomly sampling the values of the
input vector (y) irrespective of the possible values in X and Z.
Therefore missing values are randomly sampled and do not depend on any data characteristics and
are truly random}
\item{MAR}{Missing at random (MAR). This is realized when values in the dataset (X)
predict the missing data mechanism in y; conceptually this is equivalent to
\eqn{P(y = NA | X)}. This requires the user to define a custom missing data function}
\item{MNAR}{Missing not at random (MNAR). This is similar to MAR except
that the missing mechanism comes
from the value of y itself or from variables outside the working dataset;
conceptually this is equivalent to \eqn{P(y = NA | X, Z, y)}. This requires
the user to define a custom missing data function}
}
}
\examples{
set.seed(1)
y <- rnorm(1000)
## 10\% missing rate with default FUN
head(ymiss <- add_missing(y), 10)
## 50\% missing with default FUN
head(ymiss <- add_missing(y, rate = .5), 10)
## missing values only when female and low
X <- data.frame(group = sample(c('male', 'female'), 1000, replace=TRUE),
level = sample(c('high', 'low'), 1000, replace=TRUE))
head(X)
fun <- function(y, X, ...){
p <- rep(0, length(y))
p[X$group == 'female' & X$level == 'low'] <- .2
p
}
ymiss <- add_missing(y, X, fun=fun)
tail(cbind(ymiss, X), 10)
## missingness as a function of elements in X (i.e., a type of MAR)
fun <- function(y, X){
# missingness with a logistic regression approach
df <- data.frame(y, X)
mm <- model.matrix(y ~ group + level, df)
cfs <- c(-5, 2, 3) #intercept, group, and level coefs
z <- cfs \%*\% t(mm)
plogis(z)
}
ymiss <- add_missing(y, X, fun=fun)
tail(cbind(ymiss, X), 10)
## missing values when y elements are large (i.e., a type of MNAR)
fun <- function(y) ifelse(abs(y) > 1, .4, 0)
ymiss <- add_missing(y, fun=fun)
tail(cbind(y, ymiss), 10)
}
|
/man/add_missing.Rd
|
no_license
|
mattsigal/SimDesign
|
R
| false
| true
| 3,499
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_missing.R
\name{add_missing}
\alias{add_missing}
\title{Add missing values to a vector given a MCAR, MAR, or MNAR scheme}
\usage{
add_missing(y, fun = function(y, rate = 0.1, ...) rep(rate, length(y)), ...)
}
\arguments{
\item{y}{an input vector that should contain missing data in the form of \code{NA}'s}
\item{fun}{a user defined function indicating the missing data mechanism for each element in \code{y}.
Function must return a vector of probability values with the length equal to the length of \code{y}.
Each value in the returned vector indicates the probability that
the respective element in y will be replaced with \code{NA}.
Function must contain the argument \code{y}, representing the
input vector, however any number of additional arguments can be included}
\item{...}{additional arguments to be passed to \code{fun}}
}
\value{
the input vector \code{y} with the sampled \code{NA} values
(according to the \code{fun} scheme)
}
\description{
Given an input vector, replace elements of this vector with missing values according to some scheme.
Default method replaces input values with a MCAR scheme (where on average 10\% of the values will be
replaced with \code{NA}s). MAR and MNAR are supported by replacing the default \code{fun} argument.
}
\details{
Given an input vector y, and other relevant variables
inside (X) and outside (Z) the data-set, the three types of missingness are:
\describe{
\item{MCAR}{Missing completely at random (MCAR). This is realized by randomly sampling the values of the
input vector (y) irrespective of the possible values in X and Z.
Therefore missing values are randomly sampled and do not depend on any data characteristics and
are truly random}
\item{MAR}{Missing at random (MAR). This is realized when values in the dataset (X)
predict the missing data mechanism in y; conceptually this is equivalent to
\eqn{P(y = NA | X)}. This requires the user to define a custom missing data function}
\item{MNAR}{Missing not at random (MNAR). This is similar to MAR except
that the missing mechanism comes
from the value of y itself or from variables outside the working dataset;
conceptually this is equivalent to \eqn{P(y = NA | X, Z, y)}. This requires
the user to define a custom missing data function}
}
}
\examples{
set.seed(1)
y <- rnorm(1000)
## 10\% missing rate with default FUN
head(ymiss <- add_missing(y), 10)
## 50\% missing with default FUN
head(ymiss <- add_missing(y, rate = .5), 10)
## missing values only when female and low
X <- data.frame(group = sample(c('male', 'female'), 1000, replace=TRUE),
level = sample(c('high', 'low'), 1000, replace=TRUE))
head(X)
fun <- function(y, X, ...){
p <- rep(0, length(y))
p[X$group == 'female' & X$level == 'low'] <- .2
p
}
ymiss <- add_missing(y, X, fun=fun)
tail(cbind(ymiss, X), 10)
## missingness as a function of elements in X (i.e., a type of MAR)
fun <- function(y, X){
# missingness with a logistic regression approach
df <- data.frame(y, X)
mm <- model.matrix(y ~ group + level, df)
cfs <- c(-5, 2, 3) #intercept, group, and level coefs
z <- cfs \%*\% t(mm)
plogis(z)
}
ymiss <- add_missing(y, X, fun=fun)
tail(cbind(ymiss, X), 10)
## missing values when y elements are large (i.e., a type of MNAR)
fun <- function(y) ifelse(abs(y) > 1, .4, 0)
ymiss <- add_missing(y, fun=fun)
tail(cbind(y, ymiss), 10)
}
|
# Custom R Markdown output format that renders a .Rmd to an .mdx file via
# pandoc's "markdown_strict" variant (by default).
#
# variant        pandoc output format to target
# preserve_yaml  if TRUE, re-attach the YAML front matter (which pandoc
#                strips) to the rendered output
# dev            graphics device used for figures
# df_print       data-frame printing method forwarded to the format
# fig_width, fig_height  default figure dimensions (inches)
#
# NOTE(review): this relies on rmarkdown internals via ':::', and calls
# read_utf8 / partition_yaml_front_matter / write_utf8 unqualified --
# presumably imported from rmarkdown elsewhere in the package; confirm.
mdx_format = function(variant = "markdown_strict",
                      preserve_yaml = TRUE,
                      dev = 'png',
                      df_print = "tibble",
                      fig_width = 7,
                      fig_height = 5){
  args <- ""
  # add post_processor for yaml preservation
  # (post_processor is NULL when preserve_yaml = FALSE because the 'if'
  # has no 'else' branch -- output_format() accepts NULL here)
  post_processor <- if (preserve_yaml) {
    function(metadata, input_file, output_file, clean, verbose) {
      input_lines <- read_utf8(input_file)
      partitioned <- partition_yaml_front_matter(input_lines)
      if (!is.null(partitioned$front_matter)) {
        # Re-prepend the original front matter above pandoc's output.
        output_lines <- c(partitioned$front_matter, "", read_utf8(output_file))
        write_utf8(output_lines, output_file)
      }
      # Post-processors must return the (possibly new) output file path.
      output_file
    }
  }
  # return format
  rmarkdown:::output_format(
    knitr = rmarkdown:::knitr_options_html(fig_width, fig_height, fig_retina = NULL, FALSE, dev),
    pandoc = rmarkdown:::pandoc_options(to = variant,
                                        from = rmarkdown::from_rmarkdown(),
                                        args = args,
                                        ext = '.mdx'),
    keep_md = FALSE,
    clean_supporting = FALSE,
    df_print = df_print,
    post_processor = post_processor
  )
}
|
/R/mdx_format.R
|
permissive
|
jdnudel/writeMDX
|
R
| false
| false
| 1,196
|
r
|
# Build an R Markdown output format targeting .mdx files.  Rendering goes
# through pandoc's chosen 'variant'; when 'preserve_yaml' is TRUE the YAML
# front matter (which pandoc drops) is glued back onto the rendered file.
mdx_format = function(variant = "markdown_strict",
                      preserve_yaml = TRUE,
                      dev = 'png',
                      df_print = "tibble",
                      fig_width = 7,
                      fig_height = 5){
  pandoc_args <- ""

  # Optional post-processing step that re-attaches the YAML header.
  # Stays NULL when preserve_yaml is FALSE.
  yaml_postprocessor <- NULL
  if (preserve_yaml) {
    yaml_postprocessor <- function(metadata, input_file, output_file, clean, verbose) {
      src_lines <- read_utf8(input_file)
      parts <- partition_yaml_front_matter(src_lines)
      if (!is.null(parts$front_matter)) {
        write_utf8(c(parts$front_matter, "", read_utf8(output_file)), output_file)
      }
      # A post-processor must hand back the output file path.
      output_file
    }
  }

  # Assemble the format object from rmarkdown's building blocks.
  rmarkdown:::output_format(
    knitr = rmarkdown:::knitr_options_html(fig_width, fig_height, fig_retina = NULL, FALSE, dev),
    pandoc = rmarkdown:::pandoc_options(to = variant,
                                        from = rmarkdown::from_rmarkdown(),
                                        args = pandoc_args,
                                        ext = '.mdx'),
    keep_md = FALSE,
    clean_supporting = FALSE,
    df_print = df_print,
    post_processor = yaml_postprocessor
  )
}
|
#Programming assignment (Exploratory Data Analysis)
# Read the raw household power data; the first data row repeats the header
# (because col.names was supplied), so it is dropped below.
data <- read.table('household_power_consumption.txt', sep = ';', check.names = TRUE, col.names = c('Date', 'Time', 'Global_active_power', 'Global_reactive_power', 'Voltage', 'Global_intensity', 'Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
data = data[-1, ]
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Keep only 1-2 Feb 2007 ("%Y/%m/%d" is one of as.Date's default formats).
result <- subset(data, Date == "2007/02/01" | Date == "2007/02/02")
# BUG FIX: the format string was previously passed into as.POSIXct's second
# positional argument, which is 'tz', not 'format' -- it only parsed by
# accident.  Dates now print as %Y-%m-%d, so use that format explicitly.
newTime <- as.POSIXct(paste(result$Date, result$Time), format = "%Y-%m-%d %H:%M:%S")
result <- cbind(result, newTime)
#Changing factors to numbers
class(result$Sub_metering_1)
result$Sub_metering_1 <- as.numeric(as.character(result$Sub_metering_1))
result$Sub_metering_2 <- as.numeric(as.character(result$Sub_metering_2))
result$Sub_metering_3 <- as.numeric(as.character(result$Sub_metering_3))
#Plotting - 2x2 grid of panels
par(mfrow=c(2,2))
##First plot
plot(result$newTime,result$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab = "", cex=.5)
##Second plot
result$Voltage <- as.numeric(as.character(result$Voltage))
# BUG FIX: plot Voltage here -- the original re-plotted Global_active_power
# while labelling the axis "Voltage".
plot(result$newTime,result$Voltage, type="l", ylab="Voltage", xlab = "datetime")
##Third plot
plot(result$newTime, result$Sub_metering_1, col = "black", type = "l", ylab="Energy sub metering", xlab="")
lines(result$newTime, result$Sub_metering_2, col="red")
lines(result$newTime, result$Sub_metering_3, col="blue")
##Remove legend to fit the graph
##Fourth plot
result$Global_reactive_power <- as.numeric(as.character(result$Global_reactive_power))
plot(result$newTime, result$Global_reactive_power, col = "black", type = "l", ylab="Global_reactive_power", xlab="datetime")
##Finalizing - copy the screen device to a PNG file
dev.copy(png, file="Plot4.png", width=480, height=480)
dev.off()
|
/Plot4.R
|
no_license
|
TriinK/ExData_Plotting1
|
R
| false
| false
| 1,764
|
r
|
#Programming assignment (Exploratory Data Analysis)
# Read the raw household power data; the first data row repeats the header
# (because col.names was supplied), so it is dropped below.
data <- read.table('household_power_consumption.txt', sep = ';', check.names = TRUE, col.names = c('Date', 'Time', 'Global_active_power', 'Global_reactive_power', 'Voltage', 'Global_intensity', 'Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
data = data[-1, ]
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Keep only 1-2 Feb 2007 ("%Y/%m/%d" is one of as.Date's default formats).
result <- subset(data, Date == "2007/02/01" | Date == "2007/02/02")
# BUG FIX: the format string was previously passed into as.POSIXct's second
# positional argument, which is 'tz', not 'format' -- it only parsed by
# accident.  Dates now print as %Y-%m-%d, so use that format explicitly.
newTime <- as.POSIXct(paste(result$Date, result$Time), format = "%Y-%m-%d %H:%M:%S")
result <- cbind(result, newTime)
#Changing factors to numbers
class(result$Sub_metering_1)
result$Sub_metering_1 <- as.numeric(as.character(result$Sub_metering_1))
result$Sub_metering_2 <- as.numeric(as.character(result$Sub_metering_2))
result$Sub_metering_3 <- as.numeric(as.character(result$Sub_metering_3))
#Plotting - 2x2 grid of panels
par(mfrow=c(2,2))
##First plot
plot(result$newTime,result$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab = "", cex=.5)
##Second plot
result$Voltage <- as.numeric(as.character(result$Voltage))
# BUG FIX: plot Voltage here -- the original re-plotted Global_active_power
# while labelling the axis "Voltage".
plot(result$newTime,result$Voltage, type="l", ylab="Voltage", xlab = "datetime")
##Third plot
plot(result$newTime, result$Sub_metering_1, col = "black", type = "l", ylab="Energy sub metering", xlab="")
lines(result$newTime, result$Sub_metering_2, col="red")
lines(result$newTime, result$Sub_metering_3, col="blue")
##Remove legend to fit the graph
##Fourth plot
result$Global_reactive_power <- as.numeric(as.character(result$Global_reactive_power))
plot(result$newTime, result$Global_reactive_power, col = "black", type = "l", ylab="Global_reactive_power", xlab="datetime")
##Finalizing - copy the screen device to a PNG file
dev.copy(png, file="Plot4.png", width=480, height=480)
dev.off()
|
## Loading the original data set
blogs <- readLines("./final/en_US/en_US.blogs.txt", encoding = "UTF-8", skipNul=TRUE)
news <- readLines("./final/en_US/en_US.news.txt", encoding = "UTF-8", skipNul=TRUE)
twitter <- readLines("./final/en_US/en_US.twitter.txt", encoding = "UTF-8", skipNul=TRUE)
## Generating a random sample of all sources
# Fix the RNG seed so the 150k-line samples are reproducible across runs.
set.seed(1234)
sampleTwitter <- sample(twitter, 150000)
sampleNews <- sample(news, 150000)
sampleBlogs <- sample(blogs, 150000)
textSample <- c(sampleTwitter,sampleNews,sampleBlogs)
## Save sample
writeLines(textSample, "./bigTextSample.txt")
|
/testStuff/textSample.R
|
no_license
|
PavanYaswanth/capstone-project
|
R
| false
| false
| 587
|
r
|
## Loading the original data set
blogs <- readLines("./final/en_US/en_US.blogs.txt", encoding = "UTF-8", skipNul=TRUE)
news <- readLines("./final/en_US/en_US.news.txt", encoding = "UTF-8", skipNul=TRUE)
twitter <- readLines("./final/en_US/en_US.twitter.txt", encoding = "UTF-8", skipNul=TRUE)
## Generating a random sample of all sources
# Fix the RNG seed so the 150k-line samples are reproducible across runs.
set.seed(1234)
sampleTwitter <- sample(twitter, 150000)
sampleNews <- sample(news, 150000)
sampleBlogs <- sample(blogs, 150000)
textSample <- c(sampleTwitter,sampleNews,sampleBlogs)
## Save sample
writeLines(textSample, "./bigTextSample.txt")
|
# SELECTING BIOCLIMATIC VARIABLES FOR MODELLING ADAPTED#
# using VIF analysis
#...................................................
#...................................................
# Packages ####
library("data.table")
library("raster")
library("dismo")
library("BiodiversityR")
library("car")
#...................................................
#...................................................
# Data ####
# bioclimatic variables
# Collect every GeoTIFF under data/bioclim and stack them into one
# multi-layer raster object.
bio <- list.files("data/bioclim",
                  pattern = ".tif$",
                  full.names = TRUE)
bio <- stack(bio)
names(bio)
# define projection and extension
myproj <- proj4string(bio)
myext <- extent(bio)
myres <- res(bio)
# passport data
# Occurrence records; assumed to contain 'lon' and 'lat' columns --
# TODO confirm against data/macs.csv.
df <- fread("data/macs.csv")
df
# .......................................
# .......................................
# Set background points ####
# Unique occurrence coordinates plus 500 random background (pseudo-absence)
# points sampled from the first raster layer.
xy <- df[, c("lon", "lat")]
xy <- unique(xy, by = c("lon", "lat"))
# Seed fixed so the random background sample is reproducible.
set.seed(123)
bg <-randomPoints(bio[[1]],
                  n = 500,
                  ext = myext,
                  extf = 1.25)
plot(bio[[1]])
points(bg)
#...................................................
#...................................................
# Variable selection with VIF ####
# Stepwise variance-inflation-factor selection: predictors are dropped
# until every remaining variable has VIF <= VIF.max.
vif <- ensemble.VIF(
    x = bio,
    a = xy,
    an = bg,
    VIF.max = 10,
    keep = NULL,
    factors = NULL,
    dummy.vars = NULL
)
# save outputs
output <- "processing/vif/"
dir.create(output,
           recursive = TRUE,
           showWarnings = FALSE)
save(vif, file = paste0(output, "vif_results.rda"))
# remove files not selected by vif analysis
# NOTE(review): this permanently deletes the dropped rasters from disk.
out <- vif$var.drops
file.remove(paste0("data/bioclim/", out, ".tif"))
# ..................................
# ..................................
# future scenarios
# Apply the same variable drop to every GCM scenario folder.
gcm <- list.dirs("data/gcm")[-1]
for (i in seq_along(gcm)) {
    print(gcm[[i]])
    file.remove(paste0(gcm[[i]], "/", out, ".tif"))
}
|
/script/02.1_vif_analysis_variable_selection.R
|
no_license
|
EJEYZiE01/Macadamia-modelling
|
R
| false
| false
| 1,959
|
r
|
# SELECTING BIOCLIMATIC VARIABLES FOR MODELLING ADAPTED#
# using VIF analysis
#...................................................
#...................................................
# Packages ####
library("data.table")
library("raster")
library("dismo")
library("BiodiversityR")
library("car")
#...................................................
#...................................................
# Data ####
# bioclimatic variables
# Collect every GeoTIFF under data/bioclim and stack them into one
# multi-layer raster object.
bio <- list.files("data/bioclim",
                  pattern = ".tif$",
                  full.names = TRUE)
bio <- stack(bio)
names(bio)
# define projection and extension
myproj <- proj4string(bio)
myext <- extent(bio)
myres <- res(bio)
# passport data
# Occurrence records; assumed to contain 'lon' and 'lat' columns --
# TODO confirm against data/macs.csv.
df <- fread("data/macs.csv")
df
# .......................................
# .......................................
# Set background points ####
# Unique occurrence coordinates plus 500 random background (pseudo-absence)
# points sampled from the first raster layer.
xy <- df[, c("lon", "lat")]
xy <- unique(xy, by = c("lon", "lat"))
# Seed fixed so the random background sample is reproducible.
set.seed(123)
bg <-randomPoints(bio[[1]],
                  n = 500,
                  ext = myext,
                  extf = 1.25)
plot(bio[[1]])
points(bg)
#...................................................
#...................................................
# Variable selection with VIF ####
# Stepwise variance-inflation-factor selection: predictors are dropped
# until every remaining variable has VIF <= VIF.max.
vif <- ensemble.VIF(
    x = bio,
    a = xy,
    an = bg,
    VIF.max = 10,
    keep = NULL,
    factors = NULL,
    dummy.vars = NULL
)
# save outputs
output <- "processing/vif/"
dir.create(output,
           recursive = TRUE,
           showWarnings = FALSE)
save(vif, file = paste0(output, "vif_results.rda"))
# remove files not selected by vif analysis
# NOTE(review): this permanently deletes the dropped rasters from disk.
out <- vif$var.drops
file.remove(paste0("data/bioclim/", out, ".tif"))
# ..................................
# ..................................
# future scenarios
# Apply the same variable drop to every GCM scenario folder.
gcm <- list.dirs("data/gcm")[-1]
for (i in seq_along(gcm)) {
    print(gcm[[i]])
    file.remove(paste0(gcm[[i]], "/", out, ".tif"))
}
|
# SETUP ====
# load libraries ====
library(shiny)
library(shinydashboard)
library(shinyLP)
library(shinythemes)
library(plotly)
library(ggplot2)
library(data.table)  # FIX: was loaded twice; duplicate call removed
library(circlize)
library(dplyr)
library(stringr)
library(fs)
library(rmarkdown)
library(markdown)
library(wesanderson)
library(shinycssloaders)
# install complexheatmap from bioconductor
library(BiocManager)
options(repos = BiocManager::repositories())
library(ComplexHeatmap)
# > call source function ====
source("my_circos_plot.R")
source(file.path("helpers", "Output_main.R"))
#------------------------------------------------------------------------------#
# UI definition for the EpiViz shiny app: a navbar with a landing page, a
# "how to" page, and the plotting workflow (data upload + circos plot).
ui <- navbarPage(
  title = "",
  theme = shinytheme("flatly"),
  fluid = TRUE,
  selected = "EpiViz",
  inverse = FALSE,
  # EpiViz ====
  tabPanel(
    title = "EpiViz",
    fluidRow(
      column(12, title = "", id = "home_home", epiviz()),
      column(4, title = "About", id = "home_about", home_about()),
      column(4, title = "Example", id = "home_example", home_example()),
      column(4, title = "", id = "home_footer", home_footer())
    )),
  # HOW TO ====
  tabPanel(
    title = "how to",
    fluidRow(
      column(4, title = "", id = "how_to_1", how_to_1()),
      column(4, title = "About", id = "how_to_2", how_to_2()),
      # BUG FIX: removed the trailing comma after the last column(); an
      # empty trailing argument errors when fluidRow() evaluates its dots.
      column(4, title = "Example", id = "how_to_3", how_to_3())
    )),
  # ANALYSIS ====
  tabPanel(
    title = "plot",
    tabsetPanel(
      id = 'dataset',
      ## UPLOAD DATA ====
      tabPanel(titlePanel(h5("Data")),
               sidebarLayout(
                 ## > sidebar panel ====
                 sidebarPanel(
                   h4("Upload data:"),
                   helpText("Upload tab seperated text files for each track."),
                   fileInput(
                     inputId = "file1",
                     label = "Track 1",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   fileInput(
                     inputId = "file2",
                     label = "Track 2",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   fileInput(
                     inputId = "file3",
                     label = "Track 3",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   h4("Volcano plot of data:"),
                   helpText("Select columns to generate a volcano plot."),
                   selectInput("beta_column",
                               "Effect estimate:",
                               choices="",
                               selected = ""),
                   selectInput("p_column",
                               "P-value:",
                               choices="",
                               selected = ""),
                   actionButton("volcanobutton","volcano")),
                 ## > main panel ====
                 mainPanel(
                   tabsetPanel(
                     tabPanel("Track 1",
                              conditionalPanel(
                                condition = "output.file_imported",
                                h4("Description of uploaded data"),
                                textOutput("rowcount"),
                                textOutput("colcount"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data1"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot1"))),
                     tabPanel("Track 2",
                              conditionalPanel(
                                condition = "output.file_imported2",
                                h4("Description of uploaded data"),
                                textOutput("rowcount2"),
                                textOutput("colcount2"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data2"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot2"))),
                     tabPanel("Track 3",
                              conditionalPanel(
                                condition = "output.file_imported3",
                                h4("Description of uploaded data"),
                                textOutput("rowcount3"),
                                textOutput("colcount3"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data3"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot3")))
                   ) # close tabsetPanel()
                 ) # close mainPanel()
               ) # close sidebarLayout()
      ), # close tabPanel()
      ## PLOT PARAMETERS ====
      tabPanel(titlePanel(h5("Circos")),
               sidebarLayout(
                 ## > sidebar panel ====
                 sidebarPanel(
                   h4("Circos plot paramaters"),
                   selectInput("track_number",
                               "Number of tracks",
                               choices = c(1,2,3),
                               selected = 1),
                   selectInput("label_column",
                               "Label:",
                               choices="",
                               selected = ""),
                   selectInput("section_column",
                               "Group:",
                               choices="",
                               selected = ""),
                   selectInput("estimate_column",
                               "Estimate:",
                               choices="",
                               selected = ""),
                   selectInput("pvalue_column",
                               "P-value:",
                               choices="",
                               selected = ""),
                   selectInput("confidence_interval_lower_column",
                               "Lower confidence interval:",
                               choices="",
                               selected = ""),
                   selectInput("confidence_interval_upper_column",
                               "Upper confidence interval:",
                               choices="",
                               selected = ""),
                   numericInput("pvalue_adjustment",
                                "P-value adjustment:",
                                value = 1,
                                min = 1,
                                max = 999999),
                   textOutput("pval"),
                   ## Legend paramaters
                   h4("Legend paramaters"),
                   radioButtons(
                     inputId = 'legend',
                     label = 'Legend',
                     choices = c(
                       Yes = 'TRUE',
                       No = 'FALSE'),
                     selected = 'FALSE'),
                   textInput("track2_label",
                             "Track 2 legend label",
                             value = ""),
                   textInput("track3_label",
                             "Track 3 legend label",
                             value = ""),
                   textInput("track4_label",
                             "Track 4 legend label",
                             value = ""),
                   textInput("pvalue_label",
                             "P-value threshold label",
                             value = "P <= 0.05"),
                   textOutput("pval_label"),
                   br(),
                   h4("Customisation"),
                   radioButtons(
                     inputId = 'colours',
                     label = 'Colour',
                     choices = c(
                       'Accessible colours' = 'TRUE',
                       'Not accessible colours' = 'FALSE'),
                     selected = 'TRUE'),
                   actionButton("circosbutton","Plot")
                 ), # close sidebarPanel()
                 ## > main panel ====
                 mainPanel(withSpinner(uiOutput("plot")))
               ) # close sidebarLayout()
      ) # close tabPanel()
    ) # close tabsetPanel()
  ), # close tabPanel
  ## Keep shiny app awake ====
  # Periodic websocket ping so hosting platforms do not idle the session out.
  tags$head(
    HTML(
      "
<script>
var socket_timeout_interval
var n = 0
$(document).on('shiny:connected', function(event) {
socket_timeout_interval = setInterval(function(){
Shiny.onInputChange('count', n++)
}, 15000)
});
$(document).on('shiny:disconnected', function(event) {
clearInterval(socket_timeout_interval)
});
</script>
"
    )
  ),
  textOutput("")
)
|
/app/ui.R
|
permissive
|
mattlee821/EpiViz
|
R
| false
| false
| 10,304
|
r
|
# SETUP ====
# load libraries ====
library(shiny)
library(shinydashboard)
library(shinyLP)
library(shinythemes)
library(plotly)
library(ggplot2)
library(data.table)  # FIX: was loaded twice; duplicate call removed
library(circlize)
library(dplyr)
library(stringr)
library(fs)
library(rmarkdown)
library(markdown)
library(wesanderson)
library(shinycssloaders)
# install complexheatmap from bioconductor
library(BiocManager)
options(repos = BiocManager::repositories())
library(ComplexHeatmap)
# > call source function ====
source("my_circos_plot.R")
source(file.path("helpers", "Output_main.R"))
#------------------------------------------------------------------------------#
# UI definition for the EpiViz shiny app: a navbar with a landing page, a
# "how to" page, and the plotting workflow (data upload + circos plot).
ui <- navbarPage(
  title = "",
  theme = shinytheme("flatly"),
  fluid = TRUE,
  selected = "EpiViz",
  inverse = FALSE,
  # EpiViz ====
  tabPanel(
    title = "EpiViz",
    fluidRow(
      column(12, title = "", id = "home_home", epiviz()),
      column(4, title = "About", id = "home_about", home_about()),
      column(4, title = "Example", id = "home_example", home_example()),
      column(4, title = "", id = "home_footer", home_footer())
    )),
  # HOW TO ====
  tabPanel(
    title = "how to",
    fluidRow(
      column(4, title = "", id = "how_to_1", how_to_1()),
      column(4, title = "About", id = "how_to_2", how_to_2()),
      # BUG FIX: removed the trailing comma after the last column(); an
      # empty trailing argument errors when fluidRow() evaluates its dots.
      column(4, title = "Example", id = "how_to_3", how_to_3())
    )),
  # ANALYSIS ====
  tabPanel(
    title = "plot",
    tabsetPanel(
      id = 'dataset',
      ## UPLOAD DATA ====
      tabPanel(titlePanel(h5("Data")),
               sidebarLayout(
                 ## > sidebar panel ====
                 sidebarPanel(
                   h4("Upload data:"),
                   helpText("Upload tab seperated text files for each track."),
                   fileInput(
                     inputId = "file1",
                     label = "Track 1",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   fileInput(
                     inputId = "file2",
                     label = "Track 2",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   fileInput(
                     inputId = "file3",
                     label = "Track 3",
                     multiple = FALSE,
                     accept = c(
                       "text/csv",
                       "text/comma-separated-values",
                       "text/plain",
                       ".csv")),
                   h4("Volcano plot of data:"),
                   helpText("Select columns to generate a volcano plot."),
                   selectInput("beta_column",
                               "Effect estimate:",
                               choices="",
                               selected = ""),
                   selectInput("p_column",
                               "P-value:",
                               choices="",
                               selected = ""),
                   actionButton("volcanobutton","volcano")),
                 ## > main panel ====
                 mainPanel(
                   tabsetPanel(
                     tabPanel("Track 1",
                              conditionalPanel(
                                condition = "output.file_imported",
                                h4("Description of uploaded data"),
                                textOutput("rowcount"),
                                textOutput("colcount"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data1"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot1"))),
                     tabPanel("Track 2",
                              conditionalPanel(
                                condition = "output.file_imported2",
                                h4("Description of uploaded data"),
                                textOutput("rowcount2"),
                                textOutput("colcount2"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data2"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot2"))),
                     tabPanel("Track 3",
                              conditionalPanel(
                                condition = "output.file_imported3",
                                h4("Description of uploaded data"),
                                textOutput("rowcount3"),
                                textOutput("colcount3"),
                                br(),
                                h4("First rows of uploaded data"),
                                tableOutput(outputId = "data3"),
                                br(),
                                h4("Volcano plot of uploaded data"),
                                plotlyOutput("volcanoplot3")))
                   ) # close tabsetPanel()
                 ) # close mainPanel()
               ) # close sidebarLayout()
      ), # close tabPanel()
      ## PLOT PARAMETERS ====
      tabPanel(titlePanel(h5("Circos")),
               sidebarLayout(
                 ## > sidebar panel ====
                 sidebarPanel(
                   h4("Circos plot paramaters"),
                   selectInput("track_number",
                               "Number of tracks",
                               choices = c(1,2,3),
                               selected = 1),
                   selectInput("label_column",
                               "Label:",
                               choices="",
                               selected = ""),
                   selectInput("section_column",
                               "Group:",
                               choices="",
                               selected = ""),
                   selectInput("estimate_column",
                               "Estimate:",
                               choices="",
                               selected = ""),
                   selectInput("pvalue_column",
                               "P-value:",
                               choices="",
                               selected = ""),
                   selectInput("confidence_interval_lower_column",
                               "Lower confidence interval:",
                               choices="",
                               selected = ""),
                   selectInput("confidence_interval_upper_column",
                               "Upper confidence interval:",
                               choices="",
                               selected = ""),
                   numericInput("pvalue_adjustment",
                                "P-value adjustment:",
                                value = 1,
                                min = 1,
                                max = 999999),
                   textOutput("pval"),
                   ## Legend paramaters
                   h4("Legend paramaters"),
                   radioButtons(
                     inputId = 'legend',
                     label = 'Legend',
                     choices = c(
                       Yes = 'TRUE',
                       No = 'FALSE'),
                     selected = 'FALSE'),
                   textInput("track2_label",
                             "Track 2 legend label",
                             value = ""),
                   textInput("track3_label",
                             "Track 3 legend label",
                             value = ""),
                   textInput("track4_label",
                             "Track 4 legend label",
                             value = ""),
                   textInput("pvalue_label",
                             "P-value threshold label",
                             value = "P <= 0.05"),
                   textOutput("pval_label"),
                   br(),
                   h4("Customisation"),
                   radioButtons(
                     inputId = 'colours',
                     label = 'Colour',
                     choices = c(
                       'Accessible colours' = 'TRUE',
                       'Not accessible colours' = 'FALSE'),
                     selected = 'TRUE'),
                   actionButton("circosbutton","Plot")
                 ), # close sidebarPanel()
                 ## > main panel ====
                 mainPanel(withSpinner(uiOutput("plot")))
               ) # close sidebarLayout()
      ) # close tabPanel()
    ) # close tabsetPanel()
  ), # close tabPanel
  ## Keep shiny app awake ====
  # Periodic websocket ping so hosting platforms do not idle the session out.
  tags$head(
    HTML(
      "
<script>
var socket_timeout_interval
var n = 0
$(document).on('shiny:connected', function(event) {
socket_timeout_interval = setInterval(function(){
Shiny.onInputChange('count', n++)
}, 15000)
});
$(document).on('shiny:disconnected', function(event) {
clearInterval(socket_timeout_interval)
});
</script>
"
    )
  ),
  textOutput("")
)
|
#' @title Construct a gpuVector
#' @description Construct a gpuVector of a class that inherits
#' from \code{gpuVector}
#' @param data An object that is or can be converted to a
#' \code{vector}
#' @param length A non-negative integer specifying the desired length.
#' @param type A character string specifying the type of gpuVector. Default
#' is NULL where type is inherited from the source data type.
#' @param ctx_id An integer specifying the object's context
#' @param ... Additional arguments passed on to the individual gpuVector methods
#' @return A gpuVector object
#' @docType methods
#' @rdname gpuVector-methods
#' @author Charles Determan Jr.
#' @export
# S4 generic: concrete behavior lives in the methods below, dispatched on
# the classes of 'data' and 'length'.
setGeneric("gpuVector", function(data, length, type=NULL, ...){
    standardGeneric("gpuVector")
})
#' @rdname gpuVector-methods
#' @aliases gpuVector,vector
# Construct a gpuVector directly from an R vector.  The element type
# defaults to "integer" for integer input, otherwise the package-wide
# default type option.
setMethod('gpuVector',
          signature(data = 'vector', length = 'missing'),
          function(data, type=NULL, ctx_id = NULL){

              if (is.null(type)) {
                  type <- switch(typeof(data),
                                 "integer" = "integer",
                                 getOption("gpuR.default.type"))
              }

              # Resolve the current device/context/platform so their
              # identifiers can be stored on the resulting object.
              device <- currentDevice()

              context_index <- ifelse(is.null(ctx_id), currentContext(), as.integer(ctx_id))
              device_index <- as.integer(device$device_index)

              device_type <- device$device_type
              device_name <- switch(device_type,
                                    "gpu" = gpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    "cpu" = cpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    stop("Unrecognized device type")
              )
              platform_index <- currentPlatform()$platform_index
              platform_name <- platformInfo(platform_index)$platformName

              # Copy the data into an external pointer; 4L/6L/8L appear to
              # be internal element-type codes (integer/float/double) --
              # TODO confirm against sexpVecToEigenVecXptr's C++ side.
              out = switch(type,
                           integer = {
                               new("igpuVector",
                                   address=sexpVecToEigenVecXptr(data,
                                                                 length(data),
                                                                 4L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           float = {
                               new("fgpuVector",
                                   address=sexpVecToEigenVecXptr(data,
                                                                 length(data),
                                                                 6L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           double = {
                               # double support must be queried from the device
                               assert_has_double(platform_index, device_index)
                               new("dgpuVector",
                                   address = sexpVecToEigenVecXptr(data,
                                                                   length(data),
                                                                   8L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           stop("this is an unrecognized
                                or unimplemented data type")
              )

              return(out)
          },
          valueClass = "gpuVector"
)
#' @rdname gpuVector-methods
#' @aliases gpuVector,missingOrNULL
# Construct an uninitialized gpuVector of a given length.  'length' may now
# be any positive whole number (e.g. 5 or 5L); it is validated and coerced
# to integer, which is backward-compatible with the previous integer-only
# check (no previously working call changes behavior).
setMethod('gpuVector',
          signature(data = 'missingOrNULL'),
          function(data, length, type=NULL, ctx_id = NULL){

              if (is.null(type)) type <- getOption("gpuR.default.type")

              # Accept any scalar positive whole number and coerce to
              # integer (generalizes the old 'is.integer' requirement).
              if (!is.numeric(length) || length(length) != 1L || is.na(length) ||
                  length <= 0 || length %% 1 != 0) {
                  stop("length must be a positive integer")
              }
              length <- as.integer(length)

              # Resolve the current device/context/platform so their
              # identifiers can be stored on the resulting object.
              device <- currentDevice()

              context_index <- ifelse(is.null(ctx_id), currentContext(), as.integer(ctx_id))
              device_index <- as.integer(device$device_index)

              device_type <- device$device_type
              device_name <- switch(device_type,
                                    "gpu" = gpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    "cpu" = cpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    stop("Unrecognized device type")
              )
              platform_index <- currentPlatform()$platform_index
              platform_name <- platformInfo(platform_index)$platformName

              # Allocate an empty backing buffer; 4L/6L/8L appear to be
              # internal element-type codes (integer/float/double) --
              # TODO confirm against emptyEigenVecXptr's C++ side.
              out = switch(type,
                           integer = {
                               new("igpuVector",
                                   address=emptyEigenVecXptr(length, 4L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           float = {
                               new("fgpuVector",
                                   address=emptyEigenVecXptr(length, 6L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           double = {
                               # double support must be queried from the device
                               assert_has_double(platform_index, device_index)
                               new("dgpuVector",
                                   address = emptyEigenVecXptr(length, 8L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           stop("this is an unrecognized
                                or unimplemented data type")
              )

              return(out)
          },
          valueClass = "gpuVector"
)
|
/R/gpuVector.R
|
no_license
|
bryant1410/gpuR
|
R
| false
| false
| 7,411
|
r
|
#' @title Construct a gpuVector
#' @description Construct a gpuVector of a class that inherits
#' from \code{gpuVector}
#' @param data An object that is or can be converted to a
#' \code{vector}
#' @param length A non-negative integer specifying the desired length.
#' @param type A character string specifying the type of gpuVector. Default
#' is NULL where type is inherited from the source data type.
#' @param ctx_id An integer specifying the object's context
#' @param ... Additional arguments passed on to the individual gpuVector methods
#' @return A gpuVector object
#' @docType methods
#' @rdname gpuVector-methods
#' @author Charles Determan Jr.
#' @export
# S4 generic: concrete behavior lives in the methods below, dispatched on
# the classes of 'data' and 'length'.
setGeneric("gpuVector", function(data, length, type=NULL, ...){
    standardGeneric("gpuVector")
})
#' @rdname gpuVector-methods
#' @aliases gpuVector,vector
# Construct a gpuVector directly from an R vector.  The element type
# defaults to "integer" for integer input, otherwise the package-wide
# default type option.
setMethod('gpuVector',
          signature(data = 'vector', length = 'missing'),
          function(data, type=NULL, ctx_id = NULL){

              if (is.null(type)) {
                  type <- switch(typeof(data),
                                 "integer" = "integer",
                                 getOption("gpuR.default.type"))
              }

              # Resolve the current device/context/platform so their
              # identifiers can be stored on the resulting object.
              device <- currentDevice()

              context_index <- ifelse(is.null(ctx_id), currentContext(), as.integer(ctx_id))
              device_index <- as.integer(device$device_index)

              device_type <- device$device_type
              device_name <- switch(device_type,
                                    "gpu" = gpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    "cpu" = cpuInfo(device_idx = as.integer(device_index))$deviceName,
                                    stop("Unrecognized device type")
              )
              platform_index <- currentPlatform()$platform_index
              platform_name <- platformInfo(platform_index)$platformName

              # Copy the data into an external pointer; 4L/6L/8L appear to
              # be internal element-type codes (integer/float/double) --
              # TODO confirm against sexpVecToEigenVecXptr's C++ side.
              out = switch(type,
                           integer = {
                               new("igpuVector",
                                   address=sexpVecToEigenVecXptr(data,
                                                                 length(data),
                                                                 4L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           float = {
                               new("fgpuVector",
                                   address=sexpVecToEigenVecXptr(data,
                                                                 length(data),
                                                                 6L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           double = {
                               # double support must be queried from the device
                               assert_has_double(platform_index, device_index)
                               new("dgpuVector",
                                   address = sexpVecToEigenVecXptr(data,
                                                                   length(data),
                                                                   8L),
                                   .context_index = context_index,
                                   .platform_index = platform_index,
                                   .platform = platform_name,
                                   .device_index = device_index,
                                   .device = device_name)
                           },
                           stop("this is an unrecognized
                                or unimplemented data type")
              )

              return(out)
          },
          valueClass = "gpuVector"
)
#' @rdname gpuVector-methods
#' @aliases gpuVector,missingOrNULL
setMethod('gpuVector',
          signature(data = 'missingOrNULL'),
          function(data, length, type=NULL, ctx_id = NULL){
              
              # No source data: allocate an uninitialized device buffer of the
              # requested length and type.
              if (is.null(type)) type <- getOption("gpuR.default.type")
              
              # `length` must be a positive R integer (e.g. 10L); a double
              # such as 10 is rejected, matching the allocation API.
              if (length <= 0) stop("length must be a positive integer")
              if (!is.integer(length)) stop("length must be a positive integer")
              
              device <- currentDevice()
              
              # Scalar condition: use if/else rather than the vectorized
              # ifelse(), which evaluates both branches and strips attributes.
              context_index <- if (is.null(ctx_id)) currentContext() else as.integer(ctx_id)
              
              device_index <- as.integer(device$device_index)
              device_type <- device$device_type
              
              # Resolve the human-readable device name recorded in the
              # object's metadata slots.
              device_name <- switch(device_type,
                                    "gpu" = gpuInfo(device_idx = device_index)$deviceName,
                                    "cpu" = cpuInfo(device_idx = device_index)$deviceName,
                                    stop("Unrecognized device type")
              )
              
              platform_index <- currentPlatform()$platform_index
              platform_name <- platformInfo(platform_index)$platformName
              
              # Allocate an empty device buffer of the requested precision.
              # The trailing integer argument is the internal type code
              # (4L = integer, 6L = float, 8L = double).
              out <- switch(type,
                            integer = {
                                new("igpuVector",
                                    address = emptyEigenVecXptr(length, 4L),
                                    .context_index = context_index,
                                    .platform_index = platform_index,
                                    .platform = platform_name,
                                    .device_index = device_index,
                                    .device = device_name)
                            },
                            float = {
                                new("fgpuVector",
                                    address = emptyEigenVecXptr(length, 6L),
                                    .context_index = context_index,
                                    .platform_index = platform_index,
                                    .platform = platform_name,
                                    .device_index = device_index,
                                    .device = device_name)
                            },
                            double = {
                                # Double precision is an optional OpenCL
                                # capability; fail early if unsupported.
                                assert_has_double(platform_index, device_index)
                                new("dgpuVector",
                                    address = emptyEigenVecXptr(length, 8L),
                                    .context_index = context_index,
                                    .platform_index = platform_index,
                                    .platform = platform_name,
                                    .device_index = device_index,
                                    .device = device_name)
                            },
                            stop("unrecognized or unimplemented data type")
              )
              
              return(out)
          },
          valueClass = "gpuVector"
)
# (removed: non-code extraction artifact — stray dataset-viewer footer text
# and a lone "|" character, which is a syntax error in R)