content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
\name{File-class}
\Rdversion{1.1}
\docType{class}
\alias{File-class}
\alias{File}
\title{Class \code{File}}
\description{
A Rook application that serves static files from a root directory, according to the path info of the Rook request.
}
\examples{
# This example serves all your files in /etc (on UNIX and Mac only).
#
# Note that when you open the application, you will see the word
# 'Forbidden'. "File" doesn't serve directories, so you must amend the
# url in the location bar with the file you want to view. Try adding /passwd.
s <- Rhttpd$new()
\dontrun{
s$start(quiet=TRUE)
}
s$add(name="etc",app=File$new('/etc'))
\dontrun{
s$browse('etc') # Opens a browser window to the app.
}
s$remove(all=TRUE)
rm(s)
}
\section{Methods}{
\describe{
\item{\code{new(root):}}{
\code{root} is the name of the directory from where to serve files.
}
}}
\seealso{
\code{\link{Rhttpd}}.
}
\keyword{classes}
|
/man/File-class.Rd
|
no_license
|
cran/Rook
|
R
| false
| false
| 908
|
rd
|
\name{File-class}
\Rdversion{1.1}
\docType{class}
\alias{File-class}
\alias{File}
\title{Class \code{File}}
\description{
A Rook application that serves static files from a root directory, according to the path info of the Rook request.
}
\examples{
# This example serves all your files in /etc (on UNIX and Mac only).
#
# Note that when you open the application, you will see the word
# 'Forbidden'. "File" doesn't serve directories, so you must amend the
# url in the location bar with the file you want to view. Try adding /passwd.
s <- Rhttpd$new()
\dontrun{
s$start(quiet=TRUE)
}
s$add(name="etc",app=File$new('/etc'))
\dontrun{
s$browse('etc') # Opens a browser window to the app.
}
s$remove(all=TRUE)
rm(s)
}
\section{Methods}{
\describe{
\item{\code{new(root):}}{
\code{root} is the name of the directory from where to serve files.
}
}}
\seealso{
\code{\link{Rhttpd}}.
}
\keyword{classes}
|
## P398: Assignment 3
## Matthias Orlowski
## 02/08/12
## R script to plot run times generated with HW3timetest.py
library(ggplot2)

# Read timing results produced by HW3timetest.py.
# Columns: algorithm name, input type, input length, measured run time.
RT <- read.table("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimes.csv",
                 header = FALSE, as.is = TRUE, sep = ",")
names(RT) <- c("algorithm", "type", "length", "runtime")

# Correct a typo introduced by the python script ("Qick" -> "Quick").
RT$algorithm[RT$algorithm == "Qick Sort"] <- "Quick Sort"
RT$algorithm[RT$algorithm == "Qick Sort (RP)"] <- "Quick Sort (RP)"

# Exclude results skewed by other computer usage while the test was running.
RT <- RT[RT$length < 7680, ]

# Plot all four algorithms.
png("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimeplot.png")
rt_plot <- ggplot(RT, aes(x = length, y = runtime, col = as.factor(algorithm))) +
  geom_line() +
  facet_grid(type ~ .) +
  theme_bw() +
  scale_colour_manual(values = c(3, 4, 5, 6), name = "Algorithm") +
  # opts() was deprecated in ggplot2 0.9.2 and later removed;
  # ggtitle() is the supported way to set a plot title.
  ggtitle("Runtimes for different sorting algorithms")
# Explicit print() so the plot is rendered even when this file is source()d
# (auto-printing only happens at the interactive top level).
print(rt_plot)
dev.off()

# Plot only the two quick sort variants.
png("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimeplotQS.png")
RTqs <- RT[RT$algorithm == "Quick Sort" | RT$algorithm == "Quick Sort (RP)", ]
qs_plot <- ggplot(RTqs, aes(x = length, y = runtime, col = as.factor(algorithm))) +
  geom_line() +
  facet_grid(type ~ .) +
  theme_bw() +
  scale_colour_manual(values = c(5, 6), name = "Algorithm") +
  ggtitle("Runtimes for Quick Sort algorithms")
print(qs_plot)
dev.off()
|
/HW3/HW3plot.r
|
no_license
|
mace84/PS398-HW1
|
R
| false
| false
| 1,453
|
r
|
## P398: Assignment 3
## Matthias Orlowski
## 02/08/12
## R script to plot run times generated with HW3timetest.py
library(ggplot2)

# Read timing results produced by HW3timetest.py.
# Columns: algorithm name, input type, input length, measured run time.
RT <- read.table("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimes.csv",
                 header = FALSE, as.is = TRUE, sep = ",")
names(RT) <- c("algorithm", "type", "length", "runtime")

# Correct a typo introduced by the python script ("Qick" -> "Quick").
RT$algorithm[RT$algorithm == "Qick Sort"] <- "Quick Sort"
RT$algorithm[RT$algorithm == "Qick Sort (RP)"] <- "Quick Sort (RP)"

# Exclude results skewed by other computer usage while the test was running.
RT <- RT[RT$length < 7680, ]

# Plot all four algorithms.
png("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimeplot.png")
rt_plot <- ggplot(RT, aes(x = length, y = runtime, col = as.factor(algorithm))) +
  geom_line() +
  facet_grid(type ~ .) +
  theme_bw() +
  scale_colour_manual(values = c(3, 4, 5, 6), name = "Algorithm") +
  # opts() was deprecated in ggplot2 0.9.2 and later removed;
  # ggtitle() is the supported way to set a plot title.
  ggtitle("Runtimes for different sorting algorithms")
# Explicit print() so the plot is rendered even when this file is source()d
# (auto-printing only happens at the interactive top level).
print(rt_plot)
dev.off()

# Plot only the two quick sort variants.
png("/Users/mo/Documents/Uni/Duke/ComputationSoc/PS398-HW1/HW3/runtimeplotQS.png")
RTqs <- RT[RT$algorithm == "Quick Sort" | RT$algorithm == "Quick Sort (RP)", ]
qs_plot <- ggplot(RTqs, aes(x = length, y = runtime, col = as.factor(algorithm))) +
  geom_line() +
  facet_grid(type ~ .) +
  theme_bw() +
  scale_colour_manual(values = c(5, 6), name = "Algorithm") +
  ggtitle("Runtimes for Quick Sort algorithms")
print(qs_plot)
dev.off()
|
# The model requires a constant coefficient for the reaction speed
# and a spline function for the coefficient of the forcing function,
# which in this model is the unit function.
# The estimated coefficient function is a step function,
# or a spline function of order one, with 20 steps.
# load the fda script data
# NOTE(review): rm(list=ls()) wipes the caller's global environment; fine for
# a standalone demo but hostile when this file is sourced into a session.
rm(list=ls())
library(R.utils)
library(R.cache)
library(Matrix)
library(fda)
# Hard-coded absolute paths: init.R is expected to define init(), and
# sourceDirectory() loads the remaining Data2LD sources (Data2LD, Data2LD.opt,
# make.coef, make.Xterm, make.Fterm, modelCheck, modelVec2List, ...).
source("/Users/michellecarey/Desktop/Data2LD_Fix/R/init.R")
init()
sourceDirectory("/Users/michellecarey/Desktop/Data2LD_Fix/R")
# 'fdascript' is not defined in this file; presumably loaded by init() or the
# sourced scripts as a 3-way array with X coordinates in slice 1 and Y in
# slice 2 -- TODO confirm.
fdascriptX <- fdascript[,,1]
fdascriptY <- fdascript[,,2]
# Define the observation times in 100ths of a second
centisec <- seq(0,2.3,len=1401)*100
fdarange <- range(centisec)
# Define a constant basis (for the constant reaction-speed coefficient)
conbasis <- create.constant.basis(fdarange)
# Define the order one Bspline basis for the coefficient
# of the forcing term.  Order-1 B-splines are step functions.
nevent <- 20
nAorder <- 1
nAbasis <- nevent
nAknots <- nAbasis + 1   # equally spaced knots over the whole time range
Aknots <- seq(fdarange[1],fdarange[2],len=nAknots)
Abasisobj <- create.bspline.basis(fdarange,nAbasis,nAorder,Aknots)
# Define the two coefficient functions in this model:
# beta  = constant reaction-speed coefficient, starting value 0.04, estimated;
# alpha = 20-step forcing coefficient, all steps starting at 1, estimated.
coef1 <- make.coef(conbasis, 0.04, TRUE, "beta")
coef2 <- make.coef(Abasisobj, matrix(1,nAbasis,1), TRUE, "alpha")
# List array containing the coefficient lists
coefList <- vector("list",2)
coefList[[1]] <- coef1
coefList[[2]] <- coef2
# Check the coefficients
coefResult <- coefCheck(coefList)
coefList <- coefResult$coefList
ntheta <- coefResult$ntheta   # total number of parameters to estimate
print(paste("ntheta = ",ntheta))
# Set up single homogeneous term in D^2x = -beta x
Xterm <- make.Xterm(variable=1, derivative=0, ncoef=1, factor=-1)
XList <- vector("list",1)
XList[[1]] <- Xterm
# Set up coefficient for forcing term \alpha(t)*1
confd <- fd(1,conbasis)   # the unit forcing function u(t) = 1
# ncoef=2 points at coefList[[2]], the step-function alpha coefficient
Fterm <- make.Fterm(ncoef=2, Ufd=confd, factor=1)
FList <- vector("list", 1)
FList[[1]] <- Fterm
# make variable for X coefficient of script (second-order ODE)
Xvariable <- make.variable(name="X", order=2, XList=XList, FList=FList)
# List array for the whole system
# NOTE(review): '=' assignment here; the rest of the file uses '<-'.
fdaXList = vector("list",1)
fdaXList[[1]] <- Xvariable
# check the system specification for consistency
fdaXList <- modelCheck(fdaXList, coefList)
# Set up the data lists for X- and Y-coordinates (first replicate only)
yListX <- list(list(argvals=centisec, y=fdascriptX[,1]))
yListY <- list(list(argvals=centisec, y=fdascriptY[,1]))
## The basis functions for the script will be B-splines:
# 32 basis functions per second, 4 per 8 hertz cycle.
# 2.3 seconds gives 2.3*32 = 73.6, rounded up to 74 break points;
# number of basis functions = norder + interior knots = 6 + (74 - 1) = 79.
nXbasis <- 79
norder <- 6 # order 6 for a smooth 2nd deriv.
fdabasis <- create.bspline.basis(fdarange, nXbasis, norder)
XbasisList = vector("list",1)
XbasisList[[1]] <- fdabasis
# Single evaluation in order to set up the 4-way tensors
rhoVec <- 0.5
Data2LDResult <- Data2LD(yListX, XbasisList, fdaXList, coefList, rhoVec)
MSE <- Data2LDResult$MSE # Mean squared error for fit to data
DpMSE <- Data2LDResult$DpMSE # gradient with respect to parameter values
D2ppMSE <- Data2LDResult$D2ppMSE # Hessian matrix
XfdParList <- Data2LDResult$XfdParList # List of fdPar objects for variable values
df <- Data2LDResult$df # Degrees of freedom for fit
gcv <- Data2LDResult$gcv # Generalized cross-validation coefficient
ISE <- Data2LDResult$ISE # Size of second term, integrated squared error
Var.theta <- Data2LDResult$Var.theta # Estimate sampling variance for parameters
# set up sequence of rho values: logistic transform of 0:7 gives
# 8 values increasing from 0.5 towards 1 (more weight on the ODE fit)
gamvec <- 0:7
rhoVec <- exp(gamvec)/(1+exp(gamvec))
nrho <- length(rhoVec)
# values controlling optimization
dbglev <- 1 # debugging level
iterlim <- 50 # maximum number of iterations
convrg <- c(1e-6, 1e-4) # convergence criterion
# Set up arrays to hold results over rho (one row per rho value)
dfesaveX <- matrix(0,nrho,1)
gcvsaveX <- matrix(0,nrho,1)
MSEsaveX <- matrix(0,nrho,1)
ISEsaveX <- matrix(0,nrho,1)
thesaveX <- matrix(0,nrho,ntheta)
# Initialize coefficient list
coefList.optX <- coefList
# step through rho values, optimizing at each step;
# each iteration warm-starts from the previous optimum (coefList.optX)
# X-coordinate:
for (irho in 1:nrho) {
rhoi <- rhoVec[irho]
print(paste("rho <- ",round(rhoi,5)))
Data2LD.optResult <- Data2LD.opt(yListX, XbasisList, fdaXList, coefList.optX,
rhoi, convrg, iterlim, dbglev)
theta.opti <- Data2LD.optResult$thetastore
coefList.opti <- modelVec2List(theta.opti, coefList.optX)
# re-evaluate at the optimum to collect df/gcv/MSE/ISE diagnostics
Data2LDResult <- Data2LD(yListX, XbasisList, fdaXList, coefList.opti, rhoi)
thesaveX[irho,] <- theta.opti
dfesaveX[irho,1] <- Data2LDResult$df
gcvsaveX[irho,1] <- Data2LDResult$gcv
MSEsaveX[irho,1] <- Data2LDResult$MSE
ISEsaveX[irho,1] <- Data2LDResult$ISE
coefList.optX <- coefList.opti
}
# Set up arrays to hold results over rho (one row per rho value)
dfesaveY <- matrix(0,nrho,1)
gcvsaveY <- matrix(0,nrho,1)
MSEsaveY <- matrix(0,nrho,1)
ISEsaveY <- matrix(0,nrho,1)
thesaveY <- matrix(0,nrho,ntheta)
# Initialize coefficient list
coefList.optY <- coefList
# step through rho values, optimizing at each step;
# mirrors the X-coordinate loop above, warm-starting from coefList.optY
# Y-coordinate:
for (irho in 1:nrho) {
rhoi <- rhoVec[irho]
print(paste("rho <- ",round(rhoi,5)))
Data2LD.optResult <- Data2LD.opt(yListY, XbasisList, fdaXList, coefList.optY,
rhoi, convrg, iterlim, dbglev)
theta.opti <- Data2LD.optResult$thetastore
coefList.opti <- modelVec2List(theta.opti, coefList.optY)
# re-evaluate at the optimum to collect df/gcv/MSE/ISE diagnostics
Data2LDResult <- Data2LD(yListY, XbasisList, fdaXList, coefList.opti, rhoi)
thesaveY[irho,] <- theta.opti
dfesaveY[irho,1] <- Data2LDResult$df
gcvsaveY[irho,1] <- Data2LDResult$gcv
MSEsaveY[irho,1] <- Data2LDResult$MSE
ISEsaveY[irho,1] <- Data2LDResult$ISE
coefList.optY <- coefList.opti
}
# display degrees of freedom and gcv values
print("for X: rho df gcv")
print(round(cbind(rhoVec, dfesaveX, gcvsaveX),4))
print("for Y: rho df gcv")
print(round(cbind(rhoVec, dfesaveY, gcvsaveY),4))
# Evaluate the fit for parameter values at highest rho value
# NOTE(review): 8 is length(rhoVec); using nrho here would be more robust
# if the gamvec sequence above is ever changed.
irho <- 8
thetaX <- thesaveX[irho,]
coefListX <- modelVec2List(thetaX, coefList)
thetaY <- thesaveY[irho,]
coefListY <- modelVec2List(thetaY, coefList)
rho <- rhoVec[irho]
Data2LDResultX <- Data2LD(yListX, XbasisList, fdaXList, coefListX, rho)
Data2LDResultY <- Data2LD(yListY, XbasisList, fdaXList, coefListY, rho)
# report fit diagnostics for each coordinate
print(paste("MSEX = ", round(Data2LDResultX$MSE,4)))
print(paste("dfX = ", round(Data2LDResultX$df, 4)))
print(paste("gcvX = ", round(Data2LDResultX$gcv,4)))
print(paste("MSEY = ", round(Data2LDResultY$MSE,4)))
print(paste("dfY = ", round(Data2LDResultY$df, 4)))
print(paste("gcvY = ", round(Data2LDResultY$gcv,4)))
# extract the fitted functional data objects for each coordinate
FDAScriptfdX <- Data2LDResultX$XfdParList[[1]]$fd
FDAScriptfdY <- Data2LDResultY$XfdParList[[1]]$fd
# evaluate the fits on a fine grid for plotting
FDAScriptTimefine <- seq(fdarange[1],fdarange[2],len=201)
FDAScriptfineX <- eval.fd(FDAScriptTimefine, FDAScriptfdX)
FDAScriptfineY <- eval.fd(FDAScriptTimefine, FDAScriptfdY)
# plot fit to the data: fitted trajectory as a line, observations as points
plot(FDAScriptfineX, FDAScriptfineY, type="l",
xlab="X coordinate", ylab="Y coordinate")
points(fdascriptX[,1], fdascriptY[,1], pch="o")
|
/MSc-SimSci/ACM41000-Uncertainty-Quantification/assignement3/Data2LD_Fix/Data2LD_Fix/demo/FDAScript.R
|
no_license
|
iantowey/sandbox
|
R
| false
| false
| 7,122
|
r
|
# The model requires a constant coefficient for the reaction speed
# and a spline function for the coefficient of the forcing function,
# which in this model is the unit function.
# The estimated coefficient function is a step function,
# or a spline function of order one, with 20 steps.
# load the fda script data
# NOTE(review): rm(list=ls()) wipes the caller's global environment; fine for
# a standalone demo but hostile when this file is sourced into a session.
rm(list=ls())
library(R.utils)
library(R.cache)
library(Matrix)
library(fda)
# Hard-coded absolute paths: init.R is expected to define init(), and
# sourceDirectory() loads the remaining Data2LD sources (Data2LD, Data2LD.opt,
# make.coef, make.Xterm, make.Fterm, modelCheck, modelVec2List, ...).
source("/Users/michellecarey/Desktop/Data2LD_Fix/R/init.R")
init()
sourceDirectory("/Users/michellecarey/Desktop/Data2LD_Fix/R")
# 'fdascript' is not defined in this file; presumably loaded by init() or the
# sourced scripts as a 3-way array with X coordinates in slice 1 and Y in
# slice 2 -- TODO confirm.
fdascriptX <- fdascript[,,1]
fdascriptY <- fdascript[,,2]
# Define the observation times in 100ths of a second
centisec <- seq(0,2.3,len=1401)*100
fdarange <- range(centisec)
# Define a constant basis (for the constant reaction-speed coefficient)
conbasis <- create.constant.basis(fdarange)
# Define the order one Bspline basis for the coefficient
# of the forcing term.  Order-1 B-splines are step functions.
nevent <- 20
nAorder <- 1
nAbasis <- nevent
nAknots <- nAbasis + 1   # equally spaced knots over the whole time range
Aknots <- seq(fdarange[1],fdarange[2],len=nAknots)
Abasisobj <- create.bspline.basis(fdarange,nAbasis,nAorder,Aknots)
# Define the two coefficient functions in this model:
# beta  = constant reaction-speed coefficient, starting value 0.04, estimated;
# alpha = 20-step forcing coefficient, all steps starting at 1, estimated.
coef1 <- make.coef(conbasis, 0.04, TRUE, "beta")
coef2 <- make.coef(Abasisobj, matrix(1,nAbasis,1), TRUE, "alpha")
# List array containing the coefficient lists
coefList <- vector("list",2)
coefList[[1]] <- coef1
coefList[[2]] <- coef2
# Check the coefficients
coefResult <- coefCheck(coefList)
coefList <- coefResult$coefList
ntheta <- coefResult$ntheta   # total number of parameters to estimate
print(paste("ntheta = ",ntheta))
# Set up single homogeneous term in D^2x = -beta x
Xterm <- make.Xterm(variable=1, derivative=0, ncoef=1, factor=-1)
XList <- vector("list",1)
XList[[1]] <- Xterm
# Set up coefficient for forcing term \alpha(t)*1
confd <- fd(1,conbasis)   # the unit forcing function u(t) = 1
# ncoef=2 points at coefList[[2]], the step-function alpha coefficient
Fterm <- make.Fterm(ncoef=2, Ufd=confd, factor=1)
FList <- vector("list", 1)
FList[[1]] <- Fterm
# make variable for X coefficient of script (second-order ODE)
Xvariable <- make.variable(name="X", order=2, XList=XList, FList=FList)
# List array for the whole system
# NOTE(review): '=' assignment here; the rest of the file uses '<-'.
fdaXList = vector("list",1)
fdaXList[[1]] <- Xvariable
# check the system specification for consistency
fdaXList <- modelCheck(fdaXList, coefList)
# Set up the data lists for X- and Y-coordinates (first replicate only)
yListX <- list(list(argvals=centisec, y=fdascriptX[,1]))
yListY <- list(list(argvals=centisec, y=fdascriptY[,1]))
## The basis functions for the script will be B-splines:
# 32 basis functions per second, 4 per 8 hertz cycle.
# 2.3 seconds gives 2.3*32 = 73.6, rounded up to 74 break points;
# number of basis functions = norder + interior knots = 6 + (74 - 1) = 79.
nXbasis <- 79
norder <- 6 # order 6 for a smooth 2nd deriv.
fdabasis <- create.bspline.basis(fdarange, nXbasis, norder)
XbasisList = vector("list",1)
XbasisList[[1]] <- fdabasis
# Single evaluation in order to set up the 4-way tensors
rhoVec <- 0.5
Data2LDResult <- Data2LD(yListX, XbasisList, fdaXList, coefList, rhoVec)
MSE <- Data2LDResult$MSE # Mean squared error for fit to data
DpMSE <- Data2LDResult$DpMSE # gradient with respect to parameter values
D2ppMSE <- Data2LDResult$D2ppMSE # Hessian matrix
XfdParList <- Data2LDResult$XfdParList # List of fdPar objects for variable values
df <- Data2LDResult$df # Degrees of freedom for fit
gcv <- Data2LDResult$gcv # Generalized cross-validation coefficient
ISE <- Data2LDResult$ISE # Size of second term, integrated squared error
Var.theta <- Data2LDResult$Var.theta # Estimate sampling variance for parameters
# set up sequence of rho values: logistic transform of 0:7 gives
# 8 values increasing from 0.5 towards 1 (more weight on the ODE fit)
gamvec <- 0:7
rhoVec <- exp(gamvec)/(1+exp(gamvec))
nrho <- length(rhoVec)
# values controlling optimization
dbglev <- 1 # debugging level
iterlim <- 50 # maximum number of iterations
convrg <- c(1e-6, 1e-4) # convergence criterion
# Set up arrays to hold results over rho (one row per rho value)
dfesaveX <- matrix(0,nrho,1)
gcvsaveX <- matrix(0,nrho,1)
MSEsaveX <- matrix(0,nrho,1)
ISEsaveX <- matrix(0,nrho,1)
thesaveX <- matrix(0,nrho,ntheta)
# Initialize coefficient list
coefList.optX <- coefList
# step through rho values, optimizing at each step;
# each iteration warm-starts from the previous optimum (coefList.optX)
# X-coordinate:
for (irho in 1:nrho) {
rhoi <- rhoVec[irho]
print(paste("rho <- ",round(rhoi,5)))
Data2LD.optResult <- Data2LD.opt(yListX, XbasisList, fdaXList, coefList.optX,
rhoi, convrg, iterlim, dbglev)
theta.opti <- Data2LD.optResult$thetastore
coefList.opti <- modelVec2List(theta.opti, coefList.optX)
# re-evaluate at the optimum to collect df/gcv/MSE/ISE diagnostics
Data2LDResult <- Data2LD(yListX, XbasisList, fdaXList, coefList.opti, rhoi)
thesaveX[irho,] <- theta.opti
dfesaveX[irho,1] <- Data2LDResult$df
gcvsaveX[irho,1] <- Data2LDResult$gcv
MSEsaveX[irho,1] <- Data2LDResult$MSE
ISEsaveX[irho,1] <- Data2LDResult$ISE
coefList.optX <- coefList.opti
}
# Set up arrays to hold results over rho (one row per rho value)
dfesaveY <- matrix(0,nrho,1)
gcvsaveY <- matrix(0,nrho,1)
MSEsaveY <- matrix(0,nrho,1)
ISEsaveY <- matrix(0,nrho,1)
thesaveY <- matrix(0,nrho,ntheta)
# Initialize coefficient list
coefList.optY <- coefList
# step through rho values, optimizing at each step;
# mirrors the X-coordinate loop above, warm-starting from coefList.optY
# Y-coordinate:
for (irho in 1:nrho) {
rhoi <- rhoVec[irho]
print(paste("rho <- ",round(rhoi,5)))
Data2LD.optResult <- Data2LD.opt(yListY, XbasisList, fdaXList, coefList.optY,
rhoi, convrg, iterlim, dbglev)
theta.opti <- Data2LD.optResult$thetastore
coefList.opti <- modelVec2List(theta.opti, coefList.optY)
# re-evaluate at the optimum to collect df/gcv/MSE/ISE diagnostics
Data2LDResult <- Data2LD(yListY, XbasisList, fdaXList, coefList.opti, rhoi)
thesaveY[irho,] <- theta.opti
dfesaveY[irho,1] <- Data2LDResult$df
gcvsaveY[irho,1] <- Data2LDResult$gcv
MSEsaveY[irho,1] <- Data2LDResult$MSE
ISEsaveY[irho,1] <- Data2LDResult$ISE
coefList.optY <- coefList.opti
}
# display degrees of freedom and gcv values
print("for X: rho df gcv")
print(round(cbind(rhoVec, dfesaveX, gcvsaveX),4))
print("for Y: rho df gcv")
print(round(cbind(rhoVec, dfesaveY, gcvsaveY),4))
# Evaluate the fit for parameter values at highest rho value
# NOTE(review): 8 is length(rhoVec); using nrho here would be more robust
# if the gamvec sequence above is ever changed.
irho <- 8
thetaX <- thesaveX[irho,]
coefListX <- modelVec2List(thetaX, coefList)
thetaY <- thesaveY[irho,]
coefListY <- modelVec2List(thetaY, coefList)
rho <- rhoVec[irho]
Data2LDResultX <- Data2LD(yListX, XbasisList, fdaXList, coefListX, rho)
Data2LDResultY <- Data2LD(yListY, XbasisList, fdaXList, coefListY, rho)
# report fit diagnostics for each coordinate
print(paste("MSEX = ", round(Data2LDResultX$MSE,4)))
print(paste("dfX = ", round(Data2LDResultX$df, 4)))
print(paste("gcvX = ", round(Data2LDResultX$gcv,4)))
print(paste("MSEY = ", round(Data2LDResultY$MSE,4)))
print(paste("dfY = ", round(Data2LDResultY$df, 4)))
print(paste("gcvY = ", round(Data2LDResultY$gcv,4)))
# extract the fitted functional data objects for each coordinate
FDAScriptfdX <- Data2LDResultX$XfdParList[[1]]$fd
FDAScriptfdY <- Data2LDResultY$XfdParList[[1]]$fd
# evaluate the fits on a fine grid for plotting
FDAScriptTimefine <- seq(fdarange[1],fdarange[2],len=201)
FDAScriptfineX <- eval.fd(FDAScriptTimefine, FDAScriptfdX)
FDAScriptfineY <- eval.fd(FDAScriptTimefine, FDAScriptfdY)
# plot fit to the data: fitted trajectory as a line, observations as points
plot(FDAScriptfineX, FDAScriptfineY, type="l",
xlab="X coordinate", ylab="Y coordinate")
points(fdascriptX[,1], fdascriptY[,1], pch="o")
|
# add points to section plot ----------------------------------------------
# Overlay detection markers on an existing section plot.
#   wd:     table with a 'time' column coercible via as.POSIXct()
#   pos:    y (depth-axis) position at which every marker is drawn
#   pt_col: fill colour for the symbols
#   pt_pch: plotting character (21 = fillable circle)
#   cex:    symbol expansion factor
add_points = function(wd, pos, pt_col, pt_pch = 21, cex = 2){
  det_times <- as.POSIXct(wd$time)
  det_depth <- rep(pos, length(det_times))
  points(det_times, det_depth, pch = pt_pch, bg = pt_col, cex = cex)
}
# plot section ------------------------------------------------------------
# Draw a time/depth CTD section coloured by one variable, overlay whale
# detection markers per species, and draw the colour palette.
#
#   ctd:     object with $time, $depth and the selected variable
#   var:     one of 'temperature', 'salinity', 'density'
#   zlim:    colour-scale limits forwarded to colormap()
#   cex.all: character expansion applied to labels and axes
#
# NOTE(review): relies on globals rw_m/rw_p, fw_m/fw_p, sw_m/sw_p, hw_m/hw_p
# (per-species detection tables -- confirm their provenance) and on the
# 'oce' package for colormap(), oce.colors*() and drawPalette().
plot_section = function(ctd, var, zlim, cex.all = 1.5){
  # y positions (in depth units) of the per-species detection rows
  rw_pos = 150
  fw_pos = 160
  sw_pos = 170
  hw_pos = 180
  lab_pos = c(rw_pos, fw_pos, sw_pos, hw_pos)
  spp = c('right', 'fin', 'sei', 'humpback')
  # layout: panel 1 = section, panel 2 = palette, panel 3 = spacer cell
  m = rbind(c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,3))
  # choose palette and colour mapping for the requested variable.
  # The mapping variable was previously named 'c', shadowing base::c();
  # that only worked because R skips non-function bindings when resolving
  # a call to c() -- renamed to 'cmap' for clarity.  T/F also replaced
  # with TRUE/FALSE, since T and F are ordinary (reassignable) variables.
  if(var == 'temperature'){
    pal = oce.colorsTemperature()
    lab = 'Temperature [deg C]'
    cmap = colormap(ctd$temperature, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else if(var == 'salinity'){
    pal = oce.colorsSalinity()
    lab = 'Salinity'
    cmap = colormap(ctd$salinity, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else if(var == 'density'){
    pal = oce.colorsDensity()
    lab = 'Density [kg/m3]'
    cmap = colormap(ctd$density, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else {
    stop('Unknown variable! Please choose from: temperature, salinity, or density')
  }
  layout(m)
  # plot section: grey profile line, points coloured by the mapped variable
  plot(ctd$time, ctd$depth, ylim = c(175, 0), type = 'l', col = 'grey',
       ylab = 'Depth [m]', xlab = '', cex.lab = cex.all, cex.axis = cex.all)
  mtext(paste0(lab), side = 3, line = 1, adj = 0)
  points(ctd$time, ctd$depth, pch = 21, bg = cmap$zcol, cex = 1.75, col = NULL)
  # overlay detections (yellow = *_m tables, red = *_p tables)
  abline(h = lab_pos, col = 'grey')
  add_points(rw_m, rw_pos, 'yellow')
  add_points(rw_p, rw_pos, 'red')
  add_points(fw_m, fw_pos, 'yellow')
  add_points(fw_p, fw_pos, 'red')
  add_points(sw_m, sw_pos, 'yellow')
  add_points(sw_p, sw_pos, 'red')
  add_points(hw_m, hw_pos, 'yellow')
  add_points(hw_p, hw_pos, 'red')
  # species labels on the right-hand axis
  axis(side = 4, at = lab_pos, tick = TRUE, labels = spp, las = 2)
  # colour palette in layout panel 2
  drawPalette(cmap$zlim, col = cmap$col, breaks = cmap$breaks, zlab = '', fullpage = TRUE,
              cex.axis = cex.all, cex.lab = cex.all)
}
|
/helper.R
|
no_license
|
hansenjohnson/live_glider
|
R
| false
| false
| 2,371
|
r
|
# add points to section plot ----------------------------------------------
# Overlay detection markers on an existing section plot.
#   wd:     table with a 'time' column coercible via as.POSIXct()
#   pos:    y (depth-axis) position at which every marker is drawn
#   pt_col: fill colour for the symbols
#   pt_pch: plotting character (21 = fillable circle)
#   cex:    symbol expansion factor
add_points = function(wd, pos, pt_col, pt_pch = 21, cex = 2){
  det_times <- as.POSIXct(wd$time)
  det_depth <- rep(pos, length(det_times))
  points(det_times, det_depth, pch = pt_pch, bg = pt_col, cex = cex)
}
# plot section ------------------------------------------------------------
# Draw a time/depth CTD section coloured by one variable, overlay whale
# detection markers per species, and draw the colour palette.
#
#   ctd:     object with $time, $depth and the selected variable
#   var:     one of 'temperature', 'salinity', 'density'
#   zlim:    colour-scale limits forwarded to colormap()
#   cex.all: character expansion applied to labels and axes
#
# NOTE(review): relies on globals rw_m/rw_p, fw_m/fw_p, sw_m/sw_p, hw_m/hw_p
# (per-species detection tables -- confirm their provenance) and on the
# 'oce' package for colormap(), oce.colors*() and drawPalette().
plot_section = function(ctd, var, zlim, cex.all = 1.5){
  # y positions (in depth units) of the per-species detection rows
  rw_pos = 150
  fw_pos = 160
  sw_pos = 170
  hw_pos = 180
  lab_pos = c(rw_pos, fw_pos, sw_pos, hw_pos)
  spp = c('right', 'fin', 'sei', 'humpback')
  # layout: panel 1 = section, panel 2 = palette, panel 3 = spacer cell
  m = rbind(c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,2),
            c(1,1,1,1,1,1,1,1,1,1,1,3))
  # choose palette and colour mapping for the requested variable.
  # The mapping variable was previously named 'c', shadowing base::c();
  # that only worked because R skips non-function bindings when resolving
  # a call to c() -- renamed to 'cmap' for clarity.  T/F also replaced
  # with TRUE/FALSE, since T and F are ordinary (reassignable) variables.
  if(var == 'temperature'){
    pal = oce.colorsTemperature()
    lab = 'Temperature [deg C]'
    cmap = colormap(ctd$temperature, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else if(var == 'salinity'){
    pal = oce.colorsSalinity()
    lab = 'Salinity'
    cmap = colormap(ctd$salinity, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else if(var == 'density'){
    pal = oce.colorsDensity()
    lab = 'Density [kg/m3]'
    cmap = colormap(ctd$density, breaks = 100, zclip = TRUE, col = pal, zlim = zlim)
  } else {
    stop('Unknown variable! Please choose from: temperature, salinity, or density')
  }
  layout(m)
  # plot section: grey profile line, points coloured by the mapped variable
  plot(ctd$time, ctd$depth, ylim = c(175, 0), type = 'l', col = 'grey',
       ylab = 'Depth [m]', xlab = '', cex.lab = cex.all, cex.axis = cex.all)
  mtext(paste0(lab), side = 3, line = 1, adj = 0)
  points(ctd$time, ctd$depth, pch = 21, bg = cmap$zcol, cex = 1.75, col = NULL)
  # overlay detections (yellow = *_m tables, red = *_p tables)
  abline(h = lab_pos, col = 'grey')
  add_points(rw_m, rw_pos, 'yellow')
  add_points(rw_p, rw_pos, 'red')
  add_points(fw_m, fw_pos, 'yellow')
  add_points(fw_p, fw_pos, 'red')
  add_points(sw_m, sw_pos, 'yellow')
  add_points(sw_p, sw_pos, 'red')
  add_points(hw_m, hw_pos, 'yellow')
  add_points(hw_p, hw_pos, 'red')
  # species labels on the right-hand axis
  axis(side = 4, at = lab_pos, tick = TRUE, labels = spp, las = 2)
  # colour palette in layout panel 2
  drawPalette(cmap$zlim, col = cmap$col, breaks = cmap$breaks, zlab = '', fullpage = TRUE,
              cex.axis = cex.all, cex.lab = cex.all)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Creative.servingRestrictions}
\alias{Creative.servingRestrictions}
\title{Creative.servingRestrictions Object}
\usage{
Creative.servingRestrictions(Creative.servingRestrictions.contexts = NULL,
Creative.servingRestrictions.disapprovalReasons = NULL)
}
\arguments{
\item{Creative.servingRestrictions.contexts}{The \link{Creative.servingRestrictions.contexts} object or list of objects}
\item{Creative.servingRestrictions.disapprovalReasons}{The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects}
}
\value{
Creative.servingRestrictions object
}
\description{
Creative.servingRestrictions Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Creative functions: \code{\link{Creative.corrections.contexts}},
\code{\link{Creative.corrections}},
\code{\link{Creative.filteringReasons.reasons}},
\code{\link{Creative.filteringReasons}},
\code{\link{Creative.nativeAd.appIcon}},
\code{\link{Creative.nativeAd.image}},
\code{\link{Creative.nativeAd.logo}},
\code{\link{Creative.nativeAd}},
\code{\link{Creative.servingRestrictions.contexts}},
\code{\link{Creative.servingRestrictions.disapprovalReasons}},
\code{\link{Creative}}, \code{\link{creatives.insert}}
}
|
/googleadexchangebuyerv14.auto/man/Creative.servingRestrictions.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,386
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{Creative.servingRestrictions}
\alias{Creative.servingRestrictions}
\title{Creative.servingRestrictions Object}
\usage{
Creative.servingRestrictions(Creative.servingRestrictions.contexts = NULL,
Creative.servingRestrictions.disapprovalReasons = NULL)
}
\arguments{
\item{Creative.servingRestrictions.contexts}{The \link{Creative.servingRestrictions.contexts} object or list of objects}
\item{Creative.servingRestrictions.disapprovalReasons}{The \link{Creative.servingRestrictions.disapprovalReasons} object or list of objects}
}
\value{
Creative.servingRestrictions object
}
\description{
Creative.servingRestrictions Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other Creative functions: \code{\link{Creative.corrections.contexts}},
\code{\link{Creative.corrections}},
\code{\link{Creative.filteringReasons.reasons}},
\code{\link{Creative.filteringReasons}},
\code{\link{Creative.nativeAd.appIcon}},
\code{\link{Creative.nativeAd.image}},
\code{\link{Creative.nativeAd.logo}},
\code{\link{Creative.nativeAd}},
\code{\link{Creative.servingRestrictions.contexts}},
\code{\link{Creative.servingRestrictions.disapprovalReasons}},
\code{\link{Creative}}, \code{\link{creatives.insert}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils2.R
\name{tabler_resp}
\alias{tabler_resp}
\title{Response table}
\usage{
tabler_resp(
x,
r_or_better = levels(x)[3:1],
conf = 0.95,
digits = 0L,
frac = TRUE,
show_conf = TRUE,
pct.sign = TRUE,
total = FALSE,
two_stage = FALSE
)
}
\arguments{
\item{x}{a factor variable of responses; responses should be ordered as
CR, PR, SD, PD, NE or similar (i.e., best to worst)}
\item{r_or_better}{if integer(s), indicates the levels of \code{x} that
are to be combined with better responses; for example, if
\code{r_or_better = 3} (default), then any occurrence of level 1, 2, or 3
of \code{x} is treated as a response, and the proportion and confidence
interval are calculated for the aggregate
if \code{FALSE}; levels of \code{x} are estimated independently}
\item{conf, frac, show_conf, pct.sign}{additional arguments passed to
\code{\link{binconr}}}
\item{digits}{number of digits past the decimal point to keep}
\item{total}{logical or numeric; if \code{TRUE}, a column with the total,
i.e., \code{length(x)} is added; if numeric, \code{length(x)} and,
optionally, fraction and percent out of \code{total} is added}
\item{two_stage}{\code{FALSE} (default, assumes exact binomial CIs are
desired) or a vector of length 3 with the 1) maximum number responses in
the first stage that can be observed \emph{without} continuing; 2) the
number entered in the first stage; and 3) the additional number entered
in the second stage
if more than three integers are given, the remaining should indicate the
columns which should be calculated as two-stage CIs; usually this is only
one value but multiple are accepted}
}
\description{
Convenience function to calculate proportions and confidence intervals and
format for easy display.
}
\examples{
set.seed(1)
r <- c('CR','PR','SD','PD','NE')
x <- factor(sample(r, 30, replace = TRUE), r)
tabler_resp(x, 3)
tabler_resp(x, 'PR')
tabler_resp(x, 'PR', total = 50)
## note NAs are removed
y <- `[<-`(x, 1:10, value = NA)
tabler_resp(x, FALSE)
tabler_resp(y, FALSE)
## two-stage designs
## use two-stage CI in "PR" column
tabler_resp(x)
two_idx <- 1:2
two_stage <- c(r1 = 2, n1 = 10, n2 = 20)
tabler_resp(x, two_stage = c(two_stage, two_idx))
## compare
bincon(c(2, 4), c(10, 20), method = 'two-stage') ## CRs
bincon(c(2, 11), c(10, 20), method = 'two-stage') ## PRs
## one-stage methods should not be used
bincon(c(4, 11), 30, method = 'exact')
## typical usage
ht <- htmlTable::htmlTable(
rbind(
tabler_resp(x),
tabler_resp(x, conf = 0.9),
tabler_resp(x, frac = FALSE, pct.sign = FALSE,
show_conf = FALSE, digits = 1),
tabler_resp(x, two_stage = c(2, 10, 20, 1))
),
caption = 'Table of responses with exact binomial and
two-stage<sup>†</sup>confidence intervals.',
css.cell = 'padding: 0 10 0px; white-space: nowrap;',
cgroup = c('Evaluation', 'Outcome (95\% CI)'),
n.cgroup = c(nlevels(x), 3L)
)
structure(ht, class = 'htmlTable')
}
\seealso{
\code{\link{bincon}}; \code{\link{binconr}}
Other tabler:
\code{\link{tabler_by}()},
\code{\link{tabler_stat2}()},
\code{\link{tabler_stat}()},
\code{\link{tabler}()}
}
\concept{tabler}
|
/man/tabler_resp.Rd
|
no_license
|
Huaichao2018/rawr
|
R
| false
| true
| 3,234
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils2.R
\name{tabler_resp}
\alias{tabler_resp}
\title{Response table}
\usage{
tabler_resp(
x,
r_or_better = levels(x)[3:1],
conf = 0.95,
digits = 0L,
frac = TRUE,
show_conf = TRUE,
pct.sign = TRUE,
total = FALSE,
two_stage = FALSE
)
}
\arguments{
\item{x}{a factor variable of responses; responses should be ordered as
CR, PR, SD, PD, NE or similar (i.e., best to worst)}
\item{r_or_better}{if integer(s), indicates the levels of \code{x} that
are to be combined with better responses; for example, if
\code{r_or_better = 3} (default), then any occurrence of level 1, 2, or 3
of \code{x} is treated as a response, and the proportion and confidence
interval are calculated for the aggregate
if \code{FALSE}; levels of \code{x} are estimated independently}
\item{conf, frac, show_conf, pct.sign}{additional arguments passed to
\code{\link{binconr}}}
\item{digits}{number of digits past the decimal point to keep}
\item{total}{logical or numeric; if \code{TRUE}, a column with the total,
i.e., \code{length(x)} is added; if numeric, \code{length(x)} and,
optionally, fraction and percent out of \code{total} is added}
\item{two_stage}{\code{FALSE} (default, assumes exact binomial CIs are
desired) or a vector of length 3 with the 1) maximum number responses in
the first stage that can be observed \emph{without} continuing; 2) the
number entered in the first stage; and 3) the additional number entered
in the second stage
if more than three integers are given, the remaining should indicate the
columns which should be calculated as two-stage CIs; usually this is only
one value but multiple are accepted}
}
\description{
Convenience function to calculate proportions and confidence intervals and
format for easy display.
}
\examples{
set.seed(1)
r <- c('CR','PR','SD','PD','NE')
x <- factor(sample(r, 30, replace = TRUE), r)
tabler_resp(x, 3)
tabler_resp(x, 'PR')
tabler_resp(x, 'PR', total = 50)
## note NAs are removed
y <- `[<-`(x, 1:10, value = NA)
tabler_resp(x, FALSE)
tabler_resp(y, FALSE)
## two-stage designs
## use two-stage CI in "PR" column
tabler_resp(x)
two_idx <- 1:2
two_stage <- c(r1 = 2, n1 = 10, n2 = 20)
tabler_resp(x, two_stage = c(two_stage, two_idx))
## compare
bincon(c(2, 4), c(10, 20), method = 'two-stage') ## CRs
bincon(c(2, 11), c(10, 20), method = 'two-stage') ## PRs
## one-stage methods should not be used
bincon(c(4, 11), 30, method = 'exact')
## typical usage
ht <- htmlTable::htmlTable(
rbind(
tabler_resp(x),
tabler_resp(x, conf = 0.9),
tabler_resp(x, frac = FALSE, pct.sign = FALSE,
show_conf = FALSE, digits = 1),
tabler_resp(x, two_stage = c(2, 10, 20, 1))
),
caption = 'Table of responses with exact binomial and
two-stage<sup>†</sup> confidence intervals.',
css.cell = 'padding: 0 10 0px; white-space: nowrap;',
cgroup = c('Evaluation', 'Outcome (95\% CI)'),
n.cgroup = c(nlevels(x), 3L)
)
structure(ht, class = 'htmlTable')
}
\seealso{
\code{\link{bincon}}; \code{\link{binconr}}
Other tabler:
\code{\link{tabler_by}()},
\code{\link{tabler_stat2}()},
\code{\link{tabler_stat}()},
\code{\link{tabler}()}
}
\concept{tabler}
|
# Check and flag problematic station coordinates from a CDT input file.
#
# GeneralParameters is a list with (at least): output (existing output
# directory), data.type ("cdtcoords" or "cdtstation"), infile (name of the
# already-opened input file), shpfile (ESRI shapefile name, "" to skip the
# boundary check) and buffer (buffer distance; divided by 111 below,
# presumably km-to-degrees -- confirm).
#
# Checks performed: missing coordinates, coordinates outside [-180, 360] x
# [-90, 90], duplicated station IDs, duplicated lon/lat pairs and, when a
# shapefile is available, stations falling outside the buffered boundary.
# A copy of the input, the check index and display/table objects are written
# under <output>/CHECK.COORDS_data, and results are cached in the global
# .cdtData$EnvData environment. Returns 0 on success, NULL on failure
# (failures are reported through Insert.Messages.Out).
StnChkCoordsProcs <- function(GeneralParameters){
if(!dir.exists(GeneralParameters$output)){
Insert.Messages.Out(paste(GeneralParameters$output, "did not find"), format = TRUE)
return(NULL)
}
# "cdtcoords" input: a plain table; columns 1, 3 and 4 are used as id,
# longitude and latitude (column meaning assumed from usage here -- confirm
# against the CDT coordinates-file format).
if(GeneralParameters$data.type == "cdtcoords")
{
don0 <- getStnOpenData(GeneralParameters$infile)
if(is.null(don0)) return(NULL)
nom.col <- names(don0)
don.disp <- don0
coords <- list(id = as.character(don0[, 1]),
lon = as.numeric(don0[, 3]),
lat = as.numeric(don0[, 4]))
}
# "cdtstation" input: CDT station-data format parsed by splitCDTData0();
# the elevation column is kept only when present.
if(GeneralParameters$data.type == "cdtstation")
{
don0 <- getStnOpenData(GeneralParameters$infile)
if(is.null(don0)) return(NULL)
don <- splitCDTData0(don0)
if(is.null(don)) return(NULL)
don <- don[c('id', 'lon', 'lat', 'elv')]
nom.col <- c("ID", "Longitude", "Latitude", "Elevation")
if(is.null(don$elv)){
don <- don[c('id', 'lon', 'lat')]
nom.col <- nom.col[1:3]
}
don.disp <- as.data.frame(don)
names(don.disp) <- nom.col
coords <- don[c('id', 'lon', 'lat')]
rm(don)
}
############
# Write an as-yet-unmodified copy of the input file to the output folder;
# StnChkCoordsCorrect() later edits this copy in place.
outdir <- file.path(GeneralParameters$output, "CHECK.COORDS_data")
dir.create(outdir, showWarnings = FALSE, recursive = TRUE)
fileout <- file.path(outdir, paste0('Checked_Coords_', GeneralParameters$infile))
don.info <- getStnOpenDataInfo(GeneralParameters$infile)
sep <- don.info[[3]]$sepr
if(sep == "") sep <- " "
write.table(don0, file = fileout, sep = sep, na = don.info[[3]]$miss.val,
col.names = don.info[[3]]$header, row.names = FALSE, quote = FALSE)
rm(don0)
############
# Optional boundary polygon: union of the shapefile geometries, simplified,
# then buffered by buffer/111 degrees.
if(GeneralParameters$shpfile == "")
{
Insert.Messages.Out("No ESRI shapefile found", format = TRUE)
Insert.Messages.Out("The stations outside the boundaries will not be checked", format = TRUE)
shpd <- NULL
}else{
shpd <- getShpOpenData(GeneralParameters$shpfile)
if(is.null(shpd)){
Insert.Messages.Out(paste('Unable to open', GeneralParameters$shpfile, 'or it is not an ESRI shapefile'), format = TRUE)
Insert.Messages.Out("The stations outside the boundaries will not be checked", format = TRUE)
shpd <- NULL
}else{
shpd <- as(shpd[[2]], "SpatialPolygons")
shpd <- gUnaryUnion(shpd)
shpd <- gSimplify(shpd, tol = 0.05, topologyPreserve = TRUE)
shpd <- gBuffer(shpd, width = GeneralParameters$buffer/111)
}
}
############
# Build the display copy: LonX/LatX mirror the coordinates; StatusX is a
# marker colour ("blue" = OK, "orange" = duplicate, "red" = outside shape).
output <- list(params = GeneralParameters, info = don.info, id = coords$id)
coords <- as.data.frame(coords)
coords$id <- as.character(coords$id)
don.disp$LonX <- coords$lon
don.disp$LatX <- coords$lat
don.disp$StatusX <- rep("blue", length(coords$lon))
don.table <- NULL
############
## Missing coords
# Stations with missing coordinates are reported and removed from the
# display and the coordinate set.
imiss <- is.na(coords$lon) | is.na(coords$lat)
if(any(imiss)){
don.table$miss <- data.frame(State = 'Missing Coordinates', don.disp[imiss, , drop = FALSE])
don.disp <- don.disp[!imiss, , drop = FALSE]
coords <- coords[!imiss, , drop = FALSE]
}
## Wrong coords
# Longitude outside [-180, 360] or latitude outside [-90, 90].
iwrong <- coords$lon < -180 | coords$lon > 360 | coords$lat < -90 | coords$lat > 90
if(any(iwrong)){
don.table$wrong <- data.frame(State = 'Invalid Coordinates', don.disp[iwrong, , drop = FALSE])
don.disp <- don.disp[!iwrong, , drop = FALSE]
coords <- coords[!iwrong, , drop = FALSE]
}
## Duplicated ID
# The two duplicated() passes together flag every member of a duplicate
# group, not just the later occurrences.
iddup <- duplicated(coords$id) | duplicated(coords$id, fromLast = TRUE)
if(any(iddup)){
don.table$iddup <- data.frame(State = 'Duplicate ID', don.disp[iddup, , drop = FALSE])
don.table$iddup <- don.table$iddup[order(coords$id[iddup]), , drop = FALSE]
don.disp$StatusX[iddup] <- "orange"
}
## Duplicated coordinates
crddup <- duplicated(coords[, c('lon', 'lat'), drop = FALSE]) |
duplicated(coords[, c('lon', 'lat'), drop = FALSE], fromLast = TRUE)
if(any(crddup)){
don.table$crddup <- data.frame(State = 'Duplicate Coordinates', don.disp[crddup, , drop = FALSE])
don.table$crddup <- don.table$crddup[order(paste0(coords$lon[crddup], coords$lat[crddup])), , drop = FALSE]
don.disp$StatusX[crddup] <- "orange"
}
## Coordinates outside boundaries
# Point-in-polygon test via sp::over(); NA means the point falls outside
# the buffered polygon.
if(!is.null(shpd)){
spcoords <- coords
coordinates(spcoords) <- ~lon+lat
iout <- is.na(over(spcoords, geometry(shpd)))
if(any(iout)){
don.table$out <- data.frame(State = 'Coordinates Outside', don.disp[iout, , drop = FALSE])
don.table$out <- don.table$out[order(coords$id[iout]), , drop = FALSE]
don.disp$StatusX[iout] <- "red"
}
rm(spcoords, shpd)
}
############
# Stack the per-check tables into one data frame, dropping the display-only
# columns.
if(!is.null(don.table)){
don.table <- do.call(rbind, don.table)
don.table <- don.table[, !names(don.table) %in% c('LonX', 'LatX', 'StatusX'), drop = FALSE]
rownames(don.table) <- NULL
}
output$coords <- coords
############
# Persist index, display and table objects; the table of flagged stations is
# also exported as CSV.
file.index <- file.path(outdir, 'CoordinatesCheck.rds')
dataOUT <- file.path(outdir, 'CDTDATASET')
dir.create(dataOUT, showWarnings = FALSE, recursive = TRUE)
file.table.csv <- file.path(outdir, 'Stations_to_Check.csv')
file.table.rds <- file.path(dataOUT, 'Table.rds')
file.display <- file.path(dataOUT, 'Display.rds')
saveRDS(output, file.index)
saveRDS(don.disp, file.display)
saveRDS(don.table, file.table.rds)
if(!is.null(don.table)) writeFiles(don.table, file.table.csv, col.names = TRUE)
############
# Cache everything in the global CDT environment for the GUI.
.cdtData$EnvData$output <- output
.cdtData$EnvData$PathData <- outdir
.cdtData$EnvData$Table.Disp <- don.table
.cdtData$EnvData$Maps.Disp <- don.disp
return(0)
}
##########################################################################
# Load station coordinates for map display only (no file output, no check
# tables): reads the input file, drops rows with missing or out-of-range
# coordinates, and stores the result in the global .cdtData$EnvData
# environment. Returns 0 on success, NULL if the input cannot be read.
StnChkCoordsDataStn <- function(GeneralParameters){
    infile <- GeneralParameters$infile
    # "cdtcoords" input: columns 1, 3 and 4 hold id, longitude and latitude
    # (column meaning assumed from usage -- confirm against the CDT format).
    if(GeneralParameters$data.type == "cdtcoords"){
        dat0 <- getStnOpenData(infile)
        if(is.null(dat0)) return(NULL)
        col.lab <- names(dat0)
        disp <- dat0
        crd <- list(id = as.character(dat0[, 1]),
                    lon = as.numeric(dat0[, 3]),
                    lat = as.numeric(dat0[, 4]))
    }
    # "cdtstation" input: parsed by splitCDTData0(); elevation kept only
    # when present.
    if(GeneralParameters$data.type == "cdtstation"){
        dat0 <- getStnOpenData(infile)
        if(is.null(dat0)) return(NULL)
        stn <- splitCDTData0(dat0)
        if(is.null(stn)) return(NULL)
        stn <- stn[c('id', 'lon', 'lat', 'elv')]
        col.lab <- c("ID", "Longitude", "Latitude", "Elevation")
        if(is.null(stn$elv)){
            stn <- stn[c('id', 'lon', 'lat')]
            col.lab <- col.lab[1:3]
        }
        disp <- as.data.frame(stn)
        names(disp) <- col.lab
        crd <- stn[c('id', 'lon', 'lat')]
        rm(stn)
    }
    ############
    rm(dat0)
    # Display copy: LonX/LatX mirror the coordinates, StatusX is the marker
    # colour used by the map ("blue" = OK).
    crd <- as.data.frame(crd)
    disp$LonX <- crd$lon
    disp$LatX <- crd$lat
    disp$StatusX <- rep("blue", nrow(crd))
    ############
    ## drop stations with missing coordinates
    no.crd <- is.na(crd$lon) | is.na(crd$lat)
    if(any(no.crd)){
        disp <- disp[!no.crd, , drop = FALSE]
        crd <- crd[!no.crd, , drop = FALSE]
    }
    ## drop stations with out-of-range coordinates
    bad.crd <- crd$lon < -180 | crd$lon > 360 | crd$lat < -90 | crd$lat > 90
    if(any(bad.crd)){
        disp <- disp[!bad.crd, , drop = FALSE]
        crd <- crd[!bad.crd, , drop = FALSE]
    }
    # Cache for the GUI.
    .cdtData$EnvData$output$coords <- crd
    .cdtData$EnvData$Maps.Disp <- disp
    return(0)
}
##########################################################################
# Apply user edits from the "stations to check" table back to the data.
#
# Reads the current edited table (CDTDATASET/Table.rds) and compares it
# with the snapshot of editable stations (.cdtData$EnvData$Table.Disp0,
# keyed by ID): IDs present in the snapshot but absent from the table are
# treated as deletions, remaining rows as corrections. Both the checked
# copy of the input file (Checked_Coords_<infile>) and the cached
# coords/display objects are updated accordingly. Returns 0 on success,
# NULL when there is nothing to correct.
StnChkCoordsCorrect <- function(){
if(is.null(.cdtData$EnvData$Table.Disp0)){
Insert.Messages.Out("No stations to be corrected")
return(NULL)
}
idx0 <- as.character(.cdtData$EnvData$Table.Disp0$ID)
fileTable <- file.path(.cdtData$EnvData$PathData, "CDTDATASET/Table.rds")
Table.Disp <- readRDS(fileTable)
# change: edited values as a character matrix (State column dropped);
# id.del0: IDs removed from the table, i.e. stations to delete.
if(!is.null(Table.Disp)){
idx <- as.character(Table.Disp$ID)
id.del0 <- idx0[!idx0 %in% idx]
change <- Table.Disp[, -1, drop = FALSE]
change <- as.matrix(change)
.cdtData$EnvData$Table.Disp <- Table.Disp
}else{
id.del0 <- idx0
change <- matrix(NA, 0, 3)
.cdtData$EnvData$Table.Disp <- NULL
}
######
# Re-read the checked copy of the input file as character (so untouched
# values round-trip unchanged) plus the saved display object. nom1 indexes
# the data columns of the display (everything but LonX/LatX/StatusX).
info <- .cdtData$EnvData$output$info
fileout <- file.path(.cdtData$EnvData$PathData,
paste0('Checked_Coords_', .cdtData$EnvData$output$params$infile))
don0 <- read.table(fileout, header = info[[3]]$header,
sep = info[[3]]$sepr, na.strings = info[[3]]$miss.val,
stringsAsFactors = FALSE, colClasses = "character")
filemap <- file.path(.cdtData$EnvData$PathData, 'CDTDATASET', 'Display.rds')
map.disp <- readRDS(filemap)
nom1 <- names(map.disp)
nom1 <- which(!nom1 %in% c('LonX', 'LatX', 'StatusX'))
######
# Apply corrections/deletions to the file copy. For "cdtcoords" stations
# are rows (lon/lat in file columns 3/4); for "cdtstation" stations are
# columns (offset by 1, presumably for a leading date/label column --
# confirm), with lon/lat in table columns 2/3.
if(.cdtData$EnvData$output$params$data.type == "cdtcoords"){
if(nrow(change) > 0){
ix <- match(idx, .cdtData$EnvData$output$id)
don0[ix, ] <- change
pos.lon <- 3
pos.lat <- 4
}
if(length(id.del0)){
ix1 <- match(id.del0, .cdtData$EnvData$output$id)
don0 <- don0[-ix1, , drop = FALSE]
}
}
if(.cdtData$EnvData$output$params$data.type == "cdtstation"){
if(nrow(change) > 0){
ix <- match(idx, .cdtData$EnvData$output$id)
don0[1:ncol(change), ix + 1] <- t(change)
pos.lon <- 2
pos.lat <- 3
}
if(length(id.del0)){
ix1 <- match(id.del0, .cdtData$EnvData$output$id)
don0 <- don0[, -(ix1 + 1), drop = FALSE]
}
}
# Keep the cached ID list and the editable snapshot in sync with the
# deletions (ix1 was computed in the matching branch above).
if(length(id.del0)){
.cdtData$EnvData$output$id <- .cdtData$EnvData$output$id[-ix1]
.cdtData$EnvData$Table.Disp0 <- .cdtData$EnvData$Table.Disp0[!idx0 %in% id.del0, , drop = FALSE]
if(nrow(.cdtData$EnvData$Table.Disp0) == 0) .cdtData$EnvData$Table.Disp0 <- NULL
}
######
# Propagate corrections to the cached coords and the two display objects.
# Edited IDs not found in coords (ina) were previously filtered out (e.g.
# missing/invalid coordinates) and are appended as new "red" rows.
idx1 <- .cdtData$EnvData$output$coords$id
id.del1 <- if(length(id.del0)) idx1[idx1 %in% id.del0] else NULL
if(nrow(change) > 0){
ix0 <- match(idx, idx1)
ina <- is.na(ix0)
if(any(ina)){
ix0 <- ix0[!ina]
change0 <- change[!ina, , drop = FALSE]
change1 <- change[ina, , drop = FALSE]
idx2 <- idx[ina]
}else change0 <- change
.cdtData$EnvData$output$coords$id[ix0] <- as.character(change0[, 1])
.cdtData$EnvData$output$coords$lon[ix0] <- as.numeric(change0[, pos.lon])
.cdtData$EnvData$output$coords$lat[ix0] <- as.numeric(change0[, pos.lat])
map.disp[ix0, nom1] <- change0
map.disp$LonX[ix0] <- as.numeric(change0[, pos.lon])
map.disp$LatX[ix0] <- as.numeric(change0[, pos.lat])
.cdtData$EnvData$Maps.Disp[ix0, nom1] <- change0
.cdtData$EnvData$Maps.Disp$LonX[ix0] <- as.numeric(change0[, pos.lon])
.cdtData$EnvData$Maps.Disp$LatX[ix0] <- as.numeric(change0[, pos.lat])
if(any(ina)){
idx1 <- c(idx1, idx2)
tmp <- data.frame(id = as.character(change1[, 1]),
lon = as.numeric(change1[, pos.lon]),
lat = as.numeric(change1[, pos.lat]),
stringsAsFactors = FALSE)
.cdtData$EnvData$output$coords <- rbind(.cdtData$EnvData$output$coords, tmp)
tmp1 <- data.frame(change1,
LonX = as.numeric(change1[, pos.lon]),
LatX = as.numeric(change1[, pos.lat]),
StatusX = "red", stringsAsFactors = FALSE)
map.disp <- rbind(map.disp, tmp1)
.cdtData$EnvData$Maps.Disp <- rbind(.cdtData$EnvData$Maps.Disp, tmp1)
}
}
# Remove deleted stations from coords and both display objects.
if(length(id.del1)){
ix <- match(id.del1, idx1)
.cdtData$EnvData$output$coords <- .cdtData$EnvData$output$coords[-ix, , drop = FALSE]
map.disp <- map.disp[-ix, , drop = FALSE]
.cdtData$EnvData$Maps.Disp <- .cdtData$EnvData$Maps.Disp[-ix, , drop = FALSE]
}
######
# Write the corrected data file and the refreshed display object back.
sep <- info[[3]]$sepr
if(sep == "") sep <- " "
write.table(don0, file = fileout, sep = sep, na = info[[3]]$miss.val,
col.names = info[[3]]$header, row.names = FALSE, quote = FALSE)
saveRDS(map.disp, filemap)
return(0)
}
|
/R/cdtStnCoords_Procs.R
|
no_license
|
heureux1985/CDT
|
R
| false
| false
| 11,079
|
r
|
# Check and flag problematic station coordinates from a CDT input file.
#
# GeneralParameters is a list with (at least): output (existing output
# directory), data.type ("cdtcoords" or "cdtstation"), infile (name of the
# already-opened input file), shpfile (ESRI shapefile name, "" to skip the
# boundary check) and buffer (buffer distance; divided by 111 below,
# presumably km-to-degrees -- confirm).
#
# Checks performed: missing coordinates, coordinates outside [-180, 360] x
# [-90, 90], duplicated station IDs, duplicated lon/lat pairs and, when a
# shapefile is available, stations falling outside the buffered boundary.
# A copy of the input, the check index and display/table objects are written
# under <output>/CHECK.COORDS_data, and results are cached in the global
# .cdtData$EnvData environment. Returns 0 on success, NULL on failure
# (failures are reported through Insert.Messages.Out).
StnChkCoordsProcs <- function(GeneralParameters){
if(!dir.exists(GeneralParameters$output)){
Insert.Messages.Out(paste(GeneralParameters$output, "did not find"), format = TRUE)
return(NULL)
}
# "cdtcoords" input: a plain table; columns 1, 3 and 4 are used as id,
# longitude and latitude (column meaning assumed from usage here -- confirm
# against the CDT coordinates-file format).
if(GeneralParameters$data.type == "cdtcoords")
{
don0 <- getStnOpenData(GeneralParameters$infile)
if(is.null(don0)) return(NULL)
nom.col <- names(don0)
don.disp <- don0
coords <- list(id = as.character(don0[, 1]),
lon = as.numeric(don0[, 3]),
lat = as.numeric(don0[, 4]))
}
# "cdtstation" input: CDT station-data format parsed by splitCDTData0();
# the elevation column is kept only when present.
if(GeneralParameters$data.type == "cdtstation")
{
don0 <- getStnOpenData(GeneralParameters$infile)
if(is.null(don0)) return(NULL)
don <- splitCDTData0(don0)
if(is.null(don)) return(NULL)
don <- don[c('id', 'lon', 'lat', 'elv')]
nom.col <- c("ID", "Longitude", "Latitude", "Elevation")
if(is.null(don$elv)){
don <- don[c('id', 'lon', 'lat')]
nom.col <- nom.col[1:3]
}
don.disp <- as.data.frame(don)
names(don.disp) <- nom.col
coords <- don[c('id', 'lon', 'lat')]
rm(don)
}
############
# Write an as-yet-unmodified copy of the input file to the output folder;
# StnChkCoordsCorrect() later edits this copy in place.
outdir <- file.path(GeneralParameters$output, "CHECK.COORDS_data")
dir.create(outdir, showWarnings = FALSE, recursive = TRUE)
fileout <- file.path(outdir, paste0('Checked_Coords_', GeneralParameters$infile))
don.info <- getStnOpenDataInfo(GeneralParameters$infile)
sep <- don.info[[3]]$sepr
if(sep == "") sep <- " "
write.table(don0, file = fileout, sep = sep, na = don.info[[3]]$miss.val,
col.names = don.info[[3]]$header, row.names = FALSE, quote = FALSE)
rm(don0)
############
# Optional boundary polygon: union of the shapefile geometries, simplified,
# then buffered by buffer/111 degrees.
if(GeneralParameters$shpfile == "")
{
Insert.Messages.Out("No ESRI shapefile found", format = TRUE)
Insert.Messages.Out("The stations outside the boundaries will not be checked", format = TRUE)
shpd <- NULL
}else{
shpd <- getShpOpenData(GeneralParameters$shpfile)
if(is.null(shpd)){
Insert.Messages.Out(paste('Unable to open', GeneralParameters$shpfile, 'or it is not an ESRI shapefile'), format = TRUE)
Insert.Messages.Out("The stations outside the boundaries will not be checked", format = TRUE)
shpd <- NULL
}else{
shpd <- as(shpd[[2]], "SpatialPolygons")
shpd <- gUnaryUnion(shpd)
shpd <- gSimplify(shpd, tol = 0.05, topologyPreserve = TRUE)
shpd <- gBuffer(shpd, width = GeneralParameters$buffer/111)
}
}
############
# Build the display copy: LonX/LatX mirror the coordinates; StatusX is a
# marker colour ("blue" = OK, "orange" = duplicate, "red" = outside shape).
output <- list(params = GeneralParameters, info = don.info, id = coords$id)
coords <- as.data.frame(coords)
coords$id <- as.character(coords$id)
don.disp$LonX <- coords$lon
don.disp$LatX <- coords$lat
don.disp$StatusX <- rep("blue", length(coords$lon))
don.table <- NULL
############
## Missing coords
# Stations with missing coordinates are reported and removed from the
# display and the coordinate set.
imiss <- is.na(coords$lon) | is.na(coords$lat)
if(any(imiss)){
don.table$miss <- data.frame(State = 'Missing Coordinates', don.disp[imiss, , drop = FALSE])
don.disp <- don.disp[!imiss, , drop = FALSE]
coords <- coords[!imiss, , drop = FALSE]
}
## Wrong coords
# Longitude outside [-180, 360] or latitude outside [-90, 90].
iwrong <- coords$lon < -180 | coords$lon > 360 | coords$lat < -90 | coords$lat > 90
if(any(iwrong)){
don.table$wrong <- data.frame(State = 'Invalid Coordinates', don.disp[iwrong, , drop = FALSE])
don.disp <- don.disp[!iwrong, , drop = FALSE]
coords <- coords[!iwrong, , drop = FALSE]
}
## Duplicated ID
# The two duplicated() passes together flag every member of a duplicate
# group, not just the later occurrences.
iddup <- duplicated(coords$id) | duplicated(coords$id, fromLast = TRUE)
if(any(iddup)){
don.table$iddup <- data.frame(State = 'Duplicate ID', don.disp[iddup, , drop = FALSE])
don.table$iddup <- don.table$iddup[order(coords$id[iddup]), , drop = FALSE]
don.disp$StatusX[iddup] <- "orange"
}
## Duplicated coordinates
crddup <- duplicated(coords[, c('lon', 'lat'), drop = FALSE]) |
duplicated(coords[, c('lon', 'lat'), drop = FALSE], fromLast = TRUE)
if(any(crddup)){
don.table$crddup <- data.frame(State = 'Duplicate Coordinates', don.disp[crddup, , drop = FALSE])
don.table$crddup <- don.table$crddup[order(paste0(coords$lon[crddup], coords$lat[crddup])), , drop = FALSE]
don.disp$StatusX[crddup] <- "orange"
}
## Coordinates outside boundaries
# Point-in-polygon test via sp::over(); NA means the point falls outside
# the buffered polygon.
if(!is.null(shpd)){
spcoords <- coords
coordinates(spcoords) <- ~lon+lat
iout <- is.na(over(spcoords, geometry(shpd)))
if(any(iout)){
don.table$out <- data.frame(State = 'Coordinates Outside', don.disp[iout, , drop = FALSE])
don.table$out <- don.table$out[order(coords$id[iout]), , drop = FALSE]
don.disp$StatusX[iout] <- "red"
}
rm(spcoords, shpd)
}
############
# Stack the per-check tables into one data frame, dropping the display-only
# columns.
if(!is.null(don.table)){
don.table <- do.call(rbind, don.table)
don.table <- don.table[, !names(don.table) %in% c('LonX', 'LatX', 'StatusX'), drop = FALSE]
rownames(don.table) <- NULL
}
output$coords <- coords
############
# Persist index, display and table objects; the table of flagged stations is
# also exported as CSV.
file.index <- file.path(outdir, 'CoordinatesCheck.rds')
dataOUT <- file.path(outdir, 'CDTDATASET')
dir.create(dataOUT, showWarnings = FALSE, recursive = TRUE)
file.table.csv <- file.path(outdir, 'Stations_to_Check.csv')
file.table.rds <- file.path(dataOUT, 'Table.rds')
file.display <- file.path(dataOUT, 'Display.rds')
saveRDS(output, file.index)
saveRDS(don.disp, file.display)
saveRDS(don.table, file.table.rds)
if(!is.null(don.table)) writeFiles(don.table, file.table.csv, col.names = TRUE)
############
# Cache everything in the global CDT environment for the GUI.
.cdtData$EnvData$output <- output
.cdtData$EnvData$PathData <- outdir
.cdtData$EnvData$Table.Disp <- don.table
.cdtData$EnvData$Maps.Disp <- don.disp
return(0)
}
##########################################################################
# Load station coordinates for map display only (no file output, no check
# tables): reads the input file, drops rows with missing or out-of-range
# coordinates, and stores the result in the global .cdtData$EnvData
# environment. Returns 0 on success, NULL if the input cannot be read.
StnChkCoordsDataStn <- function(GeneralParameters){
    infile <- GeneralParameters$infile
    # "cdtcoords" input: columns 1, 3 and 4 hold id, longitude and latitude
    # (column meaning assumed from usage -- confirm against the CDT format).
    if(GeneralParameters$data.type == "cdtcoords"){
        dat0 <- getStnOpenData(infile)
        if(is.null(dat0)) return(NULL)
        col.lab <- names(dat0)
        disp <- dat0
        crd <- list(id = as.character(dat0[, 1]),
                    lon = as.numeric(dat0[, 3]),
                    lat = as.numeric(dat0[, 4]))
    }
    # "cdtstation" input: parsed by splitCDTData0(); elevation kept only
    # when present.
    if(GeneralParameters$data.type == "cdtstation"){
        dat0 <- getStnOpenData(infile)
        if(is.null(dat0)) return(NULL)
        stn <- splitCDTData0(dat0)
        if(is.null(stn)) return(NULL)
        stn <- stn[c('id', 'lon', 'lat', 'elv')]
        col.lab <- c("ID", "Longitude", "Latitude", "Elevation")
        if(is.null(stn$elv)){
            stn <- stn[c('id', 'lon', 'lat')]
            col.lab <- col.lab[1:3]
        }
        disp <- as.data.frame(stn)
        names(disp) <- col.lab
        crd <- stn[c('id', 'lon', 'lat')]
        rm(stn)
    }
    ############
    rm(dat0)
    # Display copy: LonX/LatX mirror the coordinates, StatusX is the marker
    # colour used by the map ("blue" = OK).
    crd <- as.data.frame(crd)
    disp$LonX <- crd$lon
    disp$LatX <- crd$lat
    disp$StatusX <- rep("blue", nrow(crd))
    ############
    ## drop stations with missing coordinates
    no.crd <- is.na(crd$lon) | is.na(crd$lat)
    if(any(no.crd)){
        disp <- disp[!no.crd, , drop = FALSE]
        crd <- crd[!no.crd, , drop = FALSE]
    }
    ## drop stations with out-of-range coordinates
    bad.crd <- crd$lon < -180 | crd$lon > 360 | crd$lat < -90 | crd$lat > 90
    if(any(bad.crd)){
        disp <- disp[!bad.crd, , drop = FALSE]
        crd <- crd[!bad.crd, , drop = FALSE]
    }
    # Cache for the GUI.
    .cdtData$EnvData$output$coords <- crd
    .cdtData$EnvData$Maps.Disp <- disp
    return(0)
}
##########################################################################
# Apply user edits from the "stations to check" table back to the data.
#
# Reads the current edited table (CDTDATASET/Table.rds) and compares it
# with the snapshot of editable stations (.cdtData$EnvData$Table.Disp0,
# keyed by ID): IDs present in the snapshot but absent from the table are
# treated as deletions, remaining rows as corrections. Both the checked
# copy of the input file (Checked_Coords_<infile>) and the cached
# coords/display objects are updated accordingly. Returns 0 on success,
# NULL when there is nothing to correct.
StnChkCoordsCorrect <- function(){
if(is.null(.cdtData$EnvData$Table.Disp0)){
Insert.Messages.Out("No stations to be corrected")
return(NULL)
}
idx0 <- as.character(.cdtData$EnvData$Table.Disp0$ID)
fileTable <- file.path(.cdtData$EnvData$PathData, "CDTDATASET/Table.rds")
Table.Disp <- readRDS(fileTable)
# change: edited values as a character matrix (State column dropped);
# id.del0: IDs removed from the table, i.e. stations to delete.
if(!is.null(Table.Disp)){
idx <- as.character(Table.Disp$ID)
id.del0 <- idx0[!idx0 %in% idx]
change <- Table.Disp[, -1, drop = FALSE]
change <- as.matrix(change)
.cdtData$EnvData$Table.Disp <- Table.Disp
}else{
id.del0 <- idx0
change <- matrix(NA, 0, 3)
.cdtData$EnvData$Table.Disp <- NULL
}
######
# Re-read the checked copy of the input file as character (so untouched
# values round-trip unchanged) plus the saved display object. nom1 indexes
# the data columns of the display (everything but LonX/LatX/StatusX).
info <- .cdtData$EnvData$output$info
fileout <- file.path(.cdtData$EnvData$PathData,
paste0('Checked_Coords_', .cdtData$EnvData$output$params$infile))
don0 <- read.table(fileout, header = info[[3]]$header,
sep = info[[3]]$sepr, na.strings = info[[3]]$miss.val,
stringsAsFactors = FALSE, colClasses = "character")
filemap <- file.path(.cdtData$EnvData$PathData, 'CDTDATASET', 'Display.rds')
map.disp <- readRDS(filemap)
nom1 <- names(map.disp)
nom1 <- which(!nom1 %in% c('LonX', 'LatX', 'StatusX'))
######
# Apply corrections/deletions to the file copy. For "cdtcoords" stations
# are rows (lon/lat in file columns 3/4); for "cdtstation" stations are
# columns (offset by 1, presumably for a leading date/label column --
# confirm), with lon/lat in table columns 2/3.
if(.cdtData$EnvData$output$params$data.type == "cdtcoords"){
if(nrow(change) > 0){
ix <- match(idx, .cdtData$EnvData$output$id)
don0[ix, ] <- change
pos.lon <- 3
pos.lat <- 4
}
if(length(id.del0)){
ix1 <- match(id.del0, .cdtData$EnvData$output$id)
don0 <- don0[-ix1, , drop = FALSE]
}
}
if(.cdtData$EnvData$output$params$data.type == "cdtstation"){
if(nrow(change) > 0){
ix <- match(idx, .cdtData$EnvData$output$id)
don0[1:ncol(change), ix + 1] <- t(change)
pos.lon <- 2
pos.lat <- 3
}
if(length(id.del0)){
ix1 <- match(id.del0, .cdtData$EnvData$output$id)
don0 <- don0[, -(ix1 + 1), drop = FALSE]
}
}
# Keep the cached ID list and the editable snapshot in sync with the
# deletions (ix1 was computed in the matching branch above).
if(length(id.del0)){
.cdtData$EnvData$output$id <- .cdtData$EnvData$output$id[-ix1]
.cdtData$EnvData$Table.Disp0 <- .cdtData$EnvData$Table.Disp0[!idx0 %in% id.del0, , drop = FALSE]
if(nrow(.cdtData$EnvData$Table.Disp0) == 0) .cdtData$EnvData$Table.Disp0 <- NULL
}
######
# Propagate corrections to the cached coords and the two display objects.
# Edited IDs not found in coords (ina) were previously filtered out (e.g.
# missing/invalid coordinates) and are appended as new "red" rows.
idx1 <- .cdtData$EnvData$output$coords$id
id.del1 <- if(length(id.del0)) idx1[idx1 %in% id.del0] else NULL
if(nrow(change) > 0){
ix0 <- match(idx, idx1)
ina <- is.na(ix0)
if(any(ina)){
ix0 <- ix0[!ina]
change0 <- change[!ina, , drop = FALSE]
change1 <- change[ina, , drop = FALSE]
idx2 <- idx[ina]
}else change0 <- change
.cdtData$EnvData$output$coords$id[ix0] <- as.character(change0[, 1])
.cdtData$EnvData$output$coords$lon[ix0] <- as.numeric(change0[, pos.lon])
.cdtData$EnvData$output$coords$lat[ix0] <- as.numeric(change0[, pos.lat])
map.disp[ix0, nom1] <- change0
map.disp$LonX[ix0] <- as.numeric(change0[, pos.lon])
map.disp$LatX[ix0] <- as.numeric(change0[, pos.lat])
.cdtData$EnvData$Maps.Disp[ix0, nom1] <- change0
.cdtData$EnvData$Maps.Disp$LonX[ix0] <- as.numeric(change0[, pos.lon])
.cdtData$EnvData$Maps.Disp$LatX[ix0] <- as.numeric(change0[, pos.lat])
if(any(ina)){
idx1 <- c(idx1, idx2)
tmp <- data.frame(id = as.character(change1[, 1]),
lon = as.numeric(change1[, pos.lon]),
lat = as.numeric(change1[, pos.lat]),
stringsAsFactors = FALSE)
.cdtData$EnvData$output$coords <- rbind(.cdtData$EnvData$output$coords, tmp)
tmp1 <- data.frame(change1,
LonX = as.numeric(change1[, pos.lon]),
LatX = as.numeric(change1[, pos.lat]),
StatusX = "red", stringsAsFactors = FALSE)
map.disp <- rbind(map.disp, tmp1)
.cdtData$EnvData$Maps.Disp <- rbind(.cdtData$EnvData$Maps.Disp, tmp1)
}
}
# Remove deleted stations from coords and both display objects.
if(length(id.del1)){
ix <- match(id.del1, idx1)
.cdtData$EnvData$output$coords <- .cdtData$EnvData$output$coords[-ix, , drop = FALSE]
map.disp <- map.disp[-ix, , drop = FALSE]
.cdtData$EnvData$Maps.Disp <- .cdtData$EnvData$Maps.Disp[-ix, , drop = FALSE]
}
######
# Write the corrected data file and the refreshed display object back.
sep <- info[[3]]$sepr
if(sep == "") sep <- " "
write.table(don0, file = fileout, sep = sep, na = info[[3]]$miss.val,
col.names = info[[3]]$header, row.names = FALSE, quote = FALSE)
saveRDS(map.disp, filemap)
return(0)
}
|
# library, download model data, functions ====
source("code/modeling/data_prep_updated_hour.R")
# source("code/modeling/data_prep_updated_other_aggregation.R")
# source("code/modeling/data_prep_updated_day.R")
library(ggplot2)
# ls()
# Select the aggregation level prepared above (hourly by default).
unit_flow <- unit_flow_hour
# unit_flow <- unit_flow_shift
# unit_flow <- unit_flow_day
# Modelling ====
# For every hospital unit fit two linear models: INFLOW on the flows coming
# from the other units, and OUTFLOW on staffing plus the lagged waiting
# times (l1_*) of the other units.
units <- names(unit_flow)
inflow_models <- list()
outflow_models <- list()
for(i in units){
to_model_inflow <- unit_flow[[i]] %>% select(INFLOW, starts_with("from_"))
# drop the unit's own "from_<unit>" column (data.table := assignment)
to_model_inflow[,paste0("from_",i):=NULL]
inflow_models[[i]] <- lm(INFLOW ~ ., to_model_inflow)
to_model_outflow <- unit_flow[[i]] %>% select(OUTFLOW, STAFF, starts_with("l1_waiting_time"))
# drop the unit's own lagged waiting time, keep only the other units'
to_model_outflow[,l1_waiting_time:=NULL]
outflow_models[[i]] <- lm(OUTFLOW ~ ., to_model_outflow)
}
# NOTE(review): stargazer is not loaded in this script -- presumably
# attached by the sourced data-prep file; confirm.
stargazer(inflow_models, title="Results", align=TRUE, type = "latex")
stargazer(outflow_models, title="Results", align=TRUE, type = "latex")
rm(list=c("i", "to_model_outflow", "to_model_inflow","units"))
outflow_models["CCU"]
# Waiting times curves ====
# Estimate waiting-time curves for `unit` under staffing interventions.
#
# unit:           unit whose waiting time is evaluated.
# unit_flow_data: named list of per-unit data (from the data-prep script).
# outflow_models: fitted lm models of OUTFLOW, one per unit (see above).
# r:              inclusive range of staffing levels to sweep.
# w_max:          cap passed to theta_thres() when converting net outflow to
#                 a waiting time (waiting time = 1 / thresholded net outflow).
#
# Returns data.frame(ressources, waiting_time, unit), where `unit` names the
# unit whose staffing was varied (the unit itself or a downstream one).
# Note: the `ressources` column name (sic) is part of the returned interface.
waiting_do <- function(unit, unit_flow_data, outflow_models, r = c(1,100), w_max = 3){
# set units in the ICU to be examined
all_units <- names(unit_flow_data)
units <- all_units[all_units != unit]
# prepare data for the selected unit
unit_flow_data[[unit]] %>%
select(starts_with("l_STAFF_"), starts_with("l_INFLOW_"),
starts_with("l2_waiting_time_"),
l2_waiting_time, INFLOW, STAFF) %>% as.data.table() -> pred_data
pred_data <- pred_data[complete.cases(pred_data)]
staff_data <- pred_data$STAFF
pred_data <- select(pred_data, -STAFF)
# rename the lag-2 waiting-time columns to the lag-1 names the fitted
# models expect (gsub swaps the "2" in the prefix for "1")
colnames(pred_data)[colnames(pred_data) == "l2_waiting_time"] <- paste0("l2_waiting_time_",unit)
colnames(pred_data) <- gsub("2","1",colnames(pred_data))
# estimate outflows for downstream units
# (each unit's lagged staff column is temporarily renamed to STAFF so its
# model sees it under the expected name; columns are restored afterwards)
pred_outflows <- list()
for(i in units){
orig_cols <- colnames(pred_data)
colnames(pred_data)[colnames(pred_data) == paste0("l_STAFF_",i)] <- "STAFF"
inflow <- as.data.frame(pred_data)[paste0("l_INFLOW_",i)]
pred_outflows[[i]] <- 1 / theta_thres(predict(outflow_models[[i]], pred_data) - inflow, w_max)
colnames(pred_data) <- orig_cols
}
# estimate waiting time curves by varying staff in the unit of interest
to_plot <- data.frame(ressources=integer(), waiting_time=integer(), unit=character())#, do_unit=character())
for(ressources in r[1]:r[2]){
df <- data.frame(matrix(unlist(pred_outflows), ncol=length(pred_outflows), byrow=FALSE))
df <- cbind(ressources, df)
colnames(df) <- c("STAFF", paste0("l1_waiting_time_",names(pred_outflows)))
waiting_time <- mean(1 / theta_thres(data.frame(predict(outflow_models[[unit]], df) - pred_data$INFLOW), w_max))
to_plot <- rbind(to_plot, data.frame(ressources, waiting_time, unit))
}
# estimate waiting time curves by varying staff in the downstream units
for(downstream_unit in units){
# re-estimate the downstream unit's outflow at each staffing level; the
# original prediction is restored once the sweep is done
pred_outflows_old <- pred_outflows[[downstream_unit]]
for(ressources in r[1]:r[2]){
orig_cols <- colnames(pred_data)
colnames(pred_data)[colnames(pred_data) == paste0("l_STAFF_", downstream_unit)] <- "STAFF"
pred_data[, "STAFF"] <- ressources
inflow <- as.data.frame(pred_data)[paste0("l_INFLOW_", downstream_unit)]
pred_outflows[[downstream_unit]] <- 1 / theta_thres(predict(outflow_models[[downstream_unit]], pred_data) - inflow, w_max)
colnames(pred_data) <- orig_cols
df <- data.frame(matrix(unlist(pred_outflows), ncol=length(pred_outflows), byrow=FALSE))
df <- cbind(staff_data, df)
colnames(df) <- c("STAFF", paste0("l1_waiting_time_",names(pred_outflows)))
waiting_time <- mean(1 / theta_thres(data.frame(predict(outflow_models[[unit]], df) - pred_data$INFLOW), w_max))
to_plot <- rbind(to_plot, data.frame(ressources, waiting_time, unit=downstream_unit))
}
pred_outflows[[downstream_unit]] <- pred_outflows_old
}
return(to_plot)
}
# Example: waiting-time curves for the CCU, one line per intervened unit.
see <- waiting_do("CCU", unit_flow, outflow_models)
ggplot(see, aes(ressources, waiting_time, group=unit, color=unit)) +
geom_line()
# plot(see$ressources, see$waiting_time,type = "l", col="tomato", lwd=2)
|
/code/modeling/variations/waiting_curves_updated.R
|
no_license
|
aleixrvr/bayesian-hospital
|
R
| false
| false
| 4,350
|
r
|
# library, download model data, functions ====
source("code/modeling/data_prep_updated_hour.R")
# source("code/modeling/data_prep_updated_other_aggregation.R")
# source("code/modeling/data_prep_updated_day.R")
library(ggplot2)
# ls()
# Select the aggregation level prepared above (hourly by default).
unit_flow <- unit_flow_hour
# unit_flow <- unit_flow_shift
# unit_flow <- unit_flow_day
# Modelling ====
# For every hospital unit fit two linear models: INFLOW on the flows coming
# from the other units, and OUTFLOW on staffing plus the lagged waiting
# times (l1_*) of the other units.
units <- names(unit_flow)
inflow_models <- list()
outflow_models <- list()
for(i in units){
to_model_inflow <- unit_flow[[i]] %>% select(INFLOW, starts_with("from_"))
# drop the unit's own "from_<unit>" column (data.table := assignment)
to_model_inflow[,paste0("from_",i):=NULL]
inflow_models[[i]] <- lm(INFLOW ~ ., to_model_inflow)
to_model_outflow <- unit_flow[[i]] %>% select(OUTFLOW, STAFF, starts_with("l1_waiting_time"))
# drop the unit's own lagged waiting time, keep only the other units'
to_model_outflow[,l1_waiting_time:=NULL]
outflow_models[[i]] <- lm(OUTFLOW ~ ., to_model_outflow)
}
# NOTE(review): stargazer is not loaded in this script -- presumably
# attached by the sourced data-prep file; confirm.
stargazer(inflow_models, title="Results", align=TRUE, type = "latex")
stargazer(outflow_models, title="Results", align=TRUE, type = "latex")
rm(list=c("i", "to_model_outflow", "to_model_inflow","units"))
outflow_models["CCU"]
# Waiting times curves ====
# Estimate waiting-time curves for `unit` under staffing interventions.
#
# unit:           unit whose waiting time is evaluated.
# unit_flow_data: named list of per-unit data (from the data-prep script).
# outflow_models: fitted lm models of OUTFLOW, one per unit (see above).
# r:              inclusive range of staffing levels to sweep.
# w_max:          cap passed to theta_thres() when converting net outflow to
#                 a waiting time (waiting time = 1 / thresholded net outflow).
#
# Returns data.frame(ressources, waiting_time, unit), where `unit` names the
# unit whose staffing was varied (the unit itself or a downstream one).
# Note: the `ressources` column name (sic) is part of the returned interface.
waiting_do <- function(unit, unit_flow_data, outflow_models, r = c(1,100), w_max = 3){
# set units in the ICU to be examined
all_units <- names(unit_flow_data)
units <- all_units[all_units != unit]
# prepare data for the selected unit
unit_flow_data[[unit]] %>%
select(starts_with("l_STAFF_"), starts_with("l_INFLOW_"),
starts_with("l2_waiting_time_"),
l2_waiting_time, INFLOW, STAFF) %>% as.data.table() -> pred_data
pred_data <- pred_data[complete.cases(pred_data)]
staff_data <- pred_data$STAFF
pred_data <- select(pred_data, -STAFF)
# rename the lag-2 waiting-time columns to the lag-1 names the fitted
# models expect (gsub swaps the "2" in the prefix for "1")
colnames(pred_data)[colnames(pred_data) == "l2_waiting_time"] <- paste0("l2_waiting_time_",unit)
colnames(pred_data) <- gsub("2","1",colnames(pred_data))
# estimate outflows for downstream units
# (each unit's lagged staff column is temporarily renamed to STAFF so its
# model sees it under the expected name; columns are restored afterwards)
pred_outflows <- list()
for(i in units){
orig_cols <- colnames(pred_data)
colnames(pred_data)[colnames(pred_data) == paste0("l_STAFF_",i)] <- "STAFF"
inflow <- as.data.frame(pred_data)[paste0("l_INFLOW_",i)]
pred_outflows[[i]] <- 1 / theta_thres(predict(outflow_models[[i]], pred_data) - inflow, w_max)
colnames(pred_data) <- orig_cols
}
# estimate waiting time curves by varying staff in the unit of interest
to_plot <- data.frame(ressources=integer(), waiting_time=integer(), unit=character())#, do_unit=character())
for(ressources in r[1]:r[2]){
df <- data.frame(matrix(unlist(pred_outflows), ncol=length(pred_outflows), byrow=FALSE))
df <- cbind(ressources, df)
colnames(df) <- c("STAFF", paste0("l1_waiting_time_",names(pred_outflows)))
waiting_time <- mean(1 / theta_thres(data.frame(predict(outflow_models[[unit]], df) - pred_data$INFLOW), w_max))
to_plot <- rbind(to_plot, data.frame(ressources, waiting_time, unit))
}
# estimate waiting time curves by varying staff in the downstream units
for(downstream_unit in units){
# re-estimate the downstream unit's outflow at each staffing level; the
# original prediction is restored once the sweep is done
pred_outflows_old <- pred_outflows[[downstream_unit]]
for(ressources in r[1]:r[2]){
orig_cols <- colnames(pred_data)
colnames(pred_data)[colnames(pred_data) == paste0("l_STAFF_", downstream_unit)] <- "STAFF"
pred_data[, "STAFF"] <- ressources
inflow <- as.data.frame(pred_data)[paste0("l_INFLOW_", downstream_unit)]
pred_outflows[[downstream_unit]] <- 1 / theta_thres(predict(outflow_models[[downstream_unit]], pred_data) - inflow, w_max)
colnames(pred_data) <- orig_cols
df <- data.frame(matrix(unlist(pred_outflows), ncol=length(pred_outflows), byrow=FALSE))
df <- cbind(staff_data, df)
colnames(df) <- c("STAFF", paste0("l1_waiting_time_",names(pred_outflows)))
waiting_time <- mean(1 / theta_thres(data.frame(predict(outflow_models[[unit]], df) - pred_data$INFLOW), w_max))
to_plot <- rbind(to_plot, data.frame(ressources, waiting_time, unit=downstream_unit))
}
pred_outflows[[downstream_unit]] <- pred_outflows_old
}
return(to_plot)
}
# Example: waiting-time curves for the CCU, one line per intervened unit.
see <- waiting_do("CCU", unit_flow, outflow_models)
ggplot(see, aes(ressources, waiting_time, group=unit, color=unit)) +
geom_line()
# plot(see$ressources, see$waiting_time,type = "l", col="tomato", lwd=2)
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# NOTE(review): library() is generally preferred over require() for
# mandatory dependencies (require() only warns on failure).
require(shiny)
require(leaflet)
# Define UI for the geolocation app: an address input plus a leaflet map
shinyUI(fluidPage(
# Application title
titlePanel("Geolocation Playground"),
# Documentation
h4('Enter an address and click geolocate to access the approximate latitude and longitude coordinates and the point map. If you want to add a circle to the point on the map, activate the option "Add Circle"'),
# Sidebar with the address input, the resolved lat/long outputs, the
# circle toggle and the submit button that triggers geolocation
sidebarLayout(
sidebarPanel(
textInput("address", "Address to Geolocate", "1600 Pennsylvania Ave NW, Washington DC"),
textOutput("lat"),
textOutput("long"),
checkboxInput("circ", "Add Circle?"),
submitButton('Geolocate')
),
# Main panel: leaflet map showing the geocoded point
mainPanel(
leafletOutput("map", height = 550)
)
)
))
|
/ui.R
|
no_license
|
damarals/shiny-leaflet
|
R
| false
| false
| 1,133
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# NOTE(review): library() is generally preferred over require() for
# mandatory dependencies (require() only warns on failure).
require(shiny)
require(leaflet)
# Define UI for the geolocation app: an address input plus a leaflet map
shinyUI(fluidPage(
# Application title
titlePanel("Geolocation Playground"),
# Documentation
h4('Enter an address and click geolocate to access the approximate latitude and longitude coordinates and the point map. If you want to add a circle to the point on the map, activate the option "Add Circle"'),
# Sidebar with the address input, the resolved lat/long outputs, the
# circle toggle and the submit button that triggers geolocation
sidebarLayout(
sidebarPanel(
textInput("address", "Address to Geolocate", "1600 Pennsylvania Ave NW, Washington DC"),
textOutput("lat"),
textOutput("long"),
checkboxInput("circ", "Add Circle?"),
submitButton('Geolocate')
),
# Main panel: leaflet map showing the geocoded point
mainPanel(
leafletOutput("map", height = 550)
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FScolar.R
\name{FScolar}
\alias{FScolar}
\title{Cola dois parametros de entrada}
\usage{
FScolar(a, b)
}
\arguments{
\item{a}{primeiro parametro}
\item{b}{segundo parametro}
}
\value{
o paste dos dois parametros
}
\description{
Recebe dois parametros e cola os mesmos numa unica saída
}
|
/man/FScolar.Rd
|
no_license
|
fabiosalle/FScolar
|
R
| false
| true
| 369
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FScolar.R
\name{FScolar}
\alias{FScolar}
\title{Cola dois parametros de entrada}
\usage{
FScolar(a, b)
}
\arguments{
\item{a}{primeiro parametro}
\item{b}{segundo parametro}
}
\value{
o paste dos dois parametros
}
\description{
Recebe dois parametros e cola os mesmos numa unica saída
}
|
# Regression tests for tc() and tlist(): trailing-comma-tolerant counterparts
# of base c() and list().
context("t")

test_that("result equivalence of tc", {
  # tc() must match c() exactly, both with and without a trailing comma,
  # including c()'s usual coercion of mixed types to a common mode.
  expect_equal(c(1:5), tc(1:5), info="simple sequence")
  expect_equal(c(1:5), tc(1:5, ), info="simple sequence with extra comma")
  expect_equal(c(1, 2, 3), tc(1, 2, 3), info="individual parameters")
  expect_equal(c(1, 2, 3), tc(1, 2, 3, ), info="individual parameters with comma")
  expect_equal(c(1L, "2", 3.4), tc(1L, "2", 3.4), info="coercion")
  expect_equal(c(1L, "2", 3.4), tc(1L, "2", 3.4, ), info="coercion with comma")
})
test_that("result equivalence of tlist", {
  # tlist() must match list() exactly; unlike tc(), elements keep their
  # individual types (no coercion happens for lists).
  expect_equal(list(1:5), tlist(1:5), info="simple sequence")
  expect_equal(list(1:5), tlist(1:5, ), info="simple sequence with extra comma")
  expect_equal(list(1, 2, 3), tlist(1, 2, 3), info="individual parameters")
  expect_equal(list(1, 2, 3), tlist(1, 2, 3, ), info="individual parameters with comma")
  expect_equal(list(1L, "2", 3.4), tlist(1L, "2", 3.4), info="coercion")
  expect_equal(list(1L, "2", 3.4), tlist(1L, "2", 3.4, ), info="coercion with comma")
})
|
/inst/tests/test-t.R
|
no_license
|
krlmlr-archive/sweetnR
|
R
| false
| false
| 1,002
|
r
|
# Regression tests for tc() and tlist(): trailing-comma-tolerant counterparts
# of base c() and list().
context("t")

test_that("result equivalence of tc", {
  # tc() must match c() exactly, both with and without a trailing comma,
  # including c()'s usual coercion of mixed types to a common mode.
  expect_equal(c(1:5), tc(1:5), info="simple sequence")
  expect_equal(c(1:5), tc(1:5, ), info="simple sequence with extra comma")
  expect_equal(c(1, 2, 3), tc(1, 2, 3), info="individual parameters")
  expect_equal(c(1, 2, 3), tc(1, 2, 3, ), info="individual parameters with comma")
  expect_equal(c(1L, "2", 3.4), tc(1L, "2", 3.4), info="coercion")
  expect_equal(c(1L, "2", 3.4), tc(1L, "2", 3.4, ), info="coercion with comma")
})
test_that("result equivalence of tlist", {
  # tlist() must match list() exactly; unlike tc(), elements keep their
  # individual types (no coercion happens for lists).
  expect_equal(list(1:5), tlist(1:5), info="simple sequence")
  expect_equal(list(1:5), tlist(1:5, ), info="simple sequence with extra comma")
  expect_equal(list(1, 2, 3), tlist(1, 2, 3), info="individual parameters")
  expect_equal(list(1, 2, 3), tlist(1, 2, 3, ), info="individual parameters with comma")
  expect_equal(list(1L, "2", 3.4), tlist(1L, "2", 3.4), info="coercion")
  expect_equal(list(1L, "2", 3.4), tlist(1L, "2", 3.4, ), info="coercion with comma")
})
|
#' Computes a vector or matrix norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Whether this function computes a vector or matrix norm is determined as follows:
#'
#' * If `dim` is an int, the vector norm will be computed.
#' * If `dim` is a 2-tuple, the matrix norm will be computed.
#' * If `dim=NULL` and `ord=NULL`, A will be flattened to 1D and the 2-norm of the resulting vector will be computed.
#' * If `dim=NULL` and `ord!=NULL`, A must be 1D or 2D.
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#'
#' @param A (Tensor): tensor of shape `(*, n)` or `(*, m, n)` where `*` is zero or more batch dimensions
#' @param ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `NULL`
#' @param dim (int, Tuple[int], optional): dimensions over which to compute
#' the vector or matrix norm. See above for the behavior when `dim=NULL`.
#' Default: `NULL`
#' @param keepdim (bool, optional): If set to `TRUE`, the reduced dimensions are retained
#' in the result as dimensions with size one. Default: `FALSE`
#' @param dtype dtype (`torch_dtype`, optional): If specified, the input tensor is cast to
#' `dtype` before performing the operation, and the returned tensor's type
#' will be `dtype`. Default: `NULL`
#'
#' @family linalg
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float()) - 4
#' a
#' b <- a$reshape(c(3, 3))
#' b
#'
#' linalg_norm(a)
#' linalg_norm(b)
#'
#' @export
linalg_norm <- function(A, ord = NULL, dim = NULL, keepdim = FALSE, dtype = NULL) {
  # Thin wrapper: forward every argument by name to the generated binding.
  # NOTE(review): argument names appear significant to overload resolution in
  # the generated torch_* bindings (cf. linalg_matrix_rank) — keep them as-is.
  torch_linalg_norm(self = A, ord = ord, dim = dim, keepdim = keepdim, dtype = dtype)
}
#' Computes a vector norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Supports input of float, double, cfloat and cdouble dtypes.
#' This function does not necessarily treat multidimensional `A` as a batch of
#' vectors, instead:
#'
#' - If `dim=NULL`, `A` will be flattened before the norm is computed.
#' - If `dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
#' and the other dimensions will be treated as batch dimensions.
#'
#' This behavior is for consistency with [linalg_norm()].
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#' @family linalg
#'
#' @param A (Tensor): tensor, flattened by default, but this behavior can be
#' controlled using `dim`.
#' @param ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `2`
#'
#' @inheritParams linalg_norm
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float()) - 4
#' a
#' b <- a$reshape(c(3, 3))
#' b
#'
#' linalg_vector_norm(a, ord = 3.5)
#' linalg_vector_norm(b, ord = 3.5)
#'
#' @export
linalg_vector_norm <- function(A, ord=2, dim=NULL, keepdim=FALSE, dtype=NULL) {
  # Thin wrapper: forward every argument by name to the generated binding.
  torch_linalg_vector_norm(
    self = A,
    ord = ord,
    dim = dim,
    keepdim = keepdim,
    dtype = dtype
  )
}
#' Computes a matrix norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Support input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices: the norm will be computed over the
#' dimensions specified by the 2-tuple `dim` and the other dimensions will
#' be treated as batch dimensions. The output will have the same batch dimensions.
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#' @family linalg
#'
#' @param A (Tensor): tensor with two or more dimensions. By default its
#' shape is interpreted as `(*, m, n)` where `*` is zero or more
#' batch dimensions, but this behavior can be controlled using `dim`.
#' @param ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
#' @inheritParams linalg_norm
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float())$reshape(c(3,3))
#' linalg_matrix_norm(a)
#' linalg_matrix_norm(a, ord = -1)
#' b <- a$expand(c(2, -1, -1))
#' linalg_matrix_norm(b)
#' linalg_matrix_norm(b, dim = c(1, 3))
#'
#' @export
linalg_matrix_norm <- function(A, ord='fro', dim=c(-2, -1), keepdim=FALSE, dtype=NULL) {
  # Thin wrapper: forward every argument by name to the generated binding.
  # The default dim = c(-2, -1) selects the last two (matrix) dimensions.
  torch_linalg_matrix_norm(
    self = A,
    ord = ord,
    dim = dim,
    keepdim = keepdim,
    dtype = dtype
  )
}
#' Computes the determinant of a square matrix.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' a <- torch_randn(3,3)
#' linalg_det(a)
#'
#' a <- torch_randn(3,3,3)
#' linalg_det(a)
#'
#' @family linalg
#' @export
linalg_det <- function(A) {
  # Thin wrapper delegating directly to the generated binding.
  torch_linalg_det(A)
}
#' Computes the sign and natural logarithm of the absolute value of the determinant of a square matrix.
#'
#' For complex `A`, it returns the angle and the natural logarithm of the modulus of the
#' determinant, that is, a logarithmic polar decomposition of the determinant.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @section Notes:
#' - The determinant can be recovered as `sign * exp(logabsdet)`.
#' - When a matrix has a determinant of zero, it returns `(0, -Inf)`.
#'
#' @inheritParams linalg_det
#'
#' @returns
#'
#' A list `(sign, logabsdet)`.
#' `logabsdet` will always be real-valued, even when `A` is complex.
#' `sign` will have the same dtype as `A`.
#'
#' @examples
#' a <- torch_randn(3,3)
#' linalg_slogdet(a)
#'
#' @family linalg
#' @export
linalg_slogdet <- function(A) {
  # Thin wrapper delegating directly to the generated binding; the binding
  # returns the (sign, logabsdet) pair described in the roxygen docs above.
  torch_linalg_slogdet(A)
}
#' Computes the condition number of a matrix with respect to a matrix norm.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **condition number** \eqn{\kappa} of a matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("\\\\kappa(A) = \\\\|A\\\\|_p\\\\|A^{-1}\\\\|_p")}
#'
#' The condition number of `A` measures the numerical stability of the linear system `AX = B`
#' with respect to a matrix norm.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' `p` defines the matrix norm that is computed. See the table in 'Details' to
#' find the supported norms.
#'
#' For `p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
#' [linalg_norm()] and [linalg_inv()].
#'
#' As such, in this case, the matrix (or every matrix in the batch) `A` has to be square
#' and invertible.
#'
#' For `p` in `(2, -2)`, this function can be computed in terms of the singular values
#' \eqn{\sigma_1 \geq \ldots \geq \sigma_n}
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("\\\\kappa_2(A) = \\\\frac{\\\\sigma_1}{\\\\sigma_n}\\\\qquad \\\\kappa_{-2}(A) = \\\\frac{\\\\sigma_n}{\\\\sigma_1}")}
#'
#' In these cases, it is computed using [linalg_svd()]. For these norms, the matrix
#' (or every matrix in the batch) `A` may have any shape.
#'
#' @note When inputs are on a CUDA device, this function synchronizes that device with the CPU
#' if `p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
#'
#' @includeRmd man/rmd/linalg-cond.Rmd details
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
#' for `p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
#' is invertible for `p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
#' @param p (int, inf, -inf, 'fro', 'nuc', optional):
#' the type of the matrix norm to use in the computations (see above). Default: `NULL`
#'
#' @returns
#' A real-valued tensor, even when `A` is complex.
#'
#' @examples
#' a <- torch_tensor(rbind(c(1., 0, -1), c(0, 1, 0), c(1, 0, 1)))
#' linalg_cond(a)
#' linalg_cond(a, "fro")
#'
#' @export
linalg_cond <- function(A, p=NULL) {
  # Thin wrapper delegating to the generated binding; p = NULL defers the
  # choice of matrix norm to the binding's default.
  torch_linalg_cond(A, p = p)
}
#' Computes the numerical rank of a matrix.
#'
#' The matrix rank is computed as the number of singular values
#' (or eigenvalues in absolute value when `hermitian = TRUE`)
#' that are greater than the specified `tol` threshold.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `hermitian = TRUE`, `A` is assumed to be Hermitian if complex or
#' symmetric if real, but this is not checked internally. Instead, just the lower
#' triangular part of the matrix is used in the computations.
#'
#' If `tol` is not specified and `A` is a matrix of dimensions `(m, n)`,
#' the tolerance is set to be
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' tol = \\\\sigma_1 \\\\max(m, n) \\\\varepsilon
#' ")}
#'
#' where \eqn{\sigma_1} is the largest singular value
#' (or eigenvalue in absolute value when `hermitian = TRUE`), and
#' \eqn{\varepsilon} is the epsilon value for the dtype of `A` (see [torch_finfo()]).
#'
#' If `A` is a batch of matrices, `tol` is computed this way for every element of
#' the batch.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more
#' batch dimensions.
#' @param tol (float, Tensor, optional): the tolerance value. See above for
#' the value it takes when `NULL`. Default: `NULL`.
#' @param hermitian (bool, optional): indicates whether `A` is Hermitian if complex
#' or symmetric if real. Default: `FALSE`.
#'
#' @examples
#' a <- torch_eye(10)
#' linalg_matrix_rank(a)
#'
#' @family linalg
#' @export
linalg_matrix_rank <- function(A, tol=NULL, hermitian=FALSE) {
  # No tolerance supplied: let the binding compute the default tol (see the
  # roxygen formula above).
  if (is.null(tol))
    torch_linalg_matrix_rank(self = A, tol = tol, hermitian = hermitian)
  else {
    # A user-supplied tolerance must be a tensor before calling the binding.
    if (!is_torch_tensor(tol))
      tol <- torch_scalar_tensor(tol)
    # NOTE(review): this branch passes the matrix as `input =` while the branch
    # above uses `self =` — presumably selecting a different overload of the
    # generated binding (double vs tensor tol); confirm the asymmetry is
    # intentional before changing either argument name.
    torch_linalg_matrix_rank(input = A, tol = tol, hermitian = hermitian)
  }
}
#' Computes the Cholesky decomposition of a complex Hermitian or real symmetric positive-definite matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **Cholesky decomposition** of a complex Hermitian or real symmetric positive-definite matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = LL^{H}\\\\mathrlap{\\\\qquad L \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' where \eqn{L} is a lower triangular matrix and
#' \eqn{L^{H}} is the conjugate transpose when \eqn{L} is complex, and the
#' transpose when \eqn{L} is real-valued.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @seealso
#' - [linalg_cholesky_ex()] for a version of this operation that
#' skips the (slow) error checking by default and instead returns the debug
#' information. This makes it a faster way to check if a matrix is
#' positive-definite.
#' - [linalg_eigh()] for a different decomposition of a Hermitian matrix.
#' The eigenvalue decomposition gives more information about the matrix but it
#' slower to compute than the Cholesky decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian positive-definite matrices.
#'
#' @examples
#' a <- torch_eye(10)
#' linalg_cholesky(a)
#'
#' @family linalg
#' @export
linalg_cholesky <- function(A) {
  # Thin wrapper delegating directly to the generated binding.
  torch_linalg_cholesky(A)
}
#' Computes the QR decomposition of a matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **full QR decomposition** of a matrix
#' \eqn{A \in \mathbb{K}^{m \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = QR\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{m \\\\times m}, R \\\\in \\\\mathbb{K}^{m \\\\times n}}
#' ")}
#'
#' where \eqn{Q} is orthogonal in the real case and unitary in the complex case, and \eqn{R} is upper triangular.
#' When `m > n` (tall matrix), as `R` is upper triangular, its last `m - n` rows are zero.
#' In this case, we can drop the last `m - n` columns of `Q` to form the
#' **reduced QR decomposition**:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = QR\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{m \\\\times n}, R \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' The reduced QR decomposition agrees with the full QR decomposition when `n >= m` (wide matrix).
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The parameter `mode` chooses between the full and reduced QR decomposition.
#'
#' If `A` has shape `(*, m, n)`, denoting `k = min(m, n)`
#' - `mode = 'reduced'` (default): Returns `(Q, R)` of shapes `(*, m, k)`, `(*, k, n)` respectively.
#' - `mode = 'complete'`: Returns `(Q, R)` of shapes `(*, m, m)`, `(*, m, n)` respectively.
#' - `mode = 'r'`: Computes only the reduced `R`. Returns `(Q, R)` with `Q` empty and `R` of shape `(*, k, n)`.
#'
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param mode (str, optional): one of `'reduced'`, `'complete'`, `'r'`.
#' Controls the shape of the returned tensors. Default: `'reduced'`.
#'
#' @returns A list `(Q, R)`.
#'
#' @examples
#' a <- torch_tensor(rbind(c(12., -51, 4), c(6, 167, -68), c(-4, 24, -41)))
#' qr <- linalg_qr(a)
#'
#' torch_mm(qr[[1]], qr[[2]])$round()
#' torch_mm(qr[[1]]$t(), qr[[1]])$round()
#'
#' @family linalg
#' @export
linalg_qr <- function(A, mode='reduced') {
  # Thin wrapper delegating to the generated binding; `mode` picks between
  # the reduced/complete/R-only decompositions documented above.
  torch_linalg_qr(A, mode = mode)
}
#' Computes the eigenvalue decomposition of a square matrix if it exists.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalue decomposition** of a square matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} (if it exists) is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = V \\\\operatorname{diag}(\\\\Lambda) V^{-1}\\\\mathrlap{\\\\qquad V \\\\in \\\\mathbb{C}^{n \\\\times n}, \\\\Lambda \\\\in \\\\mathbb{C}^n}
#' ")}
#'
#' This decomposition exists if and only if \eqn{A} is `diagonalizable`_.
#' This is the case when all its eigenvalues are different.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @note The eigenvalues and eigenvectors of a real matrix may be complex.
#'
#' @section Warning:
#'
#' - This function assumes that `A` is `diagonalizable`_ (for example, when all the
#' eigenvalues are different). If it is not diagonalizable, the returned
#' eigenvalues will be correct but \eqn{A \neq V \operatorname{diag}(\Lambda)V^{-1}}.
#'
#' - The eigenvectors of a matrix are not unique, nor are they continuous with respect to
#' `A`. Due to this lack of uniqueness, different hardware and software may compute
#' different eigenvectors.
#' This non-uniqueness is caused by the fact that multiplying an eigenvector by a
#' non-zero number produces another set of valid eigenvectors of the matrix.
#'   In this implementation, the returned eigenvectors are normalized to have norm
#' `1` and largest real component.
#'
#' - Gradients computed using `V` will only be finite when `A` does not have repeated eigenvalues.
#' Furthermore, if the distance between any two eigenvalues is close to zero,
#' the gradient will be numerically unstable, as it depends on the eigenvalues
#' \eqn{\lambda_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}}.
#'
#' @seealso
#' - [linalg_eigvals()] computes only the eigenvalues. Unlike [linalg_eig()], the gradients of
#' [linalg_eigvals()] are always numerically stable.
#' - [linalg_eigh()] for a (faster) function that computes the eigenvalue decomposition
#' for Hermitian and symmetric matrices.
#' - [linalg_svd()] for a function that computes another type of spectral
#' decomposition that works on matrices of any shape.
#' - [linalg_qr()] for another (much faster) decomposition that works on matrices of
#' any shape.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of diagonalizable matrices.
#'
#' @returns
#' A list `(eigenvalues, eigenvectors)` which corresponds to \eqn{\Lambda} and \eqn{V} above.
#' `eigenvalues` and `eigenvectors` will always be complex-valued, even when `A` is real. The eigenvectors
#' will be given by the columns of `eigenvectors`.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' wv = linalg_eig(a)
#'
#' @family linalg
#' @export
linalg_eig <- function(A) {
  # Thin wrapper delegating directly to the generated binding; returns the
  # (eigenvalues, eigenvectors) pair described in the roxygen docs above.
  torch_linalg_eig(A)
}
#' Computes the eigenvalues of a square matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalues** of a square matrix \eqn{A \in \mathbb{K}^{n \times n}} are defined
#' as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' p(\\\\lambda) = \\\\operatorname{det}(A - \\\\lambda \\\\mathrm{I}_n)\\\\mathrlap{\\\\qquad \\\\lambda \\\\in \\\\mathbb{C}}
#' ")}
#'
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @note The eigenvalues of a real matrix may be complex, as the roots of a real polynomial may be complex.
#' The eigenvalues of a matrix are always well-defined, even when the matrix is not diagonalizable.
#'
#' @seealso [linalg_eig()] computes the full eigenvalue decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' w <- linalg_eigvals(a)
#'
#' @family linalg
#' @export
linalg_eigvals <- function(A) {
  # Thin wrapper delegating directly to the generated binding.
  torch_linalg_eigvals(A)
}
#' Computes the eigenvalue decomposition of a complex Hermitian or real symmetric matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalue decomposition** of a complex Hermitian or real symmetric matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = Q \\\\operatorname{diag}(\\\\Lambda) Q^{H}\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{n \\\\times n}, \\\\Lambda \\\\in \\\\mathbb{R}^n}
#' ")}
#'
#' where \eqn{Q^{H}} is the conjugate transpose when \eqn{Q} is complex, and the transpose when \eqn{Q} is real-valued.
#' \eqn{Q} is orthogonal in the real case and unitary in the complex case.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' `A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
#' - If `UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
#' - If `UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
#' The eigenvalues are returned in ascending order.
#'
#' @note The eigenvalues of real symmetric or complex Hermitian matrices are always real.
#'
#' @section Warning:
#' - The eigenvectors of a symmetric matrix are not unique, nor are they continuous with
#' respect to `A`. Due to this lack of uniqueness, different hardware and
#' software may compute different eigenvectors.
#' This non-uniqueness is caused by the fact that multiplying an eigenvector by
#' `-1` in the real case or by \eqn{e^{i \phi}, \phi \in \mathbb{R}} in the complex
#' case produces another set of valid eigenvectors of the matrix.
#' This non-uniqueness problem is even worse when the matrix has repeated eigenvalues.
#' In this case, one may multiply the associated eigenvectors spanning
#' the subspace by a rotation matrix and the resulting eigenvectors will be valid
#' eigenvectors.
#' - Gradients computed using the `eigenvectors` tensor will only be finite when
#' `A` has unique eigenvalues.
#'   Furthermore, if the distance between any two eigenvalues is close to zero,
#' the gradient will be numerically unstable, as it depends on the eigenvalues
#' \eqn{\lambda_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}}.
#'
#' @seealso
#' - [linalg_eigvalsh()] computes only the eigenvalues values of a Hermitian matrix.
#' Unlike [linalg_eigh()], the gradients of [linalg_eigvalsh()] are always
#' numerically stable.
#' - [linalg_cholesky()] for a different decomposition of a Hermitian matrix.
#' The Cholesky decomposition gives less information about the matrix but is much faster
#' to compute than the eigenvalue decomposition.
#' - [linalg_eig()] for a (slower) function that computes the eigenvalue decomposition
#' of a not necessarily Hermitian square matrix.
#' - [linalg_svd()] for a (slower) function that computes the more general SVD
#' decomposition of matrices of any shape.
#' - [linalg_qr()] for another (much faster) decomposition that works on general
#' matrices.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian matrices.
#' @param UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
#' of `A` in the computations. Default: `'L'`.
#'
#' @returns
#' A list `(eigenvalues, eigenvectors)` which corresponds to \eqn{\Lambda} and \eqn{Q} above.
#' `eigenvalues` will always be real-valued, even when `A` is complex.
#'
#' It will also be ordered in ascending order.
#' `eigenvectors` will have the same dtype as `A` and will contain the eigenvectors as its columns.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' linalg_eigh(a)
#'
#' @family linalg
#' @export
linalg_eigh <- function(A, UPLO='L') {
  # Thin wrapper delegating to the generated binding.
  # NOTE(review): UPLO is passed positionally here but by name in
  # linalg_eigvalsh() below — harmless, though naming it would be consistent.
  torch_linalg_eigh(A, UPLO)
}
#' Computes the eigenvalues of a complex Hermitian or real symmetric matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalues** of a complex Hermitian or real symmetric matrix \eqn{A \in \mathbb{K}^{n \times n}}
#' are defined as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' p(\\\\lambda) = \\\\operatorname{det}(A - \\\\lambda \\\\mathrm{I}_n)\\\\mathrlap{\\\\qquad \\\\lambda \\\\in \\\\mathbb{R}}
#' ")}
#'
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#'
#' The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The eigenvalues are returned in ascending order.
#'
#' `A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
#' - If `UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
#' - If `UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
#'
#'
#' @seealso
#' - [linalg_eigh()] computes the full eigenvalue decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian matrices.
#' @param UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
#' of `A` in the computations. Default: `'L'`.
#'
#' @returns
#' A real-valued tensor containing the eigenvalues even when `A` is complex.
#' The eigenvalues are returned in ascending order.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' linalg_eigvalsh(a)
#'
#' @family linalg
#' @export
linalg_eigvalsh <- function(A, UPLO='L') {
  # Thin wrapper delegating to the generated binding; UPLO selects which
  # triangle of A is read (see roxygen docs above).
  torch_linalg_eigvalsh(A, UPLO = UPLO)
}
#' Computes the singular value decomposition (SVD) of a matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **full SVD** of a matrix
#' \eqn{A \in \mathbb{K}^{m \times n}}, if `k = min(m,n)`, is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = U \\\\operatorname{diag}(S) V^{H} \\\\mathrlap{\\\\qquad U \\\\in \\\\mathbb{K}^{m \\\\times m}, S \\\\in \\\\mathbb{R}^k, V \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' where \eqn{\operatorname{diag}(S) \in \mathbb{K}^{m \times n}},
#' \eqn{V^{H}} is the conjugate transpose when \eqn{V} is complex, and the transpose when \eqn{V} is real-valued.
#'
#' The matrices \eqn{U}, \eqn{V} (and thus \eqn{V^{H}}) are orthogonal in the real case, and unitary in the complex case.
#' When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = U \\\\operatorname{diag}(S) V^{H} \\\\mathrlap{\\\\qquad U \\\\in \\\\mathbb{K}^{m \\\\times k}, S \\\\in \\\\mathbb{R}^k, V \\\\in \\\\mathbb{K}^{k \\\\times n}}
#' ")}
#'
#' where \eqn{\operatorname{diag}(S) \in \mathbb{K}^{k \times k}}.
#'
#' In this case, \eqn{U} and \eqn{V} also have orthonormal columns.
#' Supports input of float, double, cfloat and cdouble dtypes.
#'
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' The returned decomposition is a named tuple `(U, S, Vᴴ)`
#' which corresponds to \eqn{U}, \eqn{S}, \eqn{V^{H}} above.
#'
#' The singular values are returned in descending order.
#' The parameter `full_matrices` chooses between the full (default) and reduced SVD.
#'
#' @note
#' When `full_matrices=TRUE`, the gradients with respect to `U[..., :, min(m, n):]`
#' and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
#' of the corresponding subspaces.
#'
#' @section Warnings:
#' The returned tensors `U` and `V` are not unique, nor are they continuous with
#' respect to `A`.
#' Due to this lack of uniqueness, different hardware and software may compute
#' different singular vectors.
#' This non-uniqueness is caused by the fact that multiplying any pair of singular
#' vectors \eqn{u_k, v_k} by `-1` in the real case or by
#' \eqn{e^{i \phi}, \phi \in \mathbb{R}} in the complex case produces another two
#' valid singular vectors of the matrix.
#' This non-uniqueness problem is even worse when the matrix has repeated singular values.
#' In this case, one may multiply the associated singular vectors of `U` and `V` spanning
#' the subspace by a rotation matrix and the resulting vectors will span the same subspace.
#'
#' Gradients computed using `U` or `Vᴴ` will only be finite when
#' `A` does not have zero as a singular value or repeated singular values.
#' Furthermore, if the distance between any two singular values is close to zero,
#' the gradient will be numerically unstable, as it depends on the singular values
#' \eqn{\sigma_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}}.
#' The gradient will also be numerically unstable when `A` has small singular
#' values, as it also depends on the computation of \eqn{\frac{1}{\sigma_i}}.
#'
#' @seealso
#' - [linalg_svdvals()] computes only the singular values.
#' Unlike [linalg_svd()], the gradients of [linalg_svdvals()] are always
#' numerically stable.
#' - [linalg_eig()] for a function that computes another type of spectral
#'   decomposition of a matrix. The eigendecomposition works just on square matrices.
#' - [linalg_eigh()] for a (faster) function that computes the eigenvalue decomposition
#' for Hermitian and symmetric matrices.
#' - [linalg_qr()] for another (much faster) decomposition that works on general
#' matrices.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param full_matrices (bool, optional): controls whether to compute the full or reduced
#' SVD, and consequently, the shape of the returned tensors `U` and `Vᴴ`. Default: `TRUE`.
#'
#' @returns
#' A list `(U, S, V)` which corresponds to \eqn{U}, \eqn{S}, \eqn{V^{H}} above.
#' `S` will always be real-valued, even when `A` is complex.
#' It will also be ordered in descending order.
#' `U` and `Vᴴ` will have the same dtype as `A`. The left / right singular vectors will be given by
#' the columns of `U` and the rows of `Vᴴ` respectively.
#'
#' @examples
#'
#' a <- torch_randn(5, 3)
#' linalg_svd(a, full_matrices=FALSE)
#'
#' @family linalg
#' @export
linalg_svd <- function(A, full_matrices=TRUE) {
  # Thin wrapper delegating to the generated binding; full_matrices toggles
  # between the full (default) and reduced SVD documented above.
  torch_linalg_svd(A, full_matrices = full_matrices)
}
#' Computes the singular values of a matrix.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The singular values are returned in descending order.
#'
#' @seealso
#' [linalg_svd()] computes the full singular value decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#'
#' @returns
#' A real-valued tensor, even when `A` is complex.
#'
#' @examples
#' A <- torch_randn(5, 3)
#' S <- linalg_svdvals(A)
#' S
#'
#' @family linalg
#' @export
linalg_svdvals <- function(A) {
  # Thin wrapper delegating directly to the generated binding.
  torch_linalg_svdvals(A)
}
#' Computes the solution of a square system of linear equations with a unique solution.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' this function computes the solution \eqn{X \in \mathbb{K}^{n \times k}} of the **linear system** associated to
#' \eqn{A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{m \times k}}, which is defined as
#'
#' \deqn{
#' AX = B
#' }
#'
#' This system of linear equations has one solution if and only if \eqn{A} is invertible.
#' This function assumes that \eqn{A} is invertible.
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#'
#' Letting `*` be zero or more batch dimensions,
#'
#' - If `A` has shape `(*, n, n)` and `B` has shape `(*, n)` (a batch of vectors) or shape
#' `(*, n, k)` (a batch of matrices or "multiple right-hand sides"), this function returns `X` of shape
#' `(*, n)` or `(*, n, k)` respectively.
#' - Otherwise, if `A` has shape `(*, n, n)` and `B` has shape `(n,)` or `(n, k)`, `B`
#' is broadcasted to have shape `(*, n)` or `(*, n, k)` respectively.
#'
#' This function then returns the solution of the resulting batch of systems of linear equations.
#'
#' @note
#' This function computes `X = A$inverse() @ B` in a faster and
#' more numerically stable way than performing the computations separately.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#' @param B (Tensor): right-hand side tensor of shape `(*, n)` or `(*, n, k)` or `(n,)` or `(n, k)`
#' according to the rules described above
#'
#' @examples
#' A <- torch_randn(3, 3)
#' b <- torch_randn(3)
#' x <- linalg_solve(A, b)
#' torch_allclose(torch_matmul(A, x), b)
#'
#' @family linalg
#' @export
linalg_solve <- function(A, B) {
  # Solve A X = B through the native binding (assumes A is invertible).
  solution <- torch_linalg_solve(A, B)
  solution
}
#' Computes a solution to the least squares problem of a system of linear equations.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **least squares problem** for a linear system \eqn{AX = B} with
#' \eqn{A \in \mathbb{K}^{m \times n}, B \in \mathbb{K}^{m \times k}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' \\\\min_{X \\\\in \\\\mathbb{K}^{n \\\\times k}} \\\\|AX - B\\\\|_F
#' ")}
#'
#' where \eqn{\|-\|_F} denotes the Frobenius norm.
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#'
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#' `driver` chooses the LAPACK/MAGMA function that will be used.
#'
#' For CPU inputs the valid values are `'gels'`, `'gelsy'`, `'gelsd'`, `'gelss'`.
#' For CUDA input, the only valid driver is `'gels'`, which assumes that `A` is full-rank.
#'
#' To choose the best driver on CPU consider:
#' - If `A` is well-conditioned (its [condition number](https://pytorch.org/docs/master/linalg.html#torch.linalg.cond) is not too large), or you do not mind some precision loss.
#' - For a general matrix: `'gelsy'` (QR with pivoting) (default)
#' - If `A` is full-rank: `'gels'` (QR)
#' - If `A` is not well-conditioned.
#' - `'gelsd'` (tridiagonal reduction and SVD)
#' - But if you run into memory issues: `'gelss'` (full SVD).
#'
#' See also the [full description of these drivers](https://www.netlib.org/lapack/lug/node27.html)
#'
#' `rcond` is used to determine the effective rank of the matrices in `A`
#' when `driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`).
#' In this case, if \eqn{\sigma_i} are the singular values of `A` in decreasing order,
#' \eqn{\sigma_i} will be rounded down to zero if \eqn{\sigma_i \leq rcond \cdot \sigma_1}.
#' If `rcond = NULL` (default), `rcond` is set to the machine precision of the dtype of `A`.
#'
#' This function returns the solution to the problem and some extra information in a list of
#' four tensors `(solution, residuals, rank, singular_values)`. For inputs `A`, `B`
#' of shape `(*, m, n)`, `(*, m, k)` respectively, it contains
#' - `solution`: the least squares solution. It has shape `(*, n, k)`.
#' - `residuals`: the squared residuals of the solutions, that is, \eqn{\|AX - B\|_F^2}.
#' It has shape equal to the batch dimensions of `A`.
#' It is computed when `m > n` and every matrix in `A` is full-rank,
#' otherwise, it is an empty tensor.
#' If `A` is a batch of matrices and any matrix in the batch is not full rank,
#' then an empty tensor is returned. This behavior may change in a future PyTorch release.
#' - `rank`: tensor of ranks of the matrices in `A`.
#' It has shape equal to the batch dimensions of `A`.
#' It is computed when `driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`),
#' otherwise it is an empty tensor.
#' - `singular_values`: tensor of singular values of the matrices in `A`.
#' It has shape `(*, min(m, n))`.
#' It is computed when `driver` is one of (`'gelsd'`, `'gelss'`),
#' otherwise it is an empty tensor.
#'
#' @note
#' This function computes `X = A$pinverse() %*% B` in a faster and
#' more numerically stable way than performing the computations separately.
#'
#' @section Warning:
#' The default value of `rcond` may change in a future PyTorch release.
#' It is therefore recommended to use a fixed value to avoid potential
#' breaking changes.
#'
#' @param A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions.
#' @param rcond (float, optional): used to determine the effective rank of `A`.
#' If `rcond = NULL`, `rcond` is set to the machine
#' precision of the dtype of `A` times `max(m, n)`. Default: `NULL`.
#' @param ... currently unused.
#' @param driver (str, optional): name of the LAPACK/MAGMA method to be used.
#' If `NULL`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs.
#' Default: `NULL`.
#'
#' @returns
#' A list `(solution, residuals, rank, singular_values)`.
#'
#' @examples
#' A <- torch_tensor(rbind(c(10, 2, 3), c(3, 10, 5), c(5, 6, 12)))$unsqueeze(1) # shape (1, 3, 3)
#' B <- torch_stack(list(rbind(c(2, 5, 1), c(3, 2, 1), c(5, 1, 9)),
#' rbind(c(4, 2, 9), c(2, 0, 3), c(2, 5, 3))), dim = 1) # shape (2, 3, 3)
#' X <- linalg_lstsq(A, B)$solution # A is broadcasted to shape (2, 3, 3)
#'
#' @family linalg
#' @export
linalg_lstsq <- function(A, B, rcond = NULL, ..., driver = NULL) {
  # No arguments other than the named ones are accepted.
  ellipsis::check_dots_empty()
  # Build the argument list for the native binding incrementally so that
  # optional values are only passed when explicitly supplied.
  args <- list(
    self = A,
    b = B
  )
  if (is.null(driver)) {
    # Default driver as documented: 'gelsy' for CPU (or non-tensor)
    # inputs, 'gels' for inputs on any other device (e.g. CUDA).
    if (!is_torch_tensor(A) || is_cpu_device(A$device))
      driver <- "gelsy"
    else
      driver <- "gels"
  }
  args$driver <- driver
  # rcond is omitted entirely (rather than passed as NULL) so the backend
  # applies its own machine-precision default.
  if (!is.null(rcond))
    args$rcond <- rcond
  res <- do.call(torch_linalg_lstsq, args)
  # Name the four returned tensors for convenient `$` access.
  res <- setNames(res, c("solution", "residuals", "rank", "singular_values"))
  res
}
#' Computes the inverse of a square matrix if it exists.
#'
#' Throws a `runtime_error` if the matrix is not invertible.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' for a matrix \eqn{A \in \mathbb{K}^{n \times n}},
#' its **inverse matrix** \eqn{A^{-1} \in \mathbb{K}^{n \times n}} (if it exists) is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A^{-1}A = AA^{-1} = \\\\mathrm{I}_n
#' ")}
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#'
#' The inverse matrix exists if and only if \eqn{A} is invertible. In this case,
#' the inverse is unique.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices
#' then the output has the same batch dimensions.
#'
#' Consider using [linalg_solve()] if possible for multiplying a matrix on the left by
#' the inverse, as `linalg_solve(A, B) == A$inv() %*% B`
#' It is always preferred to use [linalg_solve()] when possible, as it is faster and more
#' numerically stable than computing the inverse explicitly.
#'
#' @seealso
#' [linalg_pinv()] computes the pseudoinverse (Moore-Penrose inverse) of matrices
#' of any shape.
#' [linalg_solve()] computes `A$inv() %*% B` with a
#' numerically stable algorithm.
#'
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of invertible matrices.
#'
#' @examples
#' A <- torch_randn(4, 4)
#' linalg_inv(A)
#'
#' @family linalg
#' @export
linalg_inv <- function(A) {
  # The native binding raises an error when any matrix is not invertible.
  inverse <- torch_linalg_inv(self = A)
  inverse
}
#' Computes the pseudoinverse (Moore-Penrose inverse) of a matrix.
#'
#' The pseudoinverse may be defined algebraically,
#' but it is more computationally convenient to understand it through the SVD.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `hermitian= TRUE`, `A` is assumed to be Hermitian if complex or
#' symmetric if real, but this is not checked internally. Instead, just the lower
#' triangular part of the matrix is used in the computations.
#' The singular values (or the norm of the eigenvalues when `hermitian= TRUE`)
#' that are below the specified `rcond` threshold are treated as zero and discarded
#' in the computation.
#'
#' @note This function uses [linalg_svd()] if `hermitian= FALSE` and
#' [linalg_eigh()] if `hermitian= TRUE`.
#' For CUDA inputs, this function synchronizes that device with the CPU.
#'
#' @note
#' Consider using [linalg_lstsq()] if possible for multiplying a matrix on the left by
#' the pseudoinverse, as `linalg_lstsq(A, B)$solution == A$pinv() %*% B`
#'
#' It is always preferred to use [linalg_lstsq()] when possible, as it is faster and more
#' numerically stable than computing the pseudoinverse explicitly.
#'
#' @seealso
#' - [linalg_inv()] computes the inverse of a square matrix.
#' - [linalg_lstsq()] computes `A$pinv() %*% B` with a
#' numerically stable algorithm.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param rcond (float or Tensor, optional): the tolerance value to determine when is a singular value zero
#' If it is a `torch_Tensor`, its shape must be
#' broadcastable to that of the singular values of
#' `A` as returned by [linalg_svd()].
#' Default: `1e-15`.
#' @param hermitian (bool, optional): indicates whether `A` is Hermitian if complex
#' or symmetric if real. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(3, 5)
#' linalg_pinv(A)
#'
#' @family linalg
#' @export
linalg_pinv <- function(A, rcond = 1e-15, hermitian=FALSE) {
  out <- torch_linalg_pinv(A, rcond = rcond, hermitian = hermitian)
  # The native binding can return a result with an extra leading dimension
  # relative to the input; drop it so the output rank matches the input
  # rank. NOTE(review): presumably the extra dimension only appears (and
  # has size 1) when `rcond` broadcasts -- confirm against the behavior of
  # torch_linalg_pinv before relying on this.
  if (length(dim(out)) != length(dim(A)))
    out <- out$squeeze(1)
  out
}
#' Computes the `n`-th power of a square matrix for an integer `n`.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `n=0`, it returns the identity matrix (or batch) of the same shape
#' as `A`. If `n` is negative, it returns the inverse of each matrix
#' (if invertible) raised to the power of `abs(n)`.
#'
#' @seealso
#' [linalg_solve()] computes `A$inverse() %*% B` with a
#' numerically stable algorithm.
#'
#'
#' @param A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
#' @param n (int): the exponent.
#'
#' @examples
#' A <- torch_randn(3, 3)
#' linalg_matrix_power(A, 0)
#'
#' @family linalg
#' @export
linalg_matrix_power <- function(A, n) {
  # n = 0 yields the identity; negative n inverts before exponentiating.
  powered <- torch_linalg_matrix_power(A, n = n)
  powered
}
#' Efficiently multiplies two or more matrices
#'
#' Efficiently multiplies two or more matrices by reordering the multiplications so that
#' the fewest arithmetic operations are performed.
#'
#' Supports inputs of `float`, `double`, `cfloat` and `cdouble` dtypes.
#' This function does not support batched inputs.
#'
#' Every tensor in `tensors` must be 2D, except for the first and last which
#' may be 1D. If the first tensor is a 1D vector of shape `(n,)` it is treated as a row vector
#' of shape `(1, n)`, similarly if the last tensor is a 1D vector of shape `(n,)` it is treated
#' as a column vector of shape `(n, 1)`.
#'
#' If the first and last tensors are matrices, the output will be a matrix.
#' However, if either is a 1D vector, then the output will be a 1D vector.
#' @note This function is implemented by chaining [torch_mm()] calls after
#' computing the optimal matrix multiplication order.
#'
#' @note The cost of multiplying two matrices with shapes `(a, b)` and `(b, c)` is
#' `a * b * c`. Given matrices `A`, `B`, `C` with shapes `(10, 100)`,
#' `(100, 5)`, `(5, 50)` respectively, we can calculate the cost of different
#' multiplication orders as follows:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' \\\\begin{align*}
#' \\\\operatorname{cost}((AB)C) &= 10 \\\\times 100 \\\\times 5 + 10 \\\\times 5 \\\\times 50 = 7500 \\\\
#' \\\\operatorname{cost}(A(BC)) &= 10 \\\\times 100 \\\\times 50 + 100 \\\\times 5 \\\\times 50 = 75000
#' \\\\end{align*}
#' ")}
#'
#' In this case, multiplying `A` and `B` first followed by `C` is 10 times faster.
#'
#'
#' @param tensors (`Sequence[Tensor]`): two or more tensors to multiply. The first and last
#' tensors may be 1D or 2D. Every other tensor must be 2D.
#'
#' @examples
#'
#' linalg_multi_dot(list(torch_tensor(c(1,2)), torch_tensor(c(2,3))))
#'
#' @family linalg
#' @export
linalg_multi_dot <- function(tensors) {
  # The optimal multiplication order is chosen by the native binding.
  product <- torch_linalg_multi_dot(tensors)
  product
}
#' Computes the first `n` columns of a product of Householder matrices.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' for a matrix \eqn{V \in \mathbb{K}^{m \times n}} with columns \eqn{v_i \in \mathbb{K}^m}
#' with \eqn{m \geq n} and a vector \eqn{\tau \in \mathbb{K}^k} with \eqn{k \leq n},
#' this function computes the first \eqn{n} columns of the matrix
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' H_1H_2 ... H_k \\\\qquad with \\\\qquad H_i = \\\\mathrm{I}_m - \\\\tau_i v_i v_i^{H}
#' ")}
#'
#' where \eqn{\mathrm{I}_m} is the `m`-dimensional identity matrix and
#' \eqn{v^{H}} is the conjugate transpose when \eqn{v} is complex, and the transpose when \eqn{v} is real-valued.
#' See [Representation of Orthogonal or Unitary Matrices](https://www.netlib.org/lapack/lug/node128.html) for
#' further details.
#'
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#' @note This function only uses the values strictly below the main diagonal of `A`.
#' The other values are ignored.
#'
#' @seealso
#' - [torch_geqrf()] can be used together with this function to form the `Q` from the
#' [linalg_qr()] decomposition.
#'
#' - [torch_ormqr()] is a related function that computes the matrix multiplication
#' of a product of Householder matrices with another matrix.
#' However, that function is not supported by autograd.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param tau (Tensor): tensor of shape `(*, k)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' A <- torch_randn(2, 2)
#' h_tau <- torch_geqrf(A)
#' Q <- linalg_householder_product(h_tau[[1]], h_tau[[2]])
#' torch_allclose(Q, linalg_qr(A)[[1]])
#'
#' @family linalg
#' @export
linalg_householder_product <- function(A, tau) {
  # Accumulate the product of Householder reflectors H_1 ... H_k.
  Q <- torch_linalg_householder_product(A, tau)
  Q
}
#' Computes the multiplicative inverse of [torch_tensordot()]
#'
#' If `m` is the product of the first `ind` dimensions of `A` and `n` is the product of
#' the rest of the dimensions, this function expects `m` and `n` to be equal.
#' If this is the case, it computes a tensor `X` such that
#' `tensordot(A, X, ind)` is the identity matrix in dimension `m`.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#'
#' @note Consider using [linalg_tensorsolve()] if possible for multiplying a tensor on the left
#' by the tensor inverse as `linalg_tensorsolve(A, B) == torch_tensordot(linalg_tensorinv(A), B))`
#'
#' It is always preferred to use [linalg_tensorsolve()] when possible, as it is faster and more
#' numerically stable than computing the pseudoinverse explicitly.
#'
#' @seealso
#' - [linalg_tensorsolve()] computes `torch_tensordot(linalg_tensorinv(A), B))`.
#'
#' @param A (Tensor): tensor to invert.
#' @param ind (int): index at which to compute the inverse of [torch_tensordot()]. Default: `3`.
#'
#' @examples
#' A <- torch_eye(4 * 6)$reshape(c(4, 6, 8, 3))
#' Ainv <- linalg_tensorinv(A, ind=3)
#' Ainv$shape
#' B <- torch_randn(4, 6)
#' torch_allclose(torch_tensordot(Ainv, B), linalg_tensorsolve(A, B))
#'
#' A <- torch_randn(4, 4)
#' Atensorinv<- linalg_tensorinv(A, 2)
#' Ainv <- linalg_inv(A)
#' torch_allclose(Atensorinv, Ainv)
#'
#' @family linalg
#' @export
linalg_tensorinv <- function(A, ind = 3L) {
  # `ind` is 1-based on the R side; the native binding expects the 0-based
  # PyTorch convention, hence the subtraction.
  torch_linalg_tensorinv(A, ind = ind - 1L)
}
#' Computes the solution `X` to the system `torch_tensordot(A, X) = B`.
#'
#' If `m` is the product of the first `B$ndim` dimensions of `A` and
#' `n` is the product of the rest of the dimensions, this function expects `m` and `n` to be equal.
#' The returned tensor `x` satisfies
#' `tensordot(A, x, dims=x$ndim) == B`.
#'
#' If `dims` is specified, `A` will be reshaped as
#' `A = movedim(A, dims, seq(len(dims) - A$ndim + 1, 0))`
#'
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#'
#' @seealso
#' - [linalg_tensorinv()] computes the multiplicative inverse of
#' [torch_tensordot()].
#'
#' @param A (Tensor): tensor to solve for.
#' @param B (Tensor): the solution
#' @param dims (Tuple[int], optional): dimensions of `A` to be moved.
#' If `NULL`, no dimensions are moved. Default: `NULL`.
#'
#' @examples
#' A <- torch_eye(2 * 3 * 4)$reshape(c(2 * 3, 4, 2, 3, 4))
#' B <- torch_randn(2 * 3, 4)
#' X <- linalg_tensorsolve(A, B)
#' X$shape
#' torch_allclose(torch_tensordot(A, X, dims=X$ndim), B)
#'
#' A <- torch_randn(6, 4, 4, 3, 2)
#' B <- torch_randn(4, 3, 2)
#' X <- linalg_tensorsolve(A, B, dims=c(1, 3))
#' A <- A$permute(c(2, 4, 5, 1, 3))
#' torch_allclose(torch_tensordot(A, X, dims=X$ndim), B, atol=1e-6)
#'
#' @family linalg
#' @export
linalg_tensorsolve <- function(A, B, dims = NULL) {
  # NOTE(review): unlike linalg_tensorinv(), `dims` is forwarded without a
  # 1-based -> 0-based shift; presumably the generated binding converts
  # dimension indices itself -- confirm against torch_linalg_tensorsolve.
  torch_linalg_tensorsolve(A, B, dims)
}
#' Computes the Cholesky decomposition of a complex Hermitian or real
#' symmetric positive-definite matrix.
#'
#' This function skips the (slow) error checking and error message construction
#' of [linalg_cholesky()], instead directly returning the LAPACK
#' error codes as part of a named tuple `(L, info)`. This makes this function
#' a faster way to check if a matrix is positive-definite, and it provides an
#' opportunity to handle decomposition errors more gracefully or performantly
#' than [linalg_cholesky()] does.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' If `A` is not a Hermitian positive-definite matrix, or if it's a batch of matrices
#' and one or more of them is not a Hermitian positive-definite matrix,
#' then `info` stores a positive integer for the corresponding matrix.
#' The positive integer indicates the order of the leading minor that is not positive-definite,
#' and the decomposition could not be completed.
#' `info` filled with zeros indicates that the decomposition was successful.
#' If `check_errors=TRUE` and `info` contains positive integers, then a RuntimeError is thrown.
#' @note If `A` is on a CUDA device, this function may synchronize that device with the CPU.
#' @note This function is "experimental" and it may change in a future PyTorch release.
#' @seealso
#' [linalg_cholesky()] is a NumPy compatible variant that always checks for errors.
#'
#' @param A (Tensor): the Hermitian `n \times n` matrix or the batch of such matrices of size
#' `(*, n, n)` where `*` is one or more batch dimensions.
#' @param check_errors (bool, optional): controls whether to check the content of `infos`. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(2, 2)
#' out <- linalg_cholesky_ex(A)
#' out
#'
#' @family linalg
#' @export
linalg_cholesky_ex <- function(A, check_errors = FALSE) {
  # Name the two outputs so callers can use $L and $info.
  result <- torch_linalg_cholesky_ex(A, check_errors = check_errors)
  setNames(result, c("L", "info"))
}
#' Computes the inverse of a square matrix if it is invertible.
#'
#' Returns a namedtuple `(inverse, info)`. `inverse` contains the result of
#' inverting `A` and `info` stores the LAPACK error codes.
#' If `A` is not an invertible matrix, or if it's a batch of matrices
#' and one or more of them is not an invertible matrix,
#' then `info` stores a positive integer for the corresponding matrix.
#' The positive integer indicates the diagonal element of the LU decomposition of
#' the input matrix that is exactly zero.
#' `info` filled with zeros indicates that the inversion was successful.
#' If `check_errors=TRUE` and `info` contains positive integers, then a RuntimeError is thrown.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' @note
#' If `A` is on a CUDA device then this function may synchronize
#' that device with the CPU.
#' @note This function is "experimental" and it may change in a future PyTorch release.
#'
#' @seealso
#' [linalg_inv()] is a NumPy compatible variant that always checks for errors.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of square matrices.
#' @param check_errors (bool, optional): controls whether to check the content of `info`. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(3, 3)
#' out <- linalg_inv_ex(A)
#'
#' @family linalg
#' @importFrom stats setNames
#' @export
linalg_inv_ex <- function(A, check_errors = FALSE) {
  # Name the two outputs so callers can use $inverse and $info.
  result <- torch_linalg_inv_ex(A, check_errors = check_errors)
  setNames(result, c("inverse", "info"))
}
#' Computes a vector or matrix norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Whether this function computes a vector or matrix norm is determined as follows:
#'
#' * If `dim` is an int, the vector norm will be computed.
#' * If `dim` is a 2-tuple, the matrix norm will be computed.
#' * If `dim=NULL` and `ord=NULL`, A will be flattened to 1D and the 2-norm of the resulting vector will be computed.
#' * If `dim=NULL` and `ord!=NULL`, A must be 1D or 2D.
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#'
#' @param A (Tensor): tensor of shape `(*, n)` or `(*, m, n)` where `*` is zero or more batch dimensions
#' @param ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `NULL`
#' @param dim (int, Tuple[int], optional): dimensions over which to compute
#' the vector or matrix norm. See above for the behavior when `dim=NULL`.
#' Default: `NULL`
#' @param keepdim (bool, optional): If set to `TRUE`, the reduced dimensions are retained
#' in the result as dimensions with size one. Default: `FALSE`
#' @param dtype dtype (`torch_dtype`, optional): If specified, the input tensor is cast to
#' `dtype` before performing the operation, and the returned tensor's type
#' will be `dtype`. Default: `NULL`
#'
#' @family linalg
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float()) - 4
#' a
#' b <- a$reshape(c(3, 3))
#' b
#'
#' linalg_norm(a)
#' linalg_norm(b)
#'
#' @export
linalg_norm <- function(A, ord = NULL, dim = NULL, keepdim = FALSE, dtype = NULL) {
  # Whether a vector or matrix norm is computed is decided by the native
  # binding from `ord` and `dim` (see the roxygen details above).
  torch_linalg_norm(
    self = A,
    ord = ord,
    dim = dim,
    keepdim = keepdim,
    dtype = dtype
  )
}
#' Computes a vector norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Supports input of float, double, cfloat and cdouble dtypes.
#' This function does not necessarily treat multidimensional `A` as a batch of
#' vectors, instead:
#'
#' - If `dim=NULL`, `A` will be flattened before the norm is computed.
#' - If `dim` is an `int` or a `tuple`, the norm will be computed over these dimensions
#' and the other dimensions will be treated as batch dimensions.
#'
#' This behavior is for consistency with [linalg_norm()].
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#' @family linalg
#'
#' @param A (Tensor): tensor, flattened by default, but this behavior can be
#' controlled using `dim`.
#' @param ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `2`
#'
#' @inheritParams linalg_norm
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float()) - 4
#' a
#' b <- a$reshape(c(3, 3))
#' b
#'
#' linalg_vector_norm(a, ord = 3.5)
#' linalg_vector_norm(b, ord = 3.5)
#'
#' @export
linalg_vector_norm <- function(A, ord = 2, dim = NULL, keepdim = FALSE, dtype = NULL) {
  # Forward everything to the native vector-norm binding.
  torch_linalg_vector_norm(self = A, ord = ord, dim = dim, keepdim = keepdim, dtype = dtype)
}
#' Computes a matrix norm.
#'
#' If `A` is complex valued, it computes the norm of `A$abs()`
#' Support input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices: the norm will be computed over the
#' dimensions specified by the 2-tuple `dim` and the other dimensions will
#' be treated as batch dimensions. The output will have the same batch dimensions.
#'
#' @includeRmd man/rmd/linalg-norm.Rmd details
#' @family linalg
#'
#' @param A (Tensor): tensor with two or more dimensions. By default its
#' shape is interpreted as `(*, m, n)` where `*` is zero or more
#' batch dimensions, but this behavior can be controlled using `dim`.
#' @param ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
#' @inheritParams linalg_norm
#'
#' @examples
#' a <- torch_arange(0, 8, dtype=torch_float())$reshape(c(3,3))
#' linalg_matrix_norm(a)
#' linalg_matrix_norm(a, ord = -1)
#' b <- a$expand(c(2, -1, -1))
#' linalg_matrix_norm(b)
#' linalg_matrix_norm(b, dim = c(1, 3))
#'
#' @export
linalg_matrix_norm <- function(A, ord = 'fro', dim = c(-2, -1), keepdim = FALSE, dtype = NULL) {
  # Forward everything to the native matrix-norm binding; the last two
  # dimensions are reduced by default.
  torch_linalg_matrix_norm(self = A, ord = ord, dim = dim, keepdim = keepdim, dtype = dtype)
}
#' Computes the determinant of a square matrix.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' a <- torch_randn(3,3)
#' linalg_det(a)
#'
#' a <- torch_randn(3,3,3)
#' linalg_det(a)
#'
#' @family linalg
#' @export
linalg_det <- function(A) {
  # Determinant of a square matrix (or batch thereof).
  determinant <- torch_linalg_det(A)
  determinant
}
#' Computes the sign and natural logarithm of the absolute value of the determinant of a square matrix.
#'
#' For complex `A`, it returns the angle and the natural logarithm of the modulus of the
#' determinant, that is, a logarithmic polar decomposition of the determinant.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @section Notes:
#' - The determinant can be recovered as `sign * exp(logabsdet)`.
#' - When a matrix has a determinant of zero, it returns `(0, -Inf)`.
#'
#' @inheritParams linalg_det
#'
#' @returns
#'
#' A list `(sign, logabsdet)`.
#' `logabsdet` will always be real-valued, even when `A` is complex.
#' `sign` will have the same dtype as `A`.
#'
#' @examples
#' a <- torch_randn(3,3)
#' linalg_slogdet(a)
#'
#' @family linalg
#' @export
linalg_slogdet <- function(A) {
  # Returns the pair (sign, logabsdet) computed by the native binding.
  result <- torch_linalg_slogdet(A)
  result
}
#' Computes the condition number of a matrix with respect to a matrix norm.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **condition number** \eqn{\kappa} of a matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("\\\\kappa(A) = \\\\|A\\\\|_p\\\\|A^{-1}\\\\|_p")}
#'
#' The condition number of `A` measures the numerical stability of the linear system `AX = B`
#' with respect to a matrix norm.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' `p` defines the matrix norm that is computed. See the table in 'Details' to
#' find the supported norms.
#'
#' For `p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
#' [linalg_norm()] and [linalg_inv()].
#'
#' As such, in this case, the matrix (or every matrix in the batch) `A` has to be square
#' and invertible.
#'
#' For `p` in `(2, -2)`, this function can be computed in terms of the singular values
#' \eqn{\sigma_1 \geq \ldots \geq \sigma_n}
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("\\\\kappa_2(A) = \\\\frac{\\\\sigma_1}{\\\\sigma_n}\\\\qquad \\\\kappa_{-2}(A) = \\\\frac{\\\\sigma_n}{\\\\sigma_1}")}
#'
#' In these cases, it is computed using [linalg_svd()]. For these norms, the matrix
#' (or every matrix in the batch) `A` may have any shape.
#'
#' @note When inputs are on a CUDA device, this function synchronizes that device with the CPU
#' if `p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
#'
#' @includeRmd man/rmd/linalg-cond.Rmd details
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
#' for `p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
#' is invertible for `p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
#' @param p (int, inf, -inf, 'fro', 'nuc', optional):
#' the type of the matrix norm to use in the computations (see above). Default: `NULL`
#'
#' @returns
#' A real-valued tensor, even when `A` is complex.
#'
#' @examples
#' a <- torch_tensor(rbind(c(1., 0, -1), c(0, 1, 0), c(1, 0, 1)))
#' linalg_cond(a)
#' linalg_cond(a, "fro")
#'
#' @export
linalg_cond <- function(A, p = NULL) {
  # Condition number with respect to the matrix norm selected by `p`.
  kappa <- torch_linalg_cond(A, p = p)
  kappa
}
#' Computes the numerical rank of a matrix.
#'
#' The matrix rank is computed as the number of singular values
#' (or eigenvalues in absolute value when `hermitian = TRUE`)
#' that are greater than the specified `tol` threshold.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `hermitian = TRUE`, `A` is assumed to be Hermitian if complex or
#' symmetric if real, but this is not checked internally. Instead, just the lower
#' triangular part of the matrix is used in the computations.
#'
#' If `tol` is not specified and `A` is a matrix of dimensions `(m, n)`,
#' the tolerance is set to be
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' tol = \\\\sigma_1 \\\\max(m, n) \\\\varepsilon
#' ")}
#'
#' where \eqn{\sigma_1} is the largest singular value
#' (or eigenvalue in absolute value when `hermitian = TRUE`), and
#' \eqn{\varepsilon} is the epsilon value for the dtype of `A` (see [torch_finfo()]).
#'
#' If `A` is a batch of matrices, `tol` is computed this way for every element of
#' the batch.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more
#' batch dimensions.
#' @param tol (float, Tensor, optional): the tolerance value. See above for
#' the value it takes when `NULL`. Default: `NULL`.
#' @param hermitian (bool, optional): indicates whether `A` is Hermitian if complex
#' or symmetric if real. Default: `FALSE`.
#'
#' @examples
#' a <- torch_eye(10)
#' linalg_matrix_rank(a)
#'
#' @family linalg
#' @export
linalg_matrix_rank <- function(A, tol=NULL, hermitian=FALSE) {
  # Two code paths: the native binding exposes separate overloads for an
  # absent tolerance and for a tensor-valued tolerance.
  if (is.null(tol))
    torch_linalg_matrix_rank(self = A, tol = tol, hermitian = hermitian)
  else {
    # A scalar tolerance must be wrapped in a tensor for this overload.
    if (!is_torch_tensor(tol))
      tol <- torch_scalar_tensor(tol)
    # NOTE(review): this branch passes the matrix as `input =` while the
    # branch above uses `self =`; presumably the two generated overloads
    # declare different parameter names -- confirm against the bindings,
    # otherwise one of these calls is passing the matrix by the wrong name.
    torch_linalg_matrix_rank(input = A, tol = tol, hermitian = hermitian)
  }
}
#' Computes the Cholesky decomposition of a complex Hermitian or real symmetric positive-definite matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **Cholesky decomposition** of a complex Hermitian or real symmetric positive-definite matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = LL^{H}\\\\mathrlap{\\\\qquad L \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' where \eqn{L} is a lower triangular matrix and
#' \eqn{L^{H}} is the conjugate transpose when \eqn{L} is complex, and the
#' transpose when \eqn{L} is real-valued.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @seealso
#' - [linalg_cholesky_ex()] for a version of this operation that
#' skips the (slow) error checking by default and instead returns the debug
#' information. This makes it a faster way to check if a matrix is
#' positive-definite.
#' - [linalg_eigh()] for a different decomposition of a Hermitian matrix.
#' The eigenvalue decomposition gives more information about the matrix but it
#' slower to compute than the Cholesky decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian positive-definite matrices.
#'
#' @examples
#' a <- torch_eye(10)
#' linalg_cholesky(a)
#'
#' @family linalg
#' @export
linalg_cholesky <- function(A) {
  # Thin wrapper around the native op; see the roxygen block above for the
  # mathematical contract.
  factor <- torch_linalg_cholesky(A)
  factor
}
#' Computes the QR decomposition of a matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **full QR decomposition** of a matrix
#' \eqn{A \in \mathbb{K}^{m \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = QR\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{m \\\\times m}, R \\\\in \\\\mathbb{K}^{m \\\\times n}}
#' ")}
#'
#' where \eqn{Q} is orthogonal in the real case and unitary in the complex case, and \eqn{R} is upper triangular.
#' When `m > n` (tall matrix), as `R` is upper triangular, its last `m - n` rows are zero.
#' In this case, we can drop the last `m - n` columns of `Q` to form the
#' **reduced QR decomposition**:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = QR\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{m \\\\times n}, R \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' The reduced QR decomposition agrees with the full QR decomposition when `n >= m` (wide matrix).
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The parameter `mode` chooses between the full and reduced QR decomposition.
#'
#' If `A` has shape `(*, m, n)`, denoting `k = min(m, n)`
#' - `mode = 'reduced'` (default): Returns `(Q, R)` of shapes `(*, m, k)`, `(*, k, n)` respectively.
#' - `mode = 'complete'`: Returns `(Q, R)` of shapes `(*, m, m)`, `(*, m, n)` respectively.
#' - `mode = 'r'`: Computes only the reduced `R`. Returns `(Q, R)` with `Q` empty and `R` of shape `(*, k, n)`.
#'
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param mode (str, optional): one of `'reduced'`, `'complete'`, `'r'`.
#' Controls the shape of the returned tensors. Default: `'reduced'`.
#'
#' @returns A list `(Q, R)`.
#'
#' @examples
#' a <- torch_tensor(rbind(c(12., -51, 4), c(6, 167, -68), c(-4, 24, -41)))
#' qr <- linalg_qr(a)
#'
#' torch_mm(qr[[1]], qr[[2]])$round()
#' torch_mm(qr[[1]]$t(), qr[[1]])$round()
#'
#' @family linalg
#' @export
linalg_qr <- function(A, mode = 'reduced') {
  # Delegates to the native op; `mode` selects the reduced/complete/r variant
  # as documented above.
  decomposition <- torch_linalg_qr(A, mode = mode)
  decomposition
}
#' Computes the eigenvalue decomposition of a square matrix if it exists.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalue decomposition** of a square matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} (if it exists) is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = V \\\\operatorname{diag}(\\\\Lambda) V^{-1}\\\\mathrlap{\\\\qquad V \\\\in \\\\mathbb{C}^{n \\\\times n}, \\\\Lambda \\\\in \\\\mathbb{C}^n}
#' ")}
#'
#' This decomposition exists if and only if \eqn{A} is diagonalizable.
#' This is the case when all its eigenvalues are different.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @note The eigenvalues and eigenvectors of a real matrix may be complex.
#'
#' @section Warning:
#'
#' - This function assumes that `A` is diagonalizable (for example, when all the
#' eigenvalues are different). If it is not diagonalizable, the returned
#' eigenvalues will be correct but \eqn{A \neq V \operatorname{diag}(\Lambda)V^{-1}}.
#'
#' - The eigenvectors of a matrix are not unique, nor are they continuous with respect to
#' `A`. Due to this lack of uniqueness, different hardware and software may compute
#' different eigenvectors.
#' This non-uniqueness is caused by the fact that multiplying an eigenvector by a
#' non-zero number produces another set of valid eigenvectors of the matrix.
#' In this implementation, the returned eigenvectors are normalized to have norm
#' `1` and largest real component.
#'
#' - Gradients computed using `V` will only be finite when `A` does not have repeated eigenvalues.
#' Furthermore, if the distance between any two eigenvalues is close to zero,
#' the gradient will be numerically unstable, as it depends on the eigenvalues
#' \eqn{\lambda_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}}.
#'
#' @seealso
#' - [linalg_eigvals()] computes only the eigenvalues. Unlike [linalg_eig()], the gradients of
#' [linalg_eigvals()] are always numerically stable.
#' - [linalg_eigh()] for a (faster) function that computes the eigenvalue decomposition
#' for Hermitian and symmetric matrices.
#' - [linalg_svd()] for a function that computes another type of spectral
#' decomposition that works on matrices of any shape.
#' - [linalg_qr()] for another (much faster) decomposition that works on matrices of
#' any shape.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of diagonalizable matrices.
#'
#' @returns
#' A list `(eigenvalues, eigenvectors)` which corresponds to \eqn{\Lambda} and \eqn{V} above.
#' `eigenvalues` and `eigenvectors` will always be complex-valued, even when `A` is real. The eigenvectors
#' will be given by the columns of `eigenvectors`.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' wv <- linalg_eig(a)
#'
#' @family linalg
#' @export
linalg_eig <- function(A) {
  # Thin wrapper; returns the (eigenvalues, eigenvectors) pair produced by
  # the native implementation.
  spectral <- torch_linalg_eig(A)
  spectral
}
#' Computes the eigenvalues of a square matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalues** of a square matrix \eqn{A \in \mathbb{K}^{n \times n}} are defined
#' as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' p(\\\\lambda) = \\\\operatorname{det}(A - \\\\lambda \\\\mathrm{I}_n)\\\\mathrlap{\\\\qquad \\\\lambda \\\\in \\\\mathbb{C}}
#' ")}
#'
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' @note The eigenvalues of a real matrix may be complex, as the roots of a real polynomial may be complex.
#' The eigenvalues of a matrix are always well-defined, even when the matrix is not diagonalizable.
#'
#' @seealso [linalg_eig()] computes the full eigenvalue decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' w <- linalg_eigvals(a)
#'
#' @family linalg
#' @export
linalg_eigvals <- function(A) {
  # Eigenvalues only; use linalg_eig() for the full decomposition.
  values <- torch_linalg_eigvals(A)
  values
}
#' Computes the eigenvalue decomposition of a complex Hermitian or real symmetric matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalue decomposition** of a complex Hermitian or real symmetric matrix
#' \eqn{A \in \mathbb{K}^{n \times n}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = Q \\\\operatorname{diag}(\\\\Lambda) Q^{H}\\\\mathrlap{\\\\qquad Q \\\\in \\\\mathbb{K}^{n \\\\times n}, \\\\Lambda \\\\in \\\\mathbb{R}^n}
#' ")}
#'
#' where \eqn{Q^{H}} is the conjugate transpose when \eqn{Q} is complex, and the transpose when \eqn{Q} is real-valued.
#' \eqn{Q} is orthogonal in the real case and unitary in the complex case.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' `A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
#' - If `UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
#' - If `UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
#' The eigenvalues are returned in ascending order.
#'
#' @note The eigenvalues of real symmetric or complex Hermitian matrices are always real.
#'
#' @section Warning:
#' - The eigenvectors of a symmetric matrix are not unique, nor are they continuous with
#' respect to `A`. Due to this lack of uniqueness, different hardware and
#' software may compute different eigenvectors.
#' This non-uniqueness is caused by the fact that multiplying an eigenvector by
#' `-1` in the real case or by \eqn{e^{i \phi}, \phi \in \mathbb{R}} in the complex
#' case produces another set of valid eigenvectors of the matrix.
#' This non-uniqueness problem is even worse when the matrix has repeated eigenvalues.
#' In this case, one may multiply the associated eigenvectors spanning
#' the subspace by a rotation matrix and the resulting eigenvectors will be valid
#' eigenvectors.
#' - Gradients computed using the `eigenvectors` tensor will only be finite when
#' `A` has unique eigenvalues.
#' Furthermore, if the distance between any two eigenvalues is close to zero,
#' the gradient will be numerically unstable, as it depends on the eigenvalues
#' \eqn{\lambda_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \lambda_i - \lambda_j}}.
#'
#' @seealso
#' - [linalg_eigvalsh()] computes only the eigenvalues values of a Hermitian matrix.
#' Unlike [linalg_eigh()], the gradients of [linalg_eigvalsh()] are always
#' numerically stable.
#' - [linalg_cholesky()] for a different decomposition of a Hermitian matrix.
#' The Cholesky decomposition gives less information about the matrix but is much faster
#' to compute than the eigenvalue decomposition.
#' - [linalg_eig()] for a (slower) function that computes the eigenvalue decomposition
#' of a not necessarily Hermitian square matrix.
#' - [linalg_svd()] for a (slower) function that computes the more general SVD
#' decomposition of matrices of any shape.
#' - [linalg_qr()] for another (much faster) decomposition that works on general
#' matrices.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian matrices.
#' @param UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
#' of `A` in the computations. Default: `'L'`.
#'
#' @returns
#' A list `(eigenvalues, eigenvectors)` which corresponds to \eqn{\Lambda} and \eqn{Q} above.
#' `eigenvalues` will always be real-valued, even when `A` is complex.
#'
#' It will also be ordered in ascending order.
#' `eigenvectors` will have the same dtype as `A` and will contain the eigenvectors as its columns.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' linalg_eigh(a)
#'
#' @family linalg
#' @export
linalg_eigh <- function(A, UPLO = 'L') {
  # Pass UPLO by name for consistency with linalg_eigvalsh() below, and to
  # guard against positional-argument reordering in the generated binding.
  torch_linalg_eigh(A, UPLO = UPLO)
}
#' Computes the eigenvalues of a complex Hermitian or real symmetric matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **eigenvalues** of a complex Hermitian or real symmetric matrix \eqn{A \in \mathbb{K}^{n \times n}}
#' are defined as the roots (counted with multiplicity) of the polynomial `p` of degree `n` given by
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' p(\\\\lambda) = \\\\operatorname{det}(A - \\\\lambda \\\\mathrm{I}_n)\\\\mathrlap{\\\\qquad \\\\lambda \\\\in \\\\mathbb{R}}
#' ")}
#'
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#'
#' The eigenvalues of a real symmetric or complex Hermitian matrix are always real.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The eigenvalues are returned in ascending order.
#'
#' `A` is assumed to be Hermitian (resp. symmetric), but this is not checked internally, instead:
#' - If `UPLO`\ `= 'L'` (default), only the lower triangular part of the matrix is used in the computation.
#' - If `UPLO`\ `= 'U'`, only the upper triangular part of the matrix is used.
#'
#'
#' @seealso
#' - [linalg_eigh()] computes the full eigenvalue decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of symmetric or Hermitian matrices.
#' @param UPLO ('L', 'U', optional): controls whether to use the upper or lower triangular part
#' of `A` in the computations. Default: `'L'`.
#'
#' @returns
#' A real-valued tensor containing the eigenvalues even when `A` is complex.
#' The eigenvalues are returned in ascending order.
#'
#' @examples
#' a <- torch_randn(2, 2)
#' linalg_eigvalsh(a)
#'
#' @family linalg
#' @export
linalg_eigvalsh <- function(A, UPLO = 'L') {
  # Eigenvalues of a Hermitian/symmetric matrix; UPLO picks which triangle
  # of A is read (see roxygen above).
  values <- torch_linalg_eigvalsh(A, UPLO = UPLO)
  values
}
#' Computes the singular value decomposition (SVD) of a matrix.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **full SVD** of a matrix
#' \eqn{A \in \mathbb{K}^{m \times n}}, if `k = min(m,n)`, is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = U \\\\operatorname{diag}(S) V^{H} \\\\mathrlap{\\\\qquad U \\\\in \\\\mathbb{K}^{m \\\\times m}, S \\\\in \\\\mathbb{R}^k, V \\\\in \\\\mathbb{K}^{n \\\\times n}}
#' ")}
#'
#' where \eqn{\operatorname{diag}(S) \in \mathbb{K}^{m \times n}},
#' \eqn{V^{H}} is the conjugate transpose when \eqn{V} is complex, and the transpose when \eqn{V} is real-valued.
#'
#' The matrices \eqn{U}, \eqn{V} (and thus \eqn{V^{H}}) are orthogonal in the real case, and unitary in the complex case.
#' When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A = U \\\\operatorname{diag}(S) V^{H} \\\\mathrlap{\\\\qquad U \\\\in \\\\mathbb{K}^{m \\\\times k}, S \\\\in \\\\mathbb{R}^k, V \\\\in \\\\mathbb{K}^{k \\\\times n}}
#' ")}
#'
#' where \eqn{\operatorname{diag}(S) \in \mathbb{K}^{k \times k}}.
#'
#' In this case, \eqn{U} and \eqn{V} also have orthonormal columns.
#' Supports input of float, double, cfloat and cdouble dtypes.
#'
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' The returned decomposition is a named tuple `(U, S, Vᴴ)`
#' which corresponds to \eqn{U}, \eqn{S}, \eqn{V^{H}} above.
#'
#' The singular values are returned in descending order.
#' The parameter `full_matrices` chooses between the full (default) and reduced SVD.
#'
#' @note
#' When `full_matrices=TRUE`, the gradients with respect to `U[..., :, min(m, n):]`
#' and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
#' of the corresponding subspaces.
#'
#' @section Warnings:
#' The returned tensors `U` and `V` are not unique, nor are they continuous with
#' respect to `A`.
#' Due to this lack of uniqueness, different hardware and software may compute
#' different singular vectors.
#' This non-uniqueness is caused by the fact that multiplying any pair of singular
#' vectors \eqn{u_k, v_k} by `-1` in the real case or by
#' \eqn{e^{i \phi}, \phi \in \mathbb{R}} in the complex case produces another two
#' valid singular vectors of the matrix.
#' This non-uniqueness problem is even worse when the matrix has repeated singular values.
#' In this case, one may multiply the associated singular vectors of `U` and `V` spanning
#' the subspace by a rotation matrix and the resulting vectors will span the same subspace.
#'
#' Gradients computed using `U` or `Vᴴ` will only be finite when
#' `A` does not have zero as a singular value or repeated singular values.
#' Furthermore, if the distance between any two singular values is close to zero,
#' the gradient will be numerically unstable, as it depends on the singular values
#' \eqn{\sigma_i} through the computation of
#' \eqn{\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}}.
#' The gradient will also be numerically unstable when `A` has small singular
#' values, as it also depends on the computation of \eqn{\frac{1}{\sigma_i}}.
#'
#' @seealso
#' - [linalg_svdvals()] computes only the singular values.
#' Unlike [linalg_svd()], the gradients of [linalg_svdvals()] are always
#' numerically stable.
#' - [linalg_eig()] for a function that computes another type of spectral
#' decomposition of a matrix. The eigendecomposition works only on square matrices.
#' - [linalg_eigh()] for a (faster) function that computes the eigenvalue decomposition
#' for Hermitian and symmetric matrices.
#' - [linalg_qr()] for another (much faster) decomposition that works on general
#' matrices.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param full_matrices (bool, optional): controls whether to compute the full or reduced
#' SVD, and consequently, the shape of the returned tensors `U` and `Vᴴ`. Default: `TRUE`.
#'
#' @returns
#' A list `(U, S, V)` which corresponds to \eqn{U}, \eqn{S}, \eqn{V^{H}} above.
#' `S` will always be real-valued, even when `A` is complex.
#' It will also be ordered in descending order.
#' `U` and `Vᴴ` will have the same dtype as `A`. The left / right singular vectors will be given by
#' the columns of `U` and the rows of `Vᴴ` respectively.
#'
#' @examples
#'
#' a <- torch_randn(5, 3)
#' linalg_svd(a, full_matrices=FALSE)
#'
#' @family linalg
#' @export
linalg_svd <- function(A, full_matrices = TRUE) {
  # Delegates to the native op; `full_matrices` switches between the full
  # (default) and reduced SVD described above.
  decomposition <- torch_linalg_svd(A, full_matrices = full_matrices)
  decomposition
}
#' Computes the singular values of a matrix.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' The singular values are returned in descending order.
#'
#' @seealso
#' [linalg_svd()] computes the full singular value decomposition.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#'
#' @returns
#' A real-valued tensor, even when `A` is complex.
#'
#' @examples
#' A <- torch_randn(5, 3)
#' S <- linalg_svdvals(A)
#' S
#'
#' @family linalg
#' @export
linalg_svdvals <- function(A) {
  # Singular values only; use linalg_svd() for the full decomposition.
  values <- torch_linalg_svdvals(A)
  values
}
#' Computes the solution of a square system of linear equations with a unique solution.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' this function computes the solution \eqn{X \in \mathbb{K}^{n \times k}} of the **linear system** associated to
#' \eqn{A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{m \times k}}, which is defined as
#'
#' \deqn{
#' AX = B
#' }
#'
#' This system of linear equations has one solution if and only if \eqn{A} is invertible.
#' This function assumes that \eqn{A} is invertible.
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#'
#' Letting `*` be zero or more batch dimensions,
#'
#' - If `A` has shape `(*, n, n)` and `B` has shape `(*, n)` (a batch of vectors) or shape
#' `(*, n, k)` (a batch of matrices or "multiple right-hand sides"), this function returns `X` of shape
#' `(*, n)` or `(*, n, k)` respectively.
#' - Otherwise, if `A` has shape `(*, n, n)` and `B` has shape `(n,)` or `(n, k)`, `B`
#' is broadcasted to have shape `(*, n)` or `(*, n, k)` respectively.
#'
#' This function then returns the solution of the resulting batch of systems of linear equations.
#'
#' @note
#' This function computes `X = A$inverse() @ B` in a faster and
#' more numerically stable way than performing the computations separately.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions.
#' @param B (Tensor): right-hand side tensor of shape `(*, n)` or `(*, n, k)` or `(n,)` or `(n, k)`
#' according to the rules described above
#'
#' @examples
#' A <- torch_randn(3, 3)
#' b <- torch_randn(3)
#' x <- linalg_solve(A, b)
#' torch_allclose(torch_matmul(A, x), b)
#'
#' @family linalg
#' @export
linalg_solve <- function(A, B) {
  # Solves AX = B directly in the native implementation (no explicit inverse
  # is formed; see the roxygen note above).
  solution <- torch_linalg_solve(A, B)
  solution
}
#' Computes a solution to the least squares problem of a system of linear equations.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' the **least squares problem** for a linear system \eqn{AX = B} with
#' \eqn{A \in \mathbb{K}^{m \times n}, B \in \mathbb{K}^{m \times k}} is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' \\\\min_{X \\\\in \\\\mathbb{K}^{n \\\\times k}} \\\\|AX - B\\\\|_F
#' ")}
#'
#' where \eqn{\|-\|_F} denotes the Frobenius norm.
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#'
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#' `driver` chooses the LAPACK/MAGMA function that will be used.
#'
#' For CPU inputs the valid values are `'gels'`, `'gelsy'`, `'gelsd'`, `'gelss'`.
#' For CUDA input, the only valid driver is `'gels'`, which assumes that `A` is full-rank.
#'
#' To choose the best driver on CPU consider:
#' - If `A` is well-conditioned (its [condition number](https://pytorch.org/docs/master/linalg.html#torch.linalg.cond) is not too large), or you do not mind some precision loss.
#' - For a general matrix: `'gelsy'` (QR with pivoting) (default)
#' - If `A` is full-rank: `'gels'` (QR)
#' - If `A` is not well-conditioned.
#' - `'gelsd'` (tridiagonal reduction and SVD)
#' - But if you run into memory issues: `'gelss'` (full SVD).
#'
#' See also the [full description of these drivers](https://www.netlib.org/lapack/lug/node27.html)
#'
#' `rcond` is used to determine the effective rank of the matrices in `A`
#' when `driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`).
#' In this case, if \eqn{\sigma_i} are the singular values of `A` in decreasing order,
#' \eqn{\sigma_i} will be rounded down to zero if \eqn{\sigma_i \leq rcond \cdot \sigma_1}.
#' If `rcond = NULL` (default), `rcond` is set to the machine precision of the dtype of `A`.
#'
#' This function returns the solution to the problem and some extra information in a list of
#' four tensors `(solution, residuals, rank, singular_values)`. For inputs `A`, `B`
#' of shape `(*, m, n)`, `(*, m, k)` respectively, it contains
#' - `solution`: the least squares solution. It has shape `(*, n, k)`.
#' - `residuals`: the squared residuals of the solutions, that is, \eqn{\|AX - B\|_F^2}.
#' It has shape equal to the batch dimensions of `A`.
#' It is computed when `m > n` and every matrix in `A` is full-rank,
#' otherwise, it is an empty tensor.
#' If `A` is a batch of matrices and any matrix in the batch is not full rank,
#' then an empty tensor is returned. This behavior may change in a future PyTorch release.
#' - `rank`: tensor of ranks of the matrices in `A`.
#' It has shape equal to the batch dimensions of `A`.
#' It is computed when `driver` is one of (`'gelsy'`, `'gelsd'`, `'gelss'`),
#' otherwise it is an empty tensor.
#' - `singular_values`: tensor of singular values of the matrices in `A`.
#' It has shape `(*, min(m, n))`.
#' It is computed when `driver` is one of (`'gelsd'`, `'gelss'`),
#' otherwise it is an empty tensor.
#'
#' @note
#' This function computes `X = A$pinverse() %*% B` in a faster and
#' more numerically stable way than performing the computations separately.
#'
#' @section Warning:
#' The default value of `rcond` may change in a future PyTorch release.
#' It is therefore recommended to use a fixed value to avoid potential
#' breaking changes.
#'
#' @param A (Tensor): lhs tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param B (Tensor): rhs tensor of shape `(*, m, k)` where `*` is zero or more batch dimensions.
#' @param rcond (float, optional): used to determine the effective rank of `A`.
#' If `rcond = NULL`, `rcond` is set to the machine
#' precision of the dtype of `A` times `max(m, n)`. Default: `NULL`.
#' @param ... currently unused.
#' @param driver (str, optional): name of the LAPACK/MAGMA method to be used.
#' If `NULL`, `'gelsy'` is used for CPU inputs and `'gels'` for CUDA inputs.
#' Default: `NULL`.
#'
#' @returns
#' A list `(solution, residuals, rank, singular_values)`.
#'
#' @examples
#' A <- torch_tensor(rbind(c(10, 2, 3), c(3, 10, 5), c(5, 6, 12)))$unsqueeze(1) # shape (1, 3, 3)
#' B <- torch_stack(list(rbind(c(2, 5, 1), c(3, 2, 1), c(5, 1, 9)),
#' rbind(c(4, 2, 9), c(2, 0, 3), c(2, 5, 3))), dim = 1) # shape (2, 3, 3)
#' X <- linalg_lstsq(A, B)$solution # A is broadcasted to shape (2, 3, 3)
#'
#' @family linalg
#' @export
linalg_lstsq <- function(A, B, rcond = NULL, ..., driver = NULL) {
  ellipsis::check_dots_empty()

  # Choose the default LAPACK/MAGMA driver: 'gelsy' for CPU inputs,
  # 'gels' for CUDA inputs (see the driver discussion in the roxygen above).
  if (is.null(driver)) {
    on_cpu <- !is_torch_tensor(A) || is_cpu_device(A$device)
    driver <- if (on_cpu) "gelsy" else "gels"
  }

  # `rcond` is forwarded only when supplied, so the native default
  # (machine precision of A's dtype) applies when it is NULL.
  call_args <- list(self = A, b = B, driver = driver)
  if (!is.null(rcond)) {
    call_args$rcond <- rcond
  }

  out <- do.call(torch_linalg_lstsq, call_args)
  setNames(out, c("solution", "residuals", "rank", "singular_values"))
}
#' Computes the inverse of a square matrix if it exists.
#'
#' Throws a `runtime_error` if the matrix is not invertible.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' for a matrix \eqn{A \in \mathbb{K}^{n \times n}},
#' its **inverse matrix** \eqn{A^{-1} \in \mathbb{K}^{n \times n}} (if it exists) is defined as
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' A^{-1}A = AA^{-1} = \\\\mathrm{I}_n
#' ")}
#' where \eqn{\mathrm{I}_n} is the `n`-dimensional identity matrix.
#'
#' The inverse matrix exists if and only if \eqn{A} is invertible. In this case,
#' the inverse is unique.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices
#' then the output has the same batch dimensions.
#'
#' Consider using [linalg_solve()] if possible for multiplying a matrix on the left by
#' the inverse, as `linalg_solve(A, B) == A$inv() %*% B`
#' It is always preferred to use [linalg_solve()] when possible, as it is faster and more
#' numerically stable than computing the inverse explicitly.
#'
#' @seealso
#' [linalg_pinv()] computes the pseudoinverse (Moore-Penrose inverse) of matrices
#' of any shape.
#' [linalg_solve()] computes `A$inv() %*% B` with a
#' numerically stable algorithm.
#'
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of invertible matrices.
#'
#' @examples
#' A <- torch_randn(4, 4)
#' linalg_inv(A)
#'
#' @family linalg
#' @export
linalg_inv <- function(A) {
  # Matrix inverse via the native op; prefer linalg_solve() when the inverse
  # is only needed to multiply another matrix.
  inverse <- torch_linalg_inv(self = A)
  inverse
}
#' Computes the pseudoinverse (Moore-Penrose inverse) of a matrix.
#'
#' The pseudoinverse may be defined algebraically,
#' but it is more computationally convenient to understand it through the SVD.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `hermitian= TRUE`, `A` is assumed to be Hermitian if complex or
#' symmetric if real, but this is not checked internally. Instead, just the lower
#' triangular part of the matrix is used in the computations.
#' The singular values (or the norm of the eigenvalues when `hermitian= TRUE`)
#' that are below the specified `rcond` threshold are treated as zero and discarded
#' in the computation.
#'
#' @note This function uses [linalg_svd()] if `hermitian= FALSE` and
#' [linalg_eigh()] if `hermitian= TRUE`.
#' For CUDA inputs, this function synchronizes that device with the CPU.
#'
#' @note
#' Consider using [linalg_lstsq()] if possible for multiplying a matrix on the left by
#' the pseudoinverse, as `linalg_lstsq(A, B)$solution == A$pinv() %*% B`
#'
#' It is always preferred to use [linalg_lstsq()] when possible, as it is faster and more
#' numerically stable than computing the pseudoinverse explicitly.
#'
#' @seealso
#' - [linalg_inv()] computes the inverse of a square matrix.
#' - [linalg_lstsq()] computes `A$pinv() %*% B` with a
#' numerically stable algorithm.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param rcond (float or Tensor, optional): the tolerance value to determine when is a singular value zero
#' If it is a `torch_Tensor`, its shape must be
#' broadcastable to that of the singular values of
#' `A` as returned by [linalg_svd()].
#' Default: `1e-15`.
#' @param hermitian (bool, optional): indicates whether `A` is Hermitian if complex
#' or symmetric if real. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(3, 5)
#' linalg_pinv(A)
#'
#' @family linalg
#' @export
linalg_pinv <- function(A, rcond = 1e-15, hermitian = FALSE) {
  result <- torch_linalg_pinv(A, rcond = rcond, hermitian = hermitian)
  # NOTE(review): the native call can return one extra leading dimension
  # relative to A (presumably when `rcond` broadcasts); drop it so the output
  # rank matches the input — TODO confirm the exact trigger.
  if (length(dim(result)) != length(dim(A))) {
    result <- result$squeeze(1)
  }
  result
}
#' Computes the `n`-th power of a square matrix for an integer `n`.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#'
#' If `n=0`, it returns the identity matrix (or batch) of the same shape
#' as `A`. If `n` is negative, it returns the inverse of each matrix
#' (if invertible) raised to the power of `abs(n)`.
#'
#' @seealso
#' [linalg_solve()] computes `A$inverse() %*% B` with a
#' numerically stable algorithm.
#'
#'
#' @param A (Tensor): tensor of shape `(*, m, m)` where `*` is zero or more batch dimensions.
#' @param n (int): the exponent.
#'
#' @examples
#' A <- torch_randn(3, 3)
#' linalg_matrix_power(A, 0)
#'
#' @family linalg
#' @export
linalg_matrix_power <- function(A, n) {
  # n-th integer power of a square matrix; n = 0 yields the identity and
  # negative n inverts first (see roxygen above).
  powered <- torch_linalg_matrix_power(A, n = n)
  powered
}
#' Efficiently multiplies two or more matrices
#'
#' Efficiently multiplies two or more matrices by reordering the multiplications so that
#' the fewest arithmetic operations are performed.
#'
#' Supports inputs of `float`, `double`, `cfloat` and `cdouble` dtypes.
#' This function does not support batched inputs.
#'
#' Every tensor in `tensors` must be 2D, except for the first and last which
#' may be 1D. If the first tensor is a 1D vector of shape `(n,)` it is treated as a row vector
#' of shape `(1, n)`, similarly if the last tensor is a 1D vector of shape `(n,)` it is treated
#' as a column vector of shape `(n, 1)`.
#'
#' If the first and last tensors are matrices, the output will be a matrix.
#' However, if either is a 1D vector, then the output will be a 1D vector.
#' @note This function is implemented by chaining [torch_mm()] calls after
#' computing the optimal matrix multiplication order.
#'
#' @note The cost of multiplying two matrices with shapes `(a, b)` and `(b, c)` is
#' `a * b * c`. Given matrices `A`, `B`, `C` with shapes `(10, 100)`,
#' `(100, 5)`, `(5, 50)` respectively, we can calculate the cost of different
#' multiplication orders as follows:
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' \\\\begin{align*}
#' \\\\operatorname{cost}((AB)C) &= 10 \\\\times 100 \\\\times 5 + 10 \\\\times 5 \\\\times 50 = 7500 \\\\
#' \\\\operatorname{cost}(A(BC)) &= 10 \\\\times 100 \\\\times 50 + 100 \\\\times 5 \\\\times 50 = 75000
#' \\\\end{align*}
#' ")}
#'
#' In this case, multiplying `A` and `B` first followed by `C` is 10 times faster.
#'
#'
#' @param tensors (`Sequence[Tensor]`): two or more tensors to multiply. The first and last
#' tensors may be 1D or 2D. Every other tensor must be 2D.
#'
#' @examples
#'
#' linalg_multi_dot(list(torch_tensor(c(1,2)), torch_tensor(c(2,3))))
#'
#' @family linalg
#' @export
linalg_multi_dot <- function(tensors) {
  # Delegate to the backend, which reorders the chain of matrix products
  # so that the fewest arithmetic operations are performed.
  result <- torch_linalg_multi_dot(tensors)
  result
}
#' Computes the first `n` columns of a product of Householder matrices.
#'
#' Letting \eqn{\mathbb{K}} be \eqn{\mathbb{R}} or \eqn{\mathbb{C}},
#' for a matrix \eqn{V \in \mathbb{K}^{m \times n}} with columns \eqn{v_i \in \mathbb{K}^m}
#' with \eqn{m \geq n} and a vector \eqn{\tau \in \mathbb{K}^k} with \eqn{k \leq n},
#' this function computes the first \eqn{n} columns of the matrix
#'
#' \Sexpr[results=rd, stage=build]{katex::math_to_rd("
#' H_1H_2 ... H_k \\\\qquad with \\\\qquad H_i = \\\\mathrm{I}_m - \\\\tau_i v_i v_i^{H}
#' ")}
#'
#' where \eqn{\mathrm{I}_m} is the `m`-dimensional identity matrix and
#' \eqn{v^{H}} is the conjugate transpose when \eqn{v} is complex, and the transpose when \eqn{v} is real-valued.
#' See [Representation of Orthogonal or Unitary Matrices](https://www.netlib.org/lapack/lug/node128.html) for
#' further details.
#'
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if the inputs are batches of matrices then
#' the output has the same batch dimensions.
#' @note This function only uses the values strictly below the main diagonal of `A`.
#' The other values are ignored.
#'
#' @seealso
#' - [torch_geqrf()] can be used together with this function to form the `Q` from the
#' [linalg_qr()] decomposition.
#'
#' - [torch_ormqr()] is a related function that computes the matrix multiplication
#' of a product of Householder matrices with another matrix.
#' However, that function is not supported by autograd.
#'
#' @param A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
#' @param tau (Tensor): tensor of shape `(*, k)` where `*` is zero or more batch dimensions.
#'
#' @examples
#' A <- torch_randn(2, 2)
#' h_tau <- torch_geqrf(A)
#' Q <- linalg_householder_product(h_tau[[1]], h_tau[[2]])
#' torch_allclose(Q, linalg_qr(A)[[1]])
#'
#' @family linalg
#' @export
linalg_householder_product <- function(A, tau) {
  # Build the product of Householder matrices from the reflectors stored
  # strictly below the diagonal of `A` and the scalars in `tau`.
  q <- torch_linalg_householder_product(A, tau)
  q
}
#' Computes the multiplicative inverse of [torch_tensordot()]
#'
#' If `m` is the product of the first `ind` dimensions of `A` and `n` is the product of
#' the rest of the dimensions, this function expects `m` and `n` to be equal.
#' If this is the case, it computes a tensor `X` such that
#' `tensordot(A, X, ind)` is the identity matrix in dimension `m`.
#'
#' Supports input of float, double, cfloat and cdouble dtypes.
#'
#' @note Consider using [linalg_tensorsolve()] if possible for multiplying a tensor on the left
#' by the tensor inverse as `linalg_tensorsolve(A, B) == torch_tensordot(linalg_tensorinv(A), B))`
#'
#' It is always preferred to use [linalg_tensorsolve()] when possible, as it is faster and more
#' numerically stable than computing the pseudoinverse explicitly.
#'
#' @seealso
#' - [linalg_tensorsolve()] computes `torch_tensordot(linalg_tensorinv(A), B))`.
#'
#' @param A (Tensor): tensor to invert.
#' @param ind (int): index at which to compute the inverse of [torch_tensordot()]. Default: `3`.
#'
#' @examples
#' A <- torch_eye(4 * 6)$reshape(c(4, 6, 8, 3))
#' Ainv <- linalg_tensorinv(A, ind=3)
#' Ainv$shape
#' B <- torch_randn(4, 6)
#' torch_allclose(torch_tensordot(Ainv, B), linalg_tensorsolve(A, B))
#'
#' A <- torch_randn(4, 4)
#' Atensorinv<- linalg_tensorinv(A, 2)
#' Ainv <- linalg_inv(A)
#' torch_allclose(Atensorinv, Ainv)
#'
#' @family linalg
#' @export
linalg_tensorinv <- function(A, ind = 3L) {
  # R indices are 1-based while the backend expects 0-based dimension
  # indices, so shift `ind` down by one before dispatching.
  zero_based_ind <- ind - 1L
  torch_linalg_tensorinv(A, ind = zero_based_ind)
}
#' Computes the solution `X` to the system `torch_tensordot(A, X) = B`.
#'
#' If `m` is the product of the first `B$ndim` dimensions of `A` and
#' `n` is the product of the rest of the dimensions, this function expects `m` and `n` to be equal.
#' The returned tensor `x` satisfies
#' `tensordot(A, x, dims=x$ndim) == B`.
#'
#' If `dims` is specified, `A` will be reshaped as
#' `A = movedim(A, dims, seq(length(dims) - A$ndim + 1, 0))`
#'
#' Supports inputs of float, double, cfloat and cdouble dtypes.
#'
#' @seealso
#' - [linalg_tensorinv()] computes the multiplicative inverse of
#' [torch_tensordot()].
#'
#' @param A (Tensor): tensor to solve for.
#' @param B (Tensor): the solution
#' @param dims (Tuple[int], optional): dimensions of `A` to be moved.
#' If `NULL`, no dimensions are moved. Default: `NULL`.
#'
#' @examples
#' A <- torch_eye(2 * 3 * 4)$reshape(c(2 * 3, 4, 2, 3, 4))
#' B <- torch_randn(2 * 3, 4)
#' X <- linalg_tensorsolve(A, B)
#' X$shape
#' torch_allclose(torch_tensordot(A, X, dims=X$ndim), B)
#'
#' A <- torch_randn(6, 4, 4, 3, 2)
#' B <- torch_randn(4, 3, 2)
#' X <- linalg_tensorsolve(A, B, dims=c(1, 3))
#' A <- A$permute(c(2, 4, 5, 1, 3))
#' torch_allclose(torch_tensordot(A, X, dims=X$ndim), B, atol=1e-6)
#'
#' @family linalg
#' @export
linalg_tensorsolve <- function(A, B, dims = NULL) {
  # `dims` is forwarded untouched; `NULL` means no dimensions of `A`
  # are moved before solving (see the docs above).
  solution <- torch_linalg_tensorsolve(A, B, dims)
  solution
}
#' Computes the Cholesky decomposition of a complex Hermitian or real
#' symmetric positive-definite matrix.
#'
#' This function skips the (slow) error checking and error message construction
#' of [linalg_cholesky()], instead directly returning the LAPACK
#' error codes as part of a named tuple `(L, info)`. This makes this function
#' a faster way to check if a matrix is positive-definite, and it provides an
#' opportunity to handle decomposition errors more gracefully or performantly
#' than [linalg_cholesky()] does.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' If `A` is not a Hermitian positive-definite matrix, or if it's a batch of matrices
#' and one or more of them is not a Hermitian positive-definite matrix,
#' then `info` stores a positive integer for the corresponding matrix.
#' The positive integer indicates the order of the leading minor that is not positive-definite,
#' and the decomposition could not be completed.
#' `info` filled with zeros indicates that the decomposition was successful.
#' If `check_errors=TRUE` and `info` contains positive integers, then a RuntimeError is thrown.
#' @note If `A` is on a CUDA device, this function may synchronize that device with the CPU.
#' @note This function is "experimental" and it may change in a future PyTorch release.
#' @seealso
#' [linalg_cholesky()] is a NumPy compatible variant that always checks for errors.
#'
#' @param A (Tensor): the Hermitian `n x n` matrix or the batch of such matrices of size
#' `(*, n, n)` where `*` is one or more batch dimensions.
#' @param check_errors (bool, optional): controls whether to check the content of `infos`. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(2, 2)
#' out <- linalg_cholesky_ex(A)
#' out
#'
#' @family linalg
#' @export
linalg_cholesky_ex <- function(A, check_errors = FALSE) {
  # Attach the documented names to the backend's two return values:
  # `L` (the Cholesky factor) and `info` (the LAPACK error codes).
  res <- torch_linalg_cholesky_ex(A, check_errors = check_errors)
  names(res) <- c("L", "info")
  res
}
#' Computes the inverse of a square matrix if it is invertible.
#'
#' Returns a namedtuple `(inverse, info)`. `inverse` contains the result of
#' inverting `A` and `info` stores the LAPACK error codes.
#' If `A` is not an invertible matrix, or if it's a batch of matrices
#' and one or more of them is not an invertible matrix,
#' then `info` stores a positive integer for the corresponding matrix.
#' The positive integer indicates the diagonal element of the LU decomposition of
#' the input matrix that is exactly zero.
#' `info` filled with zeros indicates that the inversion was successful.
#' If `check_errors=TRUE` and `info` contains positive integers, then a RuntimeError is thrown.
#' Supports input of float, double, cfloat and cdouble dtypes.
#' Also supports batches of matrices, and if `A` is a batch of matrices then
#' the output has the same batch dimensions.
#' @note
#' If `A` is on a CUDA device then this function may synchronize
#' that device with the CPU.
#' @note This function is "experimental" and it may change in a future PyTorch release.
#'
#' @seealso
#' [linalg_inv()] is a NumPy compatible variant that always checks for errors.
#'
#' @param A (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions
#' consisting of square matrices.
#' @param check_errors (bool, optional): controls whether to check the content of `info`. Default: `FALSE`.
#'
#' @examples
#' A <- torch_randn(3, 3)
#' out <- linalg_inv_ex(A)
#'
#' @family linalg
#' @importFrom stats setNames
#' @export
linalg_inv_ex <- function(A, check_errors = FALSE) {
  # Name the backend's two return values as documented:
  # `inverse` (the inverted matrix) and `info` (the LAPACK error codes).
  res <- torch_linalg_inv_ex(A, check_errors = check_errors)
  setNames(res, c("inverse", "info"))
}
|
# --------------------------------------------------------------------------- #
# ASCE-ASME paper - code for Fig 1
# --------------------------------------------------------------------------- #
# Updates a prior for the Weibull scale parameter with two observed failure
# times and plots prior vs. posterior cdfs. Helper functions
# (failuretolambda, lambdatofailure, cpinvgamma) come from
# weibull-sys-functions.R; plot settings (ijarcol, rightlegend) from plotdefs.R.
source("weibull-sys-functions.R")
source("plotdefs.R")
# example for Fig 1: assume mean ft = 9
beta <- 2                        # fixed Weibull shape parameter
y0 <- failuretolambda(9, beta)   # prior scale from a mean failure time of 9
n0 <- 2                          # prior pseudo-observation count
t1 <- 1                          # first observed failure time
t2 <- 2                          # second observed failure time
n2 <- n0 + 2                     # posterior count: prior + 2 observations
# updated scale parameter: prior information combined with t1^beta, t2^beta
y2 <- (n0*y0 + t1^beta + t2^beta)/n2
y2
# expected failure time implied by the updated scale
eft2 <- lambdatofailure(y2, beta)
eft2
# variance of the distribution parametrized by (n0, y0)
# (name suggests inverse gamma -- formula y0^2/(1 - 1/n0); verify vs. paper)
invgammavar <- function(n0, y0)
  y0^2/(1-1/n0)
# standard deviation of the same distribution
invgammasd <- function(n0, y0)
  sqrt(y0^2/(1-1/n0))
invgammavar(n2, y2)
invgammasd(n2, y2)
invgammasd(n0, y0)
# side calculation for a mean failure time of 7 with two observations
# (assumes failuretolambda() supplies a default shape -- TODO confirm)
failuretolambda(7)
(2*failuretolambda(7) + 6^2 + 7^2)/4
# the plot
xseq <- seq(0.1, 200, length.out = 300)
# long-format data frame: prior and posterior cdf values stacked over xseq
fig1df <- data.frame(x = rep(xseq, 2),
  y = c(cpinvgamma(xseq, n0, y0), cpinvgamma(xseq, n2, y2)),
  p = factor(rep(c("Prior", "Posterior"), each = length(xseq)),
  levels = c("Prior", "Posterior")))
fig1 <- ggplot(fig1df, aes(x=x)) + geom_line(aes(y=y, group=p, colour=p)) + ylim(0, 1) +
  theme_bw() + ijarcol + rightlegend + xlab(expression(lambda)) + ylab(expression(F(lambda)))
#setEPS()
#postscript("fig1.eps",width=5,height=3)
pdf("fig1-new.pdf",width=6,height=3)
#showtext.begin()
fig1
#showtext.end()
dev.off()
# re-distill with ghostscript so all fonts end up embedded in fig1.pdf
system("gs -o fig1.pdf -sDEVICE=pdfwrite -dEmbedAllFonts=true -dPDFSETTINGS=/prepress fig1-new.pdf")
#embedFonts("fig1.pdf")
#
|
/figure1.R
|
no_license
|
geeeero/asce-asme2016
|
R
| false
| false
| 1,438
|
r
|
# --------------------------------------------------------------------------- #
# ASCE-ASME paper - code for Fig 1
# --------------------------------------------------------------------------- #
source("weibull-sys-functions.R")
source("plotdefs.R")
# example for Fig 1: assume mean ft = 9
beta <- 2
y0 <- failuretolambda(9, beta)
n0 <- 2
t1 <- 1
t2 <- 2
n2 <- n0 + 2
y2 <- (n0*y0 + t1^beta + t2^beta)/n2
y2
eft2 <- lambdatofailure(y2, beta)
eft2
invgammavar <- function(n0, y0)
y0^2/(1-1/n0)
invgammasd <- function(n0, y0)
sqrt(y0^2/(1-1/n0))
invgammavar(n2, y2)
invgammasd(n2, y2)
invgammasd(n0, y0)
failuretolambda(7)
(2*failuretolambda(7) + 6^2 + 7^2)/4
# the plot
xseq <- seq(0.1, 200, length.out = 300)
fig1df <- data.frame(x = rep(xseq, 2),
y = c(cpinvgamma(xseq, n0, y0), cpinvgamma(xseq, n2, y2)),
p = factor(rep(c("Prior", "Posterior"), each = length(xseq)),
levels = c("Prior", "Posterior")))
fig1 <- ggplot(fig1df, aes(x=x)) + geom_line(aes(y=y, group=p, colour=p)) + ylim(0, 1) +
theme_bw() + ijarcol + rightlegend + xlab(expression(lambda)) + ylab(expression(F(lambda)))
#setEPS()
#postscript("fig1.eps",width=5,height=3)
pdf("fig1-new.pdf",width=6,height=3)
#showtext.begin()
fig1
#showtext.end()
dev.off()
system("gs -o fig1.pdf -sDEVICE=pdfwrite -dEmbedAllFonts=true -dPDFSETTINGS=/prepress fig1-new.pdf")
#embedFonts("fig1.pdf")
#
|
# Loading the gapminder and dplyr packages
#
# Before you can work with the gapminder dataset, you'll need to load two R
# packages that contain the tools for working with it, then display the gapminder
# dataset so that you can see what it contains.
#
# To your right, you'll see two windows inside which you can enter code:
# The script.R window, and the R Console. All of your code to solve each
# exercise must go inside script.R.
#
# If you hit Submit Answer, your R script is executed and the output is shown
# in the R Console. DataCamp checks whether your submission is correct and gives
# you feedback. You can hit Submit Answer as often as you want. If you're stuck,
# you can ask for a hint or a solution.
#
# You can use the R Console interactively by simply typing R code and hitting
# Enter. When you work in the console directly, your code will not be checked
# for correctness so it is a great way to experiment and explore.
# NOTE(review): this is a DataCamp exercise transcript; the install.packages()
# calls are kept for that reason. In a reusable script, install packages once
# interactively rather than on every run.
install.packages("gapminder")
library(gapminder)
install.packages("dplyr")
library(dplyr)
View(gapminder)
#How many observations (rows) are in the dataset?
summary(gapminder)
dim(gapminder)
View(gapminder)
# ---- dplyr verbs: filter, arrange, mutate ----
# Add a filter() line after the pipe (%>%) to extract only the observations
# from the year 1957. Remember that you use == to compare two values.
# Filter the gapminder dataset for the year 1957
gapminder %>%
  filter(year==1957)
# Filter the gapminder data to retrieve only the observation from China in
# the year 2002.
gapminder %>%
  filter(country=="China", year == 2002)
# Sort in ascending order of lifeExp
gapminder %>%
  arrange(lifeExp)
# Sort in descending order of lifeExp
gapminder %>%
  arrange(desc(lifeExp))
# Filter for the year 1957, then arrange in descending order of population
gapminder %>%
  filter(year == 1957) %>%
  arrange(desc(pop))
# Use mutate() to change the existing lifeExp column, by multiplying it
# by 12: 12 * lifeExp.
gapminder %>%
  mutate(lifeExp = 12*lifeExp)
# Use mutate() to add a new column, called lifeExpMonths,
# calculated as 12 * lifeExp.
gapminder %>%
  mutate(lifeExpMonths = 12 * lifeExp)
gapminder %>%
  mutate(lifeExpMonths = 12*lifeExp) %>%
  filter(year == 2007) %>%
  arrange(desc(lifeExpMonths))
# ---- ggplot2 basics: scatter plots, scales, facets ----
# Load the ggplot2 package after the gapminder and dplyr packages.
#
# Filter gapminder for observations from the year 1952, and assign it to a
# new dataset gapminder_1952 using the assignment operator (<-).
install.packages("ggplot2")
library(ggplot2)
gapminder_1952 <- gapminder %>% filter(year == 1952)
head(gapminder_1952)
# Change to put pop on the x-axis and gdpPercap on the y-axis
ggplot(gapminder_1952, aes(x = pop, y = gdpPercap)) +
  geom_point()
# Create a scatter plot with pop on the x-axis and lifeExp on the y-axis
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) + geom_point()
# Change the existing scatter plot (code provided) to put the x-axis
# (representing population) on a log scale.
gapminder_1952 <- gapminder %>%
  filter(year == 1952)
# Change this plot to put the x-axis on a log scale
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) +
  geom_point()+scale_x_log10()
# Scatter plot comparing pop and gdpPercap, with both axes on a log scale
ggplot(gapminder_1952, aes(x = pop, y = gdpPercap))+geom_point()+
  scale_x_log10()+scale_y_log10()
# Scatter plot comparing pop and lifeExp, with color representing continent
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, col = continent))+ geom_point() +
  scale_x_log10()
# NOTE(review): the next plot repeats the previous one verbatim
# (tutorial transcript artifact).
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, col = continent))+ geom_point() +
  scale_x_log10()
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, color = continent,size = gdpPercap)) +
  geom_point() +
  scale_x_log10()
# Create a scatter plot of gapminder_1952 with the x-axis representing population
# (pop), the y-axis representing life expectancy (lifeExp), and faceted to have one
# subplot per continent (continent). Put the x-axis on a log scale.
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) + geom_point() + scale_x_log10() + facet_wrap(~continent)
# Scatter plot comparing gdpPercap and lifeExp, with color representing continent
# and size representing population, faceted by year
ggplot(gapminder, aes(x=gdpPercap, y = lifeExp, col = continent,
  size = pop)) + geom_point() + scale_x_log10() + facet_wrap(~year)
# ---- Grouped summaries with summarize() and group_by() ----
# Use the median() function within a summarize() to find the median life expectancy.
# Save it into a column called medianLifeExp.
gapminder %>%
  summarize(medianlifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy
gapminder %>%
  filter(year == 1957) %>%
  summarize(medianlifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy and the maximum GDP per
# capita
gapminder %>%
  filter(year == 1957)%>%
  summarize(medianlifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find the median life expectancy (lifeExp) and maximum GDP per capita (gdpPercap)
# within each year, saving them into medianLifeExp and maxGdpPercap, respectively.
gapminder %>%
  group_by(year)%>%
  summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))%>%
  arrange(desc(medianLifeExp))
# Filter the gapminder data for the year 1957. Then find the median life expectancy
# (lifeExp) and maximum GDP per capita (gdpPercap) within each continent, saving them
# into medianLifeExp and maxGdpPercap, respectively.
gapminder %>%
  filter(year == 1957)%>%
  group_by(continent)%>%
  summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find the median life expectancy (lifeExp) and maximum GDP per capita (gdpPercap)
# within each combination of continent and year, saving them into medianLifeExp and
# maxGdpPercap, respectively.
gapminder %>%
  group_by(continent, year)%>%
  summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Use the by_year dataset to create a scatter plot showing the change of median life
# expectancy over time, with year on the x-axis and medianLifeExp on the y-axis. Be
# sure to add expand_limits(y = 0) to make sure the plot's y-axis includes zero.
by_year <- gapminder %>%
  group_by(year) %>%
  summarize(medianLifeExp = median(lifeExp),maxGdpPercap = max(gdpPercap))
ggplot(by_year, aes(x = year, y = medianLifeExp)) + geom_point() + expand_limits(y=0)
# Summarize medianGdpPercap within each continent within each year: by_year_continent
by_year_continent <- gapminder %>%
  group_by(year, continent)%>%
  summarize(medianGdpPercap = median(gdpPercap))
# Plot the change in medianGdpPercap in each continent over time
ggplot(by_year_continent, aes(x = year, y = medianGdpPercap, col = continent)) + geom_point() + expand_limits(y=0)
# Summarize the median GDP and median life expectancy per continent in 2007
by_continent_2007 <- gapminder %>%
  filter(year==2007)%>%
  group_by(continent)%>%
  summarize(medianLifeExp = median(lifeExp), medianGdpPercap = median(gdpPercap))
# Use a scatter plot to compare the median GDP and median life expectancy
ggplot(by_continent_2007, aes(x = medianGdpPercap, y = medianLifeExp, col = continent)) + geom_point() + expand_limits(y=0)
# ---- Line plots, bar plots, histograms, boxplots ----
# Use group_by() and summarize() to find the median GDP per capita within each year,
# calling the output column medianGdpPercap. Use the assignment operator <- to save it
# to a dataset called by_year.
# Use the by_year dataset to create a line plot showing the change in median GDP per
# capita over time. Be sure to use expand_limits(y = 0) to include 0 on the y-axis.
by_year <- gapminder%>%
  group_by(year)%>%
  summarise(medianGdpPercap = median(gdpPercap))
ggplot(by_year, aes(x = year, y = medianGdpPercap))+ geom_line() + expand_limits(y = 0)
# Use group_by() and summarize() to find the median GDP per capita within each year and
# continent, calling the output column medianGdpPercap. Use the assignment operator
# <- to save it to a dataset called by_year_continent.
# Use the by_year_continent dataset to create a line plot showing the change in median
# GDP per capita over time, with color representing continent. Be sure to use
# expand_limits(y = 0) to include 0 on the y-axis.
# Summarize the median gdpPercap by year & continent, save as by_year_continent
by_year_continent <- gapminder %>%
  group_by(year, continent)%>%
  summarize(medianGdpPercap = median(gdpPercap))
# Create a line plot showing the change in medianGdpPercap by continent over time
ggplot(by_year_continent, aes(x = year, y = medianGdpPercap, col = continent)) + geom_line() + expand_limits(y=0)
# Summarize the median gdpPercap by continent in 1952
by_continent <- gapminder%>%
  filter(year == 1952)%>%
  group_by(continent)%>%
  summarize(medianGdpPercap = median(gdpPercap))
# Create a bar plot showing medianGdp by continent
ggplot(by_continent, aes(x = continent, y = medianGdpPercap)) + geom_col()
# Filter for observations in the Oceania continent in 1952
oceania_1952 <- gapminder%>%
  filter(year==1952 & continent=='Oceania')
oceania_1952
# Create a bar plot of gdpPercap by country
ggplot(oceania_1952, aes(x = country, y = gdpPercap)) + geom_col()
# Use the gapminder_1952 dataset to create a histogram of country population
# (pop_by_mil) in the year 1952. Inside the histogram geom, set the number of
# bins to 50.
library(gapminder)
library(dplyr)
library(ggplot2)
gapminder_1952 <- gapminder %>%
  filter(year == 1952) %>%
  mutate(pop_by_mil = pop / 1000000)
# Create a histogram of population (pop_by_mil)
ggplot(gapminder_1952, aes(x=pop_by_mil)) + geom_histogram(bins = 50)
#Use the gapminder_1952 dataset (code is provided) to create a histogram
#of country population (pop) in the year 1952, putting the x-axis on a log
#scale with scale_x_log10().
gapminder_1952 <- gapminder %>%
  filter(year == 1952)
# Create a histogram of population (pop), with x on a log scale
ggplot(gapminder_1952, aes(x=pop)) + geom_histogram() + scale_x_log10()
#Use the gapminder_1952 dataset (code is provided) to create a boxplot
#comparing GDP per capita (gdpPercap) among continents. Put the y-axis on a
#log scale with scale_y_log10().
gapminder_1952 <- gapminder %>%
  filter(year == 1952)
# Create a boxplot comparing gdpPercap among continents
ggplot(gapminder_1952, aes(x = continent, y = gdpPercap)) + geom_boxplot() + scale_y_log10()
# Add a title to this graph: "Comparing GDP per capita across continents"
ggplot(gapminder_1952, aes(x = continent, y = gdpPercap)) +
  geom_boxplot() +
  scale_y_log10() +
  ggtitle("Comparing GDP per capita across continents")
|
/Introduction to Tidyverse.R
|
no_license
|
pranjalshandilya/DataCamp-R-Data-Scientist
|
R
| false
| false
| 11,079
|
r
|
# Loading the gapminder and dplyr packages
#
# Before you can work with the gapminder dataset, you'll need to load two R
# packages that contain the tools for working with it, then display the gapminder
# dataset so that you can see what it contains.
#
# To your right, you'll see two windows inside which you can enter code:
# The script.R window, and the R Console. All of your code to solve each
# exercise must go inside script.R.
#
# If you hit Submit Answer, your R script is executed and the output is shown
# in the R Console. DataCamp checks whether your submission is correct and gives
# you feedback. You can hit Submit Answer as often as you want. If you're stuck,
# you can ask for a hint or a solution.
#
# You can use the R Console interactively by simply typing R code and hitting
# Enter. When you work in the console directly, your code will not be checked
# for correctness so it is a great way to experiment and explore.
install.packages("gapminder")
library(gapminder)
install.packages("dplyr")
library(dplyr)
View(gapminder)
#How many observations (rows) are in the dataset?
summary(gapminder)
dim(gapminder)
View(gapminder)
# Add a filter() line after the pipe (%>%) to extract only the observations
# from the year 1957. Remember that you use == to compare two values.
# Filter the gapminder dataset for the year 1957
gapminder %>%
filter(year==1957)
# Filter the gapminder data to retrieve only the observation from China in
# the year 2002.
gapminder %>%
filter(country=="China", year == 2002)
# Sort in ascending order of lifeExp
gapminder %>%
arrange(lifeExp)
# Sort in descending order of lifeExp
gapminder %>%
arrange(desc(lifeExp))
# Filter for the year 1957, then arrange in descending order of population
gapminder %>%
filter(year == 1957) %>%
arrange(desc(pop))
# Use mutate() to change the existing lifeExp column, by multiplying it
# by 12: 12 * lifeExp.
gapminder %>%
mutate(lifeExp = 12*lifeExp)
# Use mutate() to add a new column, called lifeExpMonths,
# calculated as 12 * lifeExp.
gapminder %>%
mutate(lifeExpMonths = 12 * lifeExp)
gapminder %>%
mutate(lifeExpMonths = 12*lifeExp) %>%
filter(year == 2007) %>%
arrange(desc(lifeExpMonths))
# Load the ggplot2 package after the gapminder and dplyr packages.
#
# Filter gapminder for observations from the year 1952, and assign it to a
# new dataset gapminder_1952 using the assignment operator (<-).
install.packages("ggplot2")
library(ggplot2)
gapminder_1952 <- gapminder %>% filter(year == 1952)
head(gapminder_1952)
# Change to put pop on the x-axis and gdpPercap on the y-axis
ggplot(gapminder_1952, aes(x = pop, y = gdpPercap)) +
geom_point()
# Create a scatter plot with pop on the x-axis and lifeExp on the y-axis
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) + geom_point()
# Change the existing scatter plot (code provided) to put the x-axis
# (representing population) on a log scale.
gapminder_1952 <- gapminder %>%
filter(year == 1952)
# Change this plot to put the x-axis on a log scale
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) +
geom_point()+scale_x_log10()
# Scatter plot comparing pop and gdpPercap, with both axes on a log scale
ggplot(gapminder_1952, aes(x = pop, y = gdpPercap))+geom_point()+
scale_x_log10()+scale_y_log10()
# Scatter plot comparing pop and lifeExp, with color representing continent
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, col = continent))+ geom_point() +
scale_x_log10()
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, col = continent))+ geom_point() +
scale_x_log10()
ggplot(gapminder_1952, aes(x = pop, y = lifeExp, color = continent,size = gdpPercap)) +
geom_point() +
scale_x_log10()
# Create a scatter plot of gapminder_1952 with the x-axis representing population
# (pop), the y-axis representing life expectancy (lifeExp), and faceted to have one
# subplot per continent (continent). Put the x-axis on a log scale.
ggplot(gapminder_1952, aes(x = pop, y = lifeExp)) + geom_point() + scale_x_log10() + facet_wrap(~continent)
# Scatter plot comparing gdpPercap and lifeExp, with color representing continent
# and size representing population, faceted by year
ggplot(gapminder, aes(x=gdpPercap, y = lifeExp, col = continent,
size = pop)) + geom_point() + scale_x_log10() + facet_wrap(~year)
# Use the median() function within a summarize() to find the median life expectancy.
# Save it into a column called medianLifeExp.
gapminder %>%
summarize(medianlifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy
gapminder %>%
filter(year == 1957) %>%
summarize(medianlifeExp = median(lifeExp))
# Filter for 1957 then summarize the median life expectancy and the maximum GDP per
# capita
gapminder %>%
filter(year == 1957)%>%
summarize(medianlifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find the median life expectancy (lifeExp) and maximum GDP per capita (gdpPercap)
# within each year, saving them into medianLifeExp and maxGdpPercap, respectively.
gapminder %>%
group_by(year)%>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))%>%
arrange(desc(medianLifeExp))
# Filter the gapminder data for the year 1957. Then find the median life expectancy
# (lifeExp) and maximum GDP per capita (gdpPercap) within each continent, saving them
# into medianLifeExp and maxGdpPercap, respectively.
gapminder %>%
filter(year == 1957)%>%
group_by(continent)%>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Find the median life expectancy (lifeExp) and maximum GDP per capita (gdpPercap)
# within each combination of continent and year, saving them into medianLifeExp and
# maxGdpPercap, respectively.
gapminder %>%
group_by(continent, year)%>%
summarize(medianLifeExp = median(lifeExp), maxGdpPercap = max(gdpPercap))
# Use the by_year dataset to create a scatter plot showing the change of median life
# expectancy over time, with year on the x-axis and medianLifeExp on the y-axis. Be
# sure to add expand_limits(y = 0) to make sure the plot's y-axis includes zero.
by_year <- gapminder %>%
group_by(year) %>%
summarize(medianLifeExp = median(lifeExp),maxGdpPercap = max(gdpPercap))
ggplot(by_year, aes(x = year, y = medianLifeExp)) + geom_point() + expand_limits(y=0)
# Summarize medianGdpPercap within each continent within each year: by_year_continent
by_year_continent <- gapminder %>%
group_by(year, continent)%>%
summarize(medianGdpPercap = median(gdpPercap))
# Plot the change in medianGdpPercap in each continent over time
ggplot(by_year_continent, aes(x = year, y = medianGdpPercap, col = continent)) + geom_point() + expand_limits(y=0)
# Summarize the median GDP and median life expectancy per continent in 2007
by_continent_2007 <- gapminder %>%
filter(year==2007)%>%
group_by(continent)%>%
summarize(medianLifeExp = median(lifeExp), medianGdpPercap = median(gdpPercap))
# Use a scatter plot to compare the median GDP and median life expectancy
ggplot(by_continent_2007, aes(x = medianGdpPercap, y = medianLifeExp, col = continent)) + geom_point() + expand_limits(y=0)
# Use group_by() and summarize() to find the median GDP per capita within each year,
# calling the output column medianGdpPercap. Use the assignment operator <- to save it
# to a dataset called by_year.
# Use the by_year dataset to create a line plot showing the change in median GDP per
# capita over time. Be sure to use expand_limits(y = 0) to include 0 on the y-axis.
by_year <- gapminder%>%
group_by(year)%>%
summarise(medianGdpPercap = median(gdpPercap))
ggplot(by_year, aes(x = year, y = medianGdpPercap))+ geom_line() + expand_limits(y = 0)
# Use group_by() and summarize() to find the median GDP per capita within each year and
# continent, calling the output column medianGdpPercap. Use the assignment operator
# Exercise: change in median GDP per capita over time, by continent.
# The summary is stored as by_year_continent; the line plot colors one line
# per continent and uses expand_limits(y = 0) so the y-axis includes zero.
by_year_continent <- gapminder %>%
  group_by(year, continent) %>%
  summarize(medianGdpPercap = median(gdpPercap))

# Line plot of medianGdpPercap over time, one colored line per continent.
ggplot(by_year_continent,
       aes(x = year, y = medianGdpPercap, color = continent)) +
  geom_line() +
  expand_limits(y = 0)

# Median gdpPercap by continent, restricted to 1952.
by_continent <- gapminder %>%
  filter(year == 1952) %>%
  group_by(continent) %>%
  summarize(medianGdpPercap = median(gdpPercap))

# Bar plot of median GDP per capita by continent.
ggplot(by_continent, aes(x = continent, y = medianGdpPercap)) +
  geom_col()

# Observations from the Oceania continent in 1952.
oceania_1952 <- gapminder %>%
  filter(year == 1952, continent == "Oceania")
oceania_1952

# Bar plot of gdpPercap by country within Oceania, 1952.
ggplot(oceania_1952, aes(x = country, y = gdpPercap)) +
  geom_col()

# Histogram of 1952 country population in millions (pop_by_mil), 50 bins.
library(gapminder)
library(dplyr)
library(ggplot2)

gapminder_1952 <- gapminder %>%
  filter(year == 1952) %>%
  mutate(pop_by_mil = pop / 1000000)

ggplot(gapminder_1952, aes(x = pop_by_mil)) +
  geom_histogram(bins = 50)

# Histogram of 1952 country population (pop) with the x-axis on a log scale.
gapminder_1952 <- gapminder %>%
  filter(year == 1952)

ggplot(gapminder_1952, aes(x = pop)) +
  geom_histogram() +
  scale_x_log10()

# Boxplot comparing gdpPercap among continents, y-axis on a log scale.
ggplot(gapminder_1952, aes(x = continent, y = gdpPercap)) +
  geom_boxplot() +
  scale_y_log10()

# Same boxplot, now with a title.
ggplot(gapminder_1952, aes(x = continent, y = gdpPercap)) +
  geom_boxplot() +
  scale_y_log10() +
  ggtitle("Comparing GDP per capita across continents")
|
\name{overallmean}
\alias{overallmean}
\title{
Global mean matrix
}
\description{
This function returns the global mean matrix of an input matrix
}
\usage{
overallmean(x)
}
\arguments{
\item{x}{
The input matrix. Make sure this is a matrix object
}
}
\details{
This function calculates the mean of all cells within the input matrix. If the matrix has dimension m x n, this function will return a m x n matrix, where each cell has the same result, as the average of the mn elements in the corresponding input matrix.
}
\value{
The overall mean matrix
}
\author{
Yao Wang (wang1150@purdue.edu);
Lingsong Zhang (lingsong@purdue.edu)
}
\seealso{
See Also in \code{\link{svd}}, \code{\link{apply}}, \code{\link{columnmean}}, \code{\link{doublemean}}, \code{\link{rowmean}}.
}
\examples{
#generate a random matrix
x<-matrix(rnorm(100), nrow=20);
#calculate the overall mean matrix
y<-overallmean(x);
y
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{svd}
\keyword{mean}
\keyword{matrix}% __ONLY ONE__ keyword per line
|
/man/overallmean.Rd
|
no_license
|
cran/svdvisual
|
R
| false
| false
| 1,070
|
rd
|
\name{overallmean}
\alias{overallmean}
\title{
Global mean matrix
}
\description{
This function returns the global mean matrix of an input matrix
}
\usage{
overallmean(x)
}
\arguments{
\item{x}{
The input matrix. Make sure this is a matrix object
}
}
\details{
This function calculates the mean of all cells within the input matrix. If the matrix has dimension m x n, this function will return a m x n matrix, where each cell has the same result, as the average of the mn elements in the corresponding input matrix.
}
\value{
The overall mean matrix
}
\author{
Yao Wang (wang1150@purdue.edu);
Lingsong Zhang (lingsong@purdue.edu)
}
\seealso{
See Also in \code{\link{svd}}, \code{\link{apply}}, \code{\link{columnmean}}, \code{\link{doublemean}}, \code{\link{rowmean}}.
}
\examples{
#generate a random matrix
x<-matrix(rnorm(100), nrow=20);
#calculate the overall mean matrix
y<-overallmean(x);
y
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{svd}
\keyword{mean}
\keyword{matrix}% __ONLY ONE__ keyword per line
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Mean of a pollutant across a set of monitor files.
  #
  # directory: path (relative to the working directory, as before) holding
  #            the monitor CSV files named 001.csv .. 332.csv.
  # pollutant: "sulfate" uses column 2 of each file; anything else uses
  #            column 3 (nitrate), matching the original behavior.
  # id:        integer vector of monitor IDs to include.
  #
  # Returns the mean of the selected column over all requested monitors,
  # NA values removed, rounded to 3 decimal places.
  #
  # Fix: the original permanently changed the working directory via setwd(),
  # so a second call with a relative `directory` would fail; paths are now
  # built with file.path() and the working directory is left untouched.
  files <- file.path(directory,
                     paste0(formatC(id, width = 3, flag = "0"), ".csv"))
  monitors <- do.call(rbind, lapply(files, read.csv, header = TRUE))
  # Fixed file layout: column 2 = sulfate, column 3 = nitrate.
  # (Scalar if/else also avoids ifelse() computing both means.)
  col <- if (pollutant == "sulfate") 2L else 3L
  round(mean(monitors[[col]], na.rm = TRUE), 3)
}
|
/pollutantmean variants pretty/post-5093.R
|
no_license
|
aa989190f363e46d/pm.bench.shiny
|
R
| false
| false
| 417
|
r
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Mean of a pollutant across a set of monitor files.
  #
  # directory: path (relative to the working directory, as before) holding
  #            the monitor CSV files named 001.csv .. 332.csv.
  # pollutant: "sulfate" uses column 2 of each file; anything else uses
  #            column 3 (nitrate), matching the original behavior.
  # id:        integer vector of monitor IDs to include.
  #
  # Returns the mean of the selected column over all requested monitors,
  # NA values removed, rounded to 3 decimal places.
  #
  # Fix: the original permanently changed the working directory via setwd(),
  # so a second call with a relative `directory` would fail; paths are now
  # built with file.path() and the working directory is left untouched.
  files <- file.path(directory,
                     paste0(formatC(id, width = 3, flag = "0"), ".csv"))
  monitors <- do.call(rbind, lapply(files, read.csv, header = TRUE))
  # Fixed file layout: column 2 = sulfate, column 3 = nitrate.
  # (Scalar if/else also avoids ifelse() computing both means.)
  col <- if (pollutant == "sulfate") 2L else 3L
  round(mean(monitors[[col]], na.rm = TRUE), 3)
}
|
#' Friedman rank sum test
#'
#' Perform Friedman rank sum test on sensory table. Each sensory attribute
#' is tested separately with \code{friedman.test(value ~ product | panelist)},
#' i.e. products are compared within panelists.
#'
#' @param tbl_sensory a sensory table; the panelist, product and attribute
#'   columns are resolved through \code{parse_meta()}.
#' @return a \code{tbl_sensory_local} tibble with one row per attribute
#'   carrying the test statistic, the p-value and the per-product sums of
#'   values, ordered by decreasing statistic.
#'
#' @importFrom dplyr select group_by mutate arrange
#' @importFrom tidyr gather spread nest unnest
#' @importFrom purrr map map_dbl
#' @importFrom broom tidy
#' @importFrom tibble new_tibble
perform_friedman <- function(tbl_sensory) {
  # Column names carried in the sensory table's metadata.
  meta_panelist <- parse_meta(tbl_sensory, "panelist")
  meta_product <- parse_meta(tbl_sensory, "product")
  meta_attribute <- parse_meta(tbl_sensory, "attribute")
  # Friedman model: rank products within each panelist (blocks).
  fmla <- "value ~ product | panelist"
  tbl <- tbl_sensory %>%
    select(
      panelist = meta_panelist,
      product = meta_product,
      meta_attribute
    ) %>%
    # Long format: one row per panelist x product x attribute.
    gather("attribute", "value", meta_attribute) %>%
    group_by(attribute) %>%
    nest() %>%
    mutate(
      # One Friedman test per attribute.
      model = map(
        data, ~ friedman.test(as.formula(fmla), data = .x)
      ),
      statistic = map_dbl(model, ~tidy(.x)[["statistic"]]),
      p.value = map_dbl(model, ~tidy(.x)[["p.value"]]),
      # Per-product sums of values, already spread wide per attribute.
      values = map(
        data,
        ~ group_by(.x, product) %>%
          summarise(value = sum(value, na.rm = TRUE)) %>%
          spread(product, value))
    ) %>%
    unnest(values) %>%
    # NOTE(review): after unnest(values) the product columns are already wide;
    # this spread() references `product`/`values` columns that do not appear
    # to exist at this point -- confirm against the tidyr version targeted.
    spread(product, values) %>%
    arrange(desc(statistic))
  # Attach the sensory metadata and the local-analysis class.
  res <- new_tibble(tbl,
    "sensory_method" = parse_meta(tbl_sensory, "sensory_method"),
    "method_local" = "Friedman rank sum test",
    "model" = fmla,
    nrow = NROW(tbl),
    class = "tbl_sensory_local"
  )
  return(res)
}
|
/R/perform-friedman.R
|
permissive
|
isoletslicer/sensehubr
|
R
| false
| false
| 1,588
|
r
|
#' Friedman rank sum test
#'
#' Perform Friedman rank sum test on sensory table. Each sensory attribute
#' is tested separately with \code{friedman.test(value ~ product | panelist)},
#' i.e. products are compared within panelists.
#'
#' @param tbl_sensory a sensory table; the panelist, product and attribute
#'   columns are resolved through \code{parse_meta()}.
#' @return a \code{tbl_sensory_local} tibble with one row per attribute
#'   carrying the test statistic, the p-value and the per-product sums of
#'   values, ordered by decreasing statistic.
#'
#' @importFrom dplyr select group_by mutate arrange
#' @importFrom tidyr gather spread nest unnest
#' @importFrom purrr map map_dbl
#' @importFrom broom tidy
#' @importFrom tibble new_tibble
perform_friedman <- function(tbl_sensory) {
  # Column names carried in the sensory table's metadata.
  meta_panelist <- parse_meta(tbl_sensory, "panelist")
  meta_product <- parse_meta(tbl_sensory, "product")
  meta_attribute <- parse_meta(tbl_sensory, "attribute")
  # Friedman model: rank products within each panelist (blocks).
  fmla <- "value ~ product | panelist"
  tbl <- tbl_sensory %>%
    select(
      panelist = meta_panelist,
      product = meta_product,
      meta_attribute
    ) %>%
    # Long format: one row per panelist x product x attribute.
    gather("attribute", "value", meta_attribute) %>%
    group_by(attribute) %>%
    nest() %>%
    mutate(
      # One Friedman test per attribute.
      model = map(
        data, ~ friedman.test(as.formula(fmla), data = .x)
      ),
      statistic = map_dbl(model, ~tidy(.x)[["statistic"]]),
      p.value = map_dbl(model, ~tidy(.x)[["p.value"]]),
      # Per-product sums of values, already spread wide per attribute.
      values = map(
        data,
        ~ group_by(.x, product) %>%
          summarise(value = sum(value, na.rm = TRUE)) %>%
          spread(product, value))
    ) %>%
    unnest(values) %>%
    # NOTE(review): after unnest(values) the product columns are already wide;
    # this spread() references `product`/`values` columns that do not appear
    # to exist at this point -- confirm against the tidyr version targeted.
    spread(product, values) %>%
    arrange(desc(statistic))
  # Attach the sensory metadata and the local-analysis class.
  res <- new_tibble(tbl,
    "sensory_method" = parse_meta(tbl_sensory, "sensory_method"),
    "method_local" = "Friedman rank sum test",
    "model" = fmla,
    nrow = NROW(tbl),
    class = "tbl_sensory_local"
  )
  return(res)
}
|
#' Cumulative commits plot
#'
#' Draws the running total of code contributions over time from a cached
#' GitHub commits file.
#'
#' @export
#' @param x File name, e.g., github_commits_2016-05-19.csv. Default is \code{NULL},
#' and if so, we look for the file with the most recent date in its name in
#' \code{rappdirs::user_cache_dir("rostats")} + "/commits/"
#' @param exclude (character) github user names to exclude
#' @param exclude_core (logical) exclude core user names. Default: \code{FALSE}
#' @examples \dontrun{
#' x <- cum_commits()
#' cum_commits(exclude_core = TRUE)
#' }
cum_commits <- function(x = NULL, exclude = c("sckott", "karthik", "cboettig", "jeroenooms", "jeroen"),
  exclude_core = FALSE) {

  commits <- dplyr::tbl_df(get_github(x))
  # Optionally drop commits authored by the core team.
  if (exclude_core) {
    commits <- dplyr::filter(commits, !author %in% exclude)
  }
  # Per-date commit counts, then the running total.
  daily <- group_by(commits, date)
  daily <- summarise(daily, count = n())
  daily <- mutate(daily, cumsum = cumsum(count))
  # Plot the cumulative contribution curve.
  ggplot(daily, aes(date, cumsum)) +
    geom_line(size = 2) +
    theme_grey(base_size = 18) +
    scale_x_date(labels = scales::date_format("%Y/%m")) +
    labs(x = 'May 2011 to Nov 2017', y = 'Cumulative Code Contributions')
}
|
/R/cumcommits.R
|
no_license
|
ropensci/rostats
|
R
| false
| false
| 1,103
|
r
|
#' Cumulative commits plot
#'
#' Draws the running total of code contributions over time from a cached
#' GitHub commits file.
#'
#' @export
#' @param x File name, e.g., github_commits_2016-05-19.csv. Default is \code{NULL},
#' and if so, we look for the file with the most recent date in its name in
#' \code{rappdirs::user_cache_dir("rostats")} + "/commits/"
#' @param exclude (character) github user names to exclude
#' @param exclude_core (logical) exclude core user names. Default: \code{FALSE}
#' @examples \dontrun{
#' x <- cum_commits()
#' cum_commits(exclude_core = TRUE)
#' }
cum_commits <- function(x = NULL, exclude = c("sckott", "karthik", "cboettig", "jeroenooms", "jeroen"),
  exclude_core = FALSE) {

  commits <- dplyr::tbl_df(get_github(x))
  # Optionally drop commits authored by the core team.
  if (exclude_core) {
    commits <- dplyr::filter(commits, !author %in% exclude)
  }
  # Per-date commit counts, then the running total.
  daily <- group_by(commits, date)
  daily <- summarise(daily, count = n())
  daily <- mutate(daily, cumsum = cumsum(count))
  # Plot the cumulative contribution curve.
  ggplot(daily, aes(date, cumsum)) +
    geom_line(size = 2) +
    theme_grey(base_size = 18) +
    scale_x_date(labels = scales::date_format("%Y/%m")) +
    labs(x = 'May 2011 to Nov 2017', y = 'Cumulative Code Contributions')
}
|
plot.mc.N<-function(mc.object, spp.string, quants=c(0.2, 0.5, 0.8), plot.trials = TRUE, annual.plots = TRUE, connector = "avg", page.layout = c(4, 4), Nylimits = c(0, 3), color.tags = NULL)
{
  # auxiliary function to plot abundances from Monte Carlo trials
  # George Watters
  # code last edited 18 July 2006
  #
  # Arguments:
  #   mc.object    Monte Carlo results list; reads $N[[spp.string]],
  #                $R$maxRage[[spp.string]], $setup (ntrials, nssmus, nyears,
  #                nseasons, ntimes, relative) and, if present, $color.tags.
  #   spp.string   name of the species element within mc.object$N to plot.
  #   quants       up to three quantiles (low, mid, high) drawn in red;
  #                NULL suppresses quantile lines and forces plot.trials=TRUE.
  #   plot.trials  if TRUE, draw every individual trial trajectory.
  #   annual.plots if TRUE, collapse seasons within years; otherwise every
  #                season is plotted on the x-axis.
  #   connector    a character value (e.g. "avg") averages seasons within
  #                each year; a numeric season index connects that one season
  #                across years.
  #   page.layout  par(mfrow) panel layout per plotting window.
  #   Nylimits     common y-axis limits used in every panel.
  #   color.tags   present in the signature but not used below; trial colors
  #                are taken from mc.object$color.tags instead.
  #                # NOTE(review): confirm whether the argument should feed in.
  # Side effects: opens one or more plot windows (one panel per SSMU).
  # NOTE(review): windows() below is MS-Windows-only; dev.new() is portable.
  if(!is.null(mc.object$color.tags)&&max(mc.object$color.tags>15)){
    stop("FAULT -- software not designed to deal with plotting more than 15 colors in a single panel.\nIf you're sure you want to do this we can easily edit the color table.")
  }
  if(!is.null(quants)&&length(quants)>3){ stop("FAULT: Sorry, you can only plot 3 quantiles.") }
  #
  # NOTE(review): eval(parse()) builds "mc.object$N$<spp>";
  # mc.object[["N"]][[spp.string]] would be the direct equivalent.
  tt.data1 <- eval(parse(text = paste(as.character(quote(mc.object)),"$N$",spp.string,sep="")))
  Rage <- eval(parse(text = paste(as.character(quote(mc.object)),"$R$maxRage$",spp.string,sep="")))
  ntrials <- mc.object$setup$ntrials
  nssmus <- mc.object$setup$nssmus
  nyears <- mc.object$setup$nyears
  nseasons <- mc.object$setup$nseasons
  ntimes <- mc.object$setup$ntimes
  #
  # get the desired data as determined by the arguments annual.plots and connector
  # then standardize these data as appropriate
  #
  if(annual.plots){
    season.vector <- rep(1:nseasons, length.out = ntimes)
    year.vector <- rep(1:(ntimes/nseasons),each = nseasons)
    time.label <- "year"
    if(is.character(connector)){
      # character connector: average across seasons within each year
      plot.time <- c(0,unique(year.vector))
      tt.data2 <- array(0,dim=c(length(unique(year.vector))+1,nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- mean(tt.data1[1:nseasons,i,j])
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          tt.y <- c(mean(tt.data1[1:nseasons,i,j]),as.vector(tapply(tt.data1[((Rage+1):(Rage + ntimes)),i,j],list(year.vector),mean)))
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- "avgN/avgN[yr1]"
      if(!is.null(mc.object$setup$relative)){title.prefix <- "avg(relative N)"}
    }
    if(is.numeric(connector)){
      # first check that the connector is a feasible season
      if(connector > nseasons){stop("FAULT: connector season > nseasons")}
      keepers <- (season.vector == connector)
      plot.time <- c(0,year.vector[keepers])
      tt.data2 <- array(0,dim=c(length(unique(year.vector))+1,nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- tt.data1[connector,i,j]
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          tt.y <- c(tt.data1[connector,i,j],tt.data1[((Rage+1):(Rage + ntimes))[keepers],i,j])
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- paste("N",connector,"/N",connector,"[yr1]",sep="")
      if(!is.null(mc.object$setup$relative)){title.prefix <- paste("relative N",connector,sep="")}
    }
  }
  else {
    # seasonal resolution: plot every time step, standardized to time 1
    plot.time <- 0:ntimes
    tt.data2 <- array(0,dim=c(ntimes+1,nssmus,ntrials))
    for(j in 1:ntrials){
      for(i in 1:nssmus){
        tt.denom <- tt.data1[1,i,j]
        # if you have already used relative.mc() then the denominator should be 1
        if(!is.null(mc.object$setup$relative)){tt.denom<-1}
        tt.y <- c(tt.data1[1,i,j],tt.data1[((Rage+1):(Rage + ntimes)),i,j])
        tt.data2[,i,j] <- tt.y/tt.denom
      }
    }
    title.prefix <- "N/N[1]"
    if(!is.null(mc.object$setup$relative)){title.prefix <- "relative N"}
    time.label<-"season"
  }
  #
  # now compute quantiles
  if(is.null(quants)){
    quants <- rep(NA, 3)
    plot.trials <- TRUE
  }
  if(!is.na(quants[2])) {
    ttmed <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[2])
  }
  if(!is.na(quants[1])) {
    ttlow <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[1])
  }
  if(!is.na(quants[3])) {
    tthigh <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[3])
  }
  title.suffix <- paste("quantiles = ", deparse(quants), sep="")
  title.string <- paste(spp.string, title.prefix, title.suffix, sep = " -- ")
  red.width <- ifelse(plot.trials,2,1)
  #
  # set up the color table
  # now actually turn the color.tags into colors that are interpretable by the plot functions
  # black, blue, green, yellow, magenta, orange, cyan, lightgoldenrod, blueviolet, springgreen, gray47, aquamarine3, orange4, purple, yellow4
  if(!is.null(mc.object$color.tags)){
    tt.colors <- colors()[c(24,26,254,652,450,498,68,410,31,610,200,11,502,547,656)]
    tt.colors <- tt.colors[match(mc.object$color.tags,1:15)]
  }
  else{
    tt.colors <- rep("black",ntrials)
  }
  #
  # now do the plotting
  windows()
  origpar <- par(no.readonly=TRUE)
  #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
  par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
  panel.count <- 1
  left.col.panels <- seq(from=1,to=page.layout[1]*page.layout[2],by=page.layout[2])
  bottom.panels <- (1:(page.layout[1]*page.layout[2]))[max(left.col.panels):((page.layout[1]*page.layout[2]))]
  for(i in 1:nssmus) {
    # open a fresh window once the current page of panels is full
    if(panel.count > (page.layout[1] * page.layout[2])) {
      panel.count <- 1
      par(origpar)
      windows()
      #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
      par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
    }
    # axis labels only on the leftmost column and bottom row of panels
    if(is.element(panel.count,left.col.panels)){ylabel<-"relative abundance"}else{ylabel<-""}
    if(is.element(panel.count,bottom.panels)){xlabel<-time.label}else{xlabel<-""}
    if(!all(is.na(tt.data2[, i, 1]))) {
      plot(plot.time, tt.data2[, i, 1], type = "n", ylim = Nylimits, ylab = ylabel,
        xlab = xlabel,axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
      if(plot.trials){
        for(j in 1:ntrials) {
          lines(plot.time, tt.data2[, i, j], col = tt.colors[j])
        }
      }
      if(!is.na(quants[2])){
        lines(plot.time, ttmed[, i], col = "red", lwd = red.width, lty = 1)
      }
      if(!is.na(quants[1])) {
        lines(plot.time, ttlow[, i], col = "red", lwd = red.width, lty = 2)
      }
      if(!is.na(quants[3])) {
        lines(plot.time, tthigh[, i], col = "red", lwd = red.width, lty = 2)
      }
    }
    else {
      # all-NA SSMU: draw an empty panel so the layout stays aligned
      plot(range(plot.time), Nylimits, type = "n", ylab = ylabel, xlab = xlabel, axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
    }
    title(main=paste("SSMU ", i, sep = ""), line = 0.5, outer = FALSE, cex.main = 0.9)
    panel.count <- panel.count + 1
    if(panel.count > (page.layout[1] * page.layout[2])) {
      mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
    }
  }
  mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
|
/plot.mc.N.r
|
no_license
|
EmilyKlein/KPFM2
|
R
| false
| false
| 7,020
|
r
|
plot.mc.N<-function(mc.object, spp.string, quants=c(0.2, 0.5, 0.8), plot.trials = TRUE, annual.plots = TRUE, connector = "avg", page.layout = c(4, 4), Nylimits = c(0, 3), color.tags = NULL)
{
  # auxiliary function to plot abundances from Monte Carlo trials
  # George Watters
  # code last edited 18 July 2006
  #
  # Arguments:
  #   mc.object    Monte Carlo results list; reads $N[[spp.string]],
  #                $R$maxRage[[spp.string]], $setup (ntrials, nssmus, nyears,
  #                nseasons, ntimes, relative) and, if present, $color.tags.
  #   spp.string   name of the species element within mc.object$N to plot.
  #   quants       up to three quantiles (low, mid, high) drawn in red;
  #                NULL suppresses quantile lines and forces plot.trials=TRUE.
  #   plot.trials  if TRUE, draw every individual trial trajectory.
  #   annual.plots if TRUE, collapse seasons within years; otherwise every
  #                season is plotted on the x-axis.
  #   connector    a character value (e.g. "avg") averages seasons within
  #                each year; a numeric season index connects that one season
  #                across years.
  #   page.layout  par(mfrow) panel layout per plotting window.
  #   Nylimits     common y-axis limits used in every panel.
  #   color.tags   present in the signature but not used below; trial colors
  #                are taken from mc.object$color.tags instead.
  #                # NOTE(review): confirm whether the argument should feed in.
  # Side effects: opens one or more plot windows (one panel per SSMU).
  # NOTE(review): windows() below is MS-Windows-only; dev.new() is portable.
  if(!is.null(mc.object$color.tags)&&max(mc.object$color.tags>15)){
    stop("FAULT -- software not designed to deal with plotting more than 15 colors in a single panel.\nIf you're sure you want to do this we can easily edit the color table.")
  }
  if(!is.null(quants)&&length(quants)>3){ stop("FAULT: Sorry, you can only plot 3 quantiles.") }
  #
  # NOTE(review): eval(parse()) builds "mc.object$N$<spp>";
  # mc.object[["N"]][[spp.string]] would be the direct equivalent.
  tt.data1 <- eval(parse(text = paste(as.character(quote(mc.object)),"$N$",spp.string,sep="")))
  Rage <- eval(parse(text = paste(as.character(quote(mc.object)),"$R$maxRage$",spp.string,sep="")))
  ntrials <- mc.object$setup$ntrials
  nssmus <- mc.object$setup$nssmus
  nyears <- mc.object$setup$nyears
  nseasons <- mc.object$setup$nseasons
  ntimes <- mc.object$setup$ntimes
  #
  # get the desired data as determined by the arguments annual.plots and connector
  # then standardize these data as appropriate
  #
  if(annual.plots){
    season.vector <- rep(1:nseasons, length.out = ntimes)
    year.vector <- rep(1:(ntimes/nseasons),each = nseasons)
    time.label <- "year"
    if(is.character(connector)){
      # character connector: average across seasons within each year
      plot.time <- c(0,unique(year.vector))
      tt.data2 <- array(0,dim=c(length(unique(year.vector))+1,nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- mean(tt.data1[1:nseasons,i,j])
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          tt.y <- c(mean(tt.data1[1:nseasons,i,j]),as.vector(tapply(tt.data1[((Rage+1):(Rage + ntimes)),i,j],list(year.vector),mean)))
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- "avgN/avgN[yr1]"
      if(!is.null(mc.object$setup$relative)){title.prefix <- "avg(relative N)"}
    }
    if(is.numeric(connector)){
      # first check that the connector is a feasible season
      if(connector > nseasons){stop("FAULT: connector season > nseasons")}
      keepers <- (season.vector == connector)
      plot.time <- c(0,year.vector[keepers])
      tt.data2 <- array(0,dim=c(length(unique(year.vector))+1,nssmus,ntrials))
      for(j in 1:ntrials){
        for(i in 1:nssmus){
          tt.denom <- tt.data1[connector,i,j]
          # if you have already used relative.mc() then the denominator should be 1
          if(!is.null(mc.object$setup$relative)){tt.denom<-1}
          tt.y <- c(tt.data1[connector,i,j],tt.data1[((Rage+1):(Rage + ntimes))[keepers],i,j])
          tt.data2[,i,j] <- tt.y/tt.denom
        }
      }
      title.prefix <- paste("N",connector,"/N",connector,"[yr1]",sep="")
      if(!is.null(mc.object$setup$relative)){title.prefix <- paste("relative N",connector,sep="")}
    }
  }
  else {
    # seasonal resolution: plot every time step, standardized to time 1
    plot.time <- 0:ntimes
    tt.data2 <- array(0,dim=c(ntimes+1,nssmus,ntrials))
    for(j in 1:ntrials){
      for(i in 1:nssmus){
        tt.denom <- tt.data1[1,i,j]
        # if you have already used relative.mc() then the denominator should be 1
        if(!is.null(mc.object$setup$relative)){tt.denom<-1}
        tt.y <- c(tt.data1[1,i,j],tt.data1[((Rage+1):(Rage + ntimes)),i,j])
        tt.data2[,i,j] <- tt.y/tt.denom
      }
    }
    title.prefix <- "N/N[1]"
    if(!is.null(mc.object$setup$relative)){title.prefix <- "relative N"}
    time.label<-"season"
  }
  #
  # now compute quantiles
  if(is.null(quants)){
    quants <- rep(NA, 3)
    plot.trials <- TRUE
  }
  if(!is.na(quants[2])) {
    ttmed <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[2])
  }
  if(!is.na(quants[1])) {
    ttlow <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[1])
  }
  if(!is.na(quants[3])) {
    tthigh <- apply(tt.data2, 2, FUN = function(x, prob)
    {
      apply(x, 1, quantile, probs = prob, na.rm = TRUE)
    }
    , prob = quants[3])
  }
  title.suffix <- paste("quantiles = ", deparse(quants), sep="")
  title.string <- paste(spp.string, title.prefix, title.suffix, sep = " -- ")
  red.width <- ifelse(plot.trials,2,1)
  #
  # set up the color table
  # now actually turn the color.tags into colors that are interpretable by the plot functions
  # black, blue, green, yellow, magenta, orange, cyan, lightgoldenrod, blueviolet, springgreen, gray47, aquamarine3, orange4, purple, yellow4
  if(!is.null(mc.object$color.tags)){
    tt.colors <- colors()[c(24,26,254,652,450,498,68,410,31,610,200,11,502,547,656)]
    tt.colors <- tt.colors[match(mc.object$color.tags,1:15)]
  }
  else{
    tt.colors <- rep("black",ntrials)
  }
  #
  # now do the plotting
  windows()
  origpar <- par(no.readonly=TRUE)
  #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
  par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
  panel.count <- 1
  left.col.panels <- seq(from=1,to=page.layout[1]*page.layout[2],by=page.layout[2])
  bottom.panels <- (1:(page.layout[1]*page.layout[2]))[max(left.col.panels):((page.layout[1]*page.layout[2]))]
  for(i in 1:nssmus) {
    # open a fresh window once the current page of panels is full
    if(panel.count > (page.layout[1] * page.layout[2])) {
      panel.count <- 1
      par(origpar)
      windows()
      #par(oma = c(0, 0, 2, 0), mfrow = page.layout)
      par(oma = c(4, 2, 4, 3), mar=c(4,4,1,0)+0.1, mgp=c(2,0.75,0), xpd=FALSE, mfrow = page.layout)
    }
    # axis labels only on the leftmost column and bottom row of panels
    if(is.element(panel.count,left.col.panels)){ylabel<-"relative abundance"}else{ylabel<-""}
    if(is.element(panel.count,bottom.panels)){xlabel<-time.label}else{xlabel<-""}
    if(!all(is.na(tt.data2[, i, 1]))) {
      plot(plot.time, tt.data2[, i, 1], type = "n", ylim = Nylimits, ylab = ylabel,
        xlab = xlabel,axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
      if(plot.trials){
        for(j in 1:ntrials) {
          lines(plot.time, tt.data2[, i, j], col = tt.colors[j])
        }
      }
      if(!is.na(quants[2])){
        lines(plot.time, ttmed[, i], col = "red", lwd = red.width, lty = 1)
      }
      if(!is.na(quants[1])) {
        lines(plot.time, ttlow[, i], col = "red", lwd = red.width, lty = 2)
      }
      if(!is.na(quants[3])) {
        lines(plot.time, tthigh[, i], col = "red", lwd = red.width, lty = 2)
      }
    }
    else {
      # all-NA SSMU: draw an empty panel so the layout stays aligned
      plot(range(plot.time), Nylimits, type = "n", ylab = ylabel, xlab = xlabel, axes=FALSE)
      box()
      axis(1,cex.axis=0.8)
      axis(2,cex.axis=0.8)
    }
    title(main=paste("SSMU ", i, sep = ""), line = 0.5, outer = FALSE, cex.main = 0.9)
    panel.count <- panel.count + 1
    if(panel.count > (page.layout[1] * page.layout[2])) {
      mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
    }
  }
  mtext(title.string, line = 1, cex = 0.75, outer = TRUE)
}
|
library(glmnet)

# Elastic-net fit (alpha = 0.5) for the thyroid training set, selected by
# 10-fold cross-validation on mean absolute error; the fitted path summary
# is appended to ./thyroid_060.txt.
# Fixes: `<-` instead of `=` for assignment, `header = TRUE` instead of the
# partially matched `head = T`, and a local name that no longer shadows
# stats::glm.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/thyroid.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; columns 4+ are the predictors.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.5,
                    family = "gaussian", standardize = FALSE)

# Append the glmnet coefficient path summary to the results log.
sink('./thyroid_060.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Lasso/thyroid/thyroid_060.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 346
|
r
|
library(glmnet)

# Elastic-net fit (alpha = 0.5) for the thyroid training set, selected by
# 10-fold cross-validation on mean absolute error; the fitted path summary
# is appended to ./thyroid_060.txt.
# Fixes: `<-` instead of `=` for assignment, `header = TRUE` instead of the
# partially matched `head = T`, and a local name that no longer shadows
# stats::glm.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/thyroid.csv",
                     header = TRUE, sep = ",")

# Column 1 is the response; columns 4+ are the predictors.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.5,
                    family = "gaussian", standardize = FALSE)

# Append the glmnet coefficient path summary to the results log.
sink('./thyroid_060.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
#' @title remakeDGP
#' @description regenerates a fresh sample of size n from a previously
#' created data-generating process (dgp) specification, reusing the stored
#' functional forms, term selections and coefficients so the new draw comes
#' from the same dgp.
#' @param n, sample size to draw
#' @param object, a dgp specification list; the elements read here are
#' f_Aforms, f_Yforms, terms, skewage, termsQW, terms_inter, coef_Q, coef_G,
#' U_Wdist, d, num.binaries, depth and pos
#' @param limit_inter currently accepted but never referenced in the body
#' @return a list with DF (data frame of covariates W1..Wd, treatment A and
#' outcome Y), the sample true treatment effects TE_n, the covariate matrix
#' Wn, the true outcome probabilities under treatment PQ1n and control PQ0n,
#' the observed outcome probabilities PQn, and the propensity scores PGn
#' @export
#' @example /inst/examples/example_get.dgp.R
remakeDGP = function(n, object, limit_inter = NULL)
{
  # Unpack the stored dgp specification.
  f_Aforms = object$f_Aforms
  f_Yforms = object$f_Yforms
  terms = object$terms
  skewage = object$skewage
  termsQW = object$termsQW
  terms_inter = object$terms_inter
  coef_Q = object$coef_Q
  U_Wdist = object$U_Wdist
  d = object$d
  num.binaries = object$num.binaries
  depth = object$depth
  coef_G = object$coef_G
  # duplicated assignment (coef_Q was already set above); harmless
  coef_Q = object$coef_Q
  pos = object$pos
  # sample size of population
  # the population matrix of potential confounders, consisting of normals and binaries
  # NOTE(review): `types`/`no.types` are only referenced by commented-out
  # code further down; they appear to be dead here.
  types = list(function(x) sin(x), function(x) cos(x),
    function(x) x^2, function(x) x, function(x) x^3, function(x) exp(x))
  no.types = length(types)
  # Create the W matrix
  Wmat = lapply(1:d, FUN = function(i) {
    W = effect(n, dist = U_Wdist[[i]]$dist, params = U_Wdist[[i]]$params)
    return(W)})
  U_W = do.call(cbind, Wmat)
  # Apply the stored treatment-side transforms; the first num.binaries
  # covariates are binaries and pass through untransformed.
  f_A = lapply(1:length(f_Aforms), FUN = function(x) {
    if (x<=num.binaries) return(Wmat[[x]]) else {
      return(do.call(f_Aforms[[x]], list(x=Wmat[[x]])))
    }})
  f_A = do.call(cbind, f_A)
  # NOTE(review): this loop runs over length(f_Aforms) while applying
  # f_Yforms -- confirm the two lists always have equal length.
  f_Y = lapply(1:length(f_Aforms), FUN = function(x) {
    if (x<=num.binaries) return(Wmat[[x]]) else {
      return(do.call(f_Yforms[[x]], list(x=Wmat[[x]])))
    }})
  f_Y = do.call(cbind, f_Y)
  # All of the interaction combos of columns possible up to the depth of interaction
  # user specifies
  choos = lapply(1:depth, FUN = function(x) {
    c = combn(1:d, x)
    if (!is.matrix(c)) c = as.matrix(c)
    return(c)
  })
  # combine specified columns as to randomly chosen interactions
  col.comb = lapply(1:length(terms), FUN = function(a) {
    col.choos = terms[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_A[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # put the cols in one matrix used for p-score
  dfG = do.call(cbind, col.comb)
  # transform the columns by plugging into randomly drawn functions and standardize
  # so no variable dominates unnecessarily
  dfG = apply(dfG, 2, FUN = function(col) {
    if (all(col == 1 | col ==0)) {
      v = col
      return(v)
    } else {
      v = (col - mean(col))/sd(col)
      return(v)
    }
  })
  # create an intercept for skewing deliberately
  dfG = cbind(dfG, rep(1, n))
  PG = plogis(dfG %*% c(coef_G, skewage))
  # Creating A based on p-scores for whole population of 1e6
  # Truncate propensity scores into (pos, 1 - pos) for positivity.
  PG = pmin(pmax(PG, pos), 1-pos)
  # hist(PG0, breaks = 100)
  A = rbinom(n, 1, PG)
  # combine W interactions and mains for OC
  col.combQ = lapply(1:length(termsQW), FUN = function(a) {
    col.choos = termsQW[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_Y[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # combine cols used for interaction with A
  col.comb_inter = lapply(1:length(terms_inter), FUN = function(a) {
    col.choos = terms_inter[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_Y[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # put the cols in one matrix for W interactions and mains
  dfQWA = do.call(cbind, col.combQ)
  dfQWA = cbind(dfQWA, A)
  # put the cols in one matrix for interactions with A = 1
  dfQ_inter = do.call(cbind, col.comb_inter)
  # and for population
  dfQ_interA = apply(dfQ_inter, 2, FUN = function(col) A*col)
  # OC df cols for W interactions and A plugged into randomly drawn functions (types)
  dfQWA = apply(dfQWA, 2, FUN = function(col) {
    if (all(col == 1 | col ==0)) {
      return(col)
    } else {
      # v = types[[sample(1:no.types, 1)]](col)
      v = (col - mean(col))/sd(col)
      return(v)
    }
  })
  # This skips interactions appendages to dfQWA if no interactions are chosen
  no.inters = sum(unlist(lapply(terms_inter, sum)))
  if (no.inters != 0) {
    # apply the fcns as per setting A to its draw, A =1 and A=0
    dfQ_inter0 = vapply(1:ncol(dfQ_interA), FUN = function(col) rep(0,n), FUN.VALUE = rep(1,n))
    # We standardize these columns too for the population as is
    means = apply(dfQ_interA, 2, FUN = function(col) mean(col))
    sds = apply(dfQ_interA, 2, FUN = function(col) sd(col))
    dfQ_interA = apply(dfQ_interA, 2, FUN = function(col) (col - mean(col))/sd(col))
    # We apply this to the pop under A =1 and A = 0 so we apply the same fcn for these as for
    # the true observed population as is
    dfQ_inter = vapply(1:ncol(dfQ_inter), FUN = function(col) {
      (dfQ_inter[,col] - means[col])/sds[col]
    }, FUN.VALUE = rep(1,n))
    dfQ_inter0 = vapply(1:ncol(dfQ_inter0), FUN = function(col) {
      (dfQ_inter0[,col] - means[col])/sds[col]
    }, FUN.VALUE = rep(1,n))
    # standardize the treatment column too, to be fair!!
    dfQ = cbind(dfQWA, dfQ_interA)
    dfQW1 = dfQWA
    dfQW1[, ncol(dfQW1)] = (1 - mean(A))/sd(A)
    dfQ1 = cbind(dfQW1, dfQ_inter)
    dfQW0 = dfQWA
    dfQW0[, ncol(dfQW0)] = - mean(A)/sd(A)
    dfQ0 = cbind(dfQW0, dfQ_inter0)
    # else we have no interactions with A shenanigans and everything is very simple
  } else {
    dfQ = cbind(dfQWA)
    dfQW1 = dfQWA
    dfQW1[, ncol(dfQW1)] = (1 - mean(A))/sd(A)
    dfQ1 = cbind(dfQW1)
    dfQW0 = dfQWA
    dfQW0[, ncol(dfQW0)] = - mean(A)/sd(A)
    dfQ0 = cbind(dfQW0)
  }
  # compute true probs under A = 1 and A = 0 and related truths
  PQ1 = plogis(dfQ1 %*% coef_Q)
  PQ0 = plogis(dfQ0 %*% coef_Q)
  TE_true = PQ1 - PQ0
  # ATE0/BV0 are computed but not returned below.
  ATE0 = mean(TE_true)
  BV0 = var(TE_true)
  # finally we create the population probs of death
  PQ = plogis(dfQ %*% coef_Q)
  PQ = pmin(pmax(PQ, .00001), 1-.00001)
  # hist(PQ)
  # take the draw for the population
  Y = rbinom(n, 1, PQ)
  # make sure our loglikelihood loss is bounded reasonably, no one gets super lucky or unlucky!
  # mean(Y*A/PG0-Y*(1-A)/(1-PG0))
  # ATE0
  # take a sample of size n and return sample probs TEs, the dataframe
  # with covariates but the user never sees the formula. Now they can use DF
  # to try and recover the truth
  TE_n = PQ1 - PQ0
  An = A
  Yn = Y
  Wn = U_W
  DF = as.data.frame(cbind(Wn, An, Yn))
  colnames(DF)[c((d + 1), (d + 2))] = c("A", "Y")
  colnames(DF)[1:d] = paste0("W",1:d)
  return(list(DF = DF, TE_n = TE_n, Wn = Wn,
    PQ1n = PQ1, PQ0n = PQ0, PQn = PQ, PGn = PG))
}
|
/R/remakeDGP.R
|
no_license
|
jlstiles/Simulations
|
R
| false
| false
| 8,719
|
r
|
#' @title get.dgp
#' @description randomly creates a dgp and attempts to satisfy user specs. Number of covariates
#' is not limited but may take a while beyond d = 20 with too many terms. Limit time with depth
#' and maxterms parameters.
#' @param n, sample size
#' @param d, dimension of potential confounders
#' @param pos, a small value to make sure prop scores are in (pos, 1 - pos)
#' @param minATE, minimum causal risk difference for the population.
#' @param minBV, minimum TE variance for population
#' @param depth, specify depth of interaction--must be less than or equal d.
#' @param maxterms, maximum terms per interaction. For example, this would limit
#' two way interactions to maximally 10 terms as well as three way or main terms.
#' With high dimension it is wise to set this low because it might take a while
#' otherwise. Still in development--perhaps future will set this for each depth
#' @param minterms sets a minimum number of total covariate terms, including
#' interactions with eachother--do not set lower than 1.
#' @param mininters sets the minimum number of interactions with treatment to include
#' This must be bigger or equal to minterms
#' @param num.binaries specifies number of main terms you want as binaries, must be
#' less than d.
#' @param force.confounding forces variables used for p-score to overlap with those
#' used for outcome regression.
#' @param skewing randomly skews an otherwise centered dgp for generating binary treatment
#' default is c(-1, 1). Set to c(-5,-1) to deliberately skew more regularly or widen to
#' c(-3, 3) to skew more randomly.
#' @return a sample DF, the true average treatment effect, ATE0 and TE variance
#' BV0, the sample pscores, PGn, the sample true TEs, TE_n, the sample
#' true prob of death under treatment, PQ1n, and prob of death under control
#' PQ0n
#' @export
#' @example /inst/examples/example_get.dgp.R
remakeDGP = function(n, object, limit_inter = NULL)
{
  # Redraw a sample of size n from the data-generating process described by
  # `object` (stored functional forms, selected terms, coefficients and
  # covariate distributions of a previously built dgp), so that repeated
  # calls resample from the same population.
  # NOTE(review): `limit_inter` is accepted but never used in this body --
  # confirm whether it was meant to cap the treatment interactions.
  # Returns a list: DF (sampled data), TE_n (true treatment effects), Wn
  # (raw covariates), PQ1n/PQ0n (true outcome probs under A = 1 / A = 0),
  # PQn (observed-outcome probs) and PGn (propensity scores).
  f_Aforms = object$f_Aforms        # per-covariate transforms for the p-score model
  f_Yforms = object$f_Yforms        # per-covariate transforms for the outcome model
  terms = object$terms              # interaction-term selections for the p-score
  skewage = object$skewage          # intercept coefficient used to skew treatment
  termsQW = object$termsQW          # covariate terms for the outcome regression
  terms_inter = object$terms_inter  # terms interacted with treatment A
  coef_Q = object$coef_Q
  U_Wdist = object$U_Wdist          # stored marginal distributions of the covariates
  d = object$d                      # number of covariates
  num.binaries = object$num.binaries  # first num.binaries covariates are binary
  depth = object$depth              # maximum interaction depth
  coef_G = object$coef_G            # propensity-score coefficients
  # NOTE(review): coef_Q is assigned twice (also set a few lines above).
  coef_Q = object$coef_Q
  pos = object$pos                  # positivity bound for propensity scores
  # sample size of population
  # the population matrix of potential confounders, consisting of normals and binaries
  # `types` mirrors the transform pool used when the dgp was built; it is
  # defined here but never drawn from in this function (see commented line
  # in the standardization step below).
  types = list(function(x) sin(x), function(x) cos(x),
               function(x) x^2, function(x) x, function(x) x^3, function(x) exp(x))
  no.types = length(types)
  # Create the W matrix
  # NOTE(review): effect() is a project helper assumed to draw n values from
  # the stored marginal distribution -- confirm its contract.
  Wmat = lapply(1:d, FUN = function(i) {
    W = effect(n, dist = U_Wdist[[i]]$dist, params = U_Wdist[[i]]$params)
    return(W)})
  U_W = do.call(cbind, Wmat)
  # Transformed covariate columns feeding the treatment model; binary
  # covariates pass through untransformed.
  f_A = lapply(1:length(f_Aforms), FUN = function(x) {
    if (x<=num.binaries) return(Wmat[[x]]) else {
      return(do.call(f_Aforms[[x]], list(x=Wmat[[x]])))
    }})
  f_A = do.call(cbind, f_A)
  # Same construction for the outcome model.
  # NOTE(review): loops over length(f_Aforms) while applying f_Yforms --
  # correct only if the two lists always have equal length; confirm.
  f_Y = lapply(1:length(f_Aforms), FUN = function(x) {
    if (x<=num.binaries) return(Wmat[[x]]) else {
      return(do.call(f_Yforms[[x]], list(x=Wmat[[x]])))
    }})
  f_Y = do.call(cbind, f_Y)
  # All of the interaction combos of columns possible up to the depth of interaction
  # user specifies
  choos = lapply(1:depth, FUN = function(x) {
    c = combn(1:d, x)
    if (!is.matrix(c)) c = as.matrix(c)
    return(c)
  })
  # combine specified columns as to randomly chosen interactions
  col.comb = lapply(1:length(terms), FUN = function(a) {
    col.choos = terms[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      # each selected combination becomes one product column of f_A columns
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_A[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # put the cols in one matrix used for p-score
  dfG = do.call(cbind, col.comb)
  # transform the columns by plugging into randomly drawn functions and standardize
  # so no variable dominates unnecessarily (binary 0/1 columns are left alone)
  dfG = apply(dfG, 2, FUN = function(col) {
    if (all(col == 1 | col ==0)) {
      v = col
      return(v)
    } else {
      v = (col - mean(col))/sd(col)
      return(v)
    }
  })
  # create an intercept for skewing deliberately
  dfG = cbind(dfG, rep(1, n))
  PG = plogis(dfG %*% c(coef_G, skewage))
  # Creating A based on p-scores for whole population of 1e6
  # truncate propensity scores into (pos, 1 - pos) to enforce positivity
  PG = pmin(pmax(PG, pos), 1-pos)
  # hist(PG0, breaks = 100)
  A = rbinom(n, 1, PG)
  # combine W interactions and mains for OC
  col.combQ = lapply(1:length(termsQW), FUN = function(a) {
    col.choos = termsQW[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_Y[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # combine cols used for interaction with A
  col.comb_inter = lapply(1:length(terms_inter), FUN = function(a) {
    col.choos = terms_inter[[a]]
    if (length(col.choos) == 0) {
      return(integer(0))
    } else {
      df = vapply(col.choos, FUN = function(x) {
        col.inds = choos[[a]][,x]
        v = rep(1, n)
        for (c in col.inds) v = v*f_Y[,c]
        return(v)
      }, FUN.VALUE = rep(1, n))
      return(df)
    }
  })
  # put the cols in one matrix for W interactions and mains
  dfQWA = do.call(cbind, col.combQ)
  dfQWA = cbind(dfQWA, A)
  # put the cols in one matrix for interactions with A = 1
  dfQ_inter = do.call(cbind, col.comb_inter)
  # and for population
  dfQ_interA = apply(dfQ_inter, 2, FUN = function(col) A*col)
  # OC df cols for W interactions and A plugged into randomly drawn functions (types)
  dfQWA = apply(dfQWA, 2, FUN = function(col) {
    if (all(col == 1 | col ==0)) {
      return(col)
    } else {
      # v = types[[sample(1:no.types, 1)]](col)
      v = (col - mean(col))/sd(col)
      return(v)
    }
  })
  # This skips interactions appendages to dfQWA if no interactions are chosen
  no.inters = sum(unlist(lapply(terms_inter, sum)))
  if (no.inters != 0) {
    # apply the fcns as per setting A to its draw, A =1 and A=0
    dfQ_inter0 = vapply(1:ncol(dfQ_interA), FUN = function(col) rep(0,n), FUN.VALUE = rep(1,n))
    # We standardize these columns too for the population as is
    means = apply(dfQ_interA, 2, FUN = function(col) mean(col))
    sds = apply(dfQ_interA, 2, FUN = function(col) sd(col))
    dfQ_interA = apply(dfQ_interA, 2, FUN = function(col) (col - mean(col))/sd(col))
    # We apply this to the pop under A =1 and A = 0 so we apply the same fcn for these as for
    # the true observed population as is
    dfQ_inter = vapply(1:ncol(dfQ_inter), FUN = function(col) {
      (dfQ_inter[,col] - means[col])/sds[col]
    }, FUN.VALUE = rep(1,n))
    dfQ_inter0 = vapply(1:ncol(dfQ_inter0), FUN = function(col) {
      (dfQ_inter0[,col] - means[col])/sds[col]
    }, FUN.VALUE = rep(1,n))
    # standardize the treatment column too, to be fair!!
    dfQ = cbind(dfQWA, dfQ_interA)
    dfQW1 = dfQWA
    dfQW1[, ncol(dfQW1)] = (1 - mean(A))/sd(A)
    dfQ1 = cbind(dfQW1, dfQ_inter)
    dfQW0 = dfQWA
    dfQW0[, ncol(dfQW0)] = - mean(A)/sd(A)
    dfQ0 = cbind(dfQW0, dfQ_inter0)
    # else we have no interactions with A shenanigans and everything is very simple
  } else {
    dfQ = cbind(dfQWA)
    dfQW1 = dfQWA
    dfQW1[, ncol(dfQW1)] = (1 - mean(A))/sd(A)
    dfQ1 = cbind(dfQW1)
    dfQW0 = dfQWA
    dfQW0[, ncol(dfQW0)] = - mean(A)/sd(A)
    dfQ0 = cbind(dfQW0)
  }
  # compute true probs under A = 1 and A = 0 and related truths
  PQ1 = plogis(dfQ1 %*% coef_Q)
  PQ0 = plogis(dfQ0 %*% coef_Q)
  TE_true = PQ1 - PQ0
  ATE0 = mean(TE_true)
  BV0 = var(TE_true)
  # finally we create the population probs of death
  PQ = plogis(dfQ %*% coef_Q)
  PQ = pmin(pmax(PQ, .00001), 1-.00001)
  # hist(PQ)
  # take the draw for the population
  Y = rbinom(n, 1, PQ)
  # make sure our loglikelihood loss is bounded reasonably, no one gets super lucky or unlucky!
  # mean(Y*A/PG0-Y*(1-A)/(1-PG0))
  # ATE0
  # take a sample of size n and return sample probs TEs, the dataframe
  # with covariates but the user never sees the formula. Now they can use DF
  # to try and recover the truth
  # NOTE(review): ATE0 and BV0 are computed above but not returned -- confirm
  # whether they should be part of the return list.
  TE_n = PQ1 - PQ0
  An = A
  Yn = Y
  Wn = U_W
  DF = as.data.frame(cbind(Wn, An, Yn))
  colnames(DF)[c((d + 1), (d + 2))] = c("A", "Y")
  colnames(DF)[1:d] = paste0("W",1:d)
  return(list(DF = DF, TE_n = TE_n, Wn = Wn,
              PQ1n = PQ1, PQ0n = PQ0, PQn = PQ, PGn = PG))
}
|
#' If NULL then ...
#'
#' Replace NULL
#'
#' @param x A value to check
#' @param y A value to substitute if x is null
#' @examples
#' \dontrun{
#' x <- NULL
#' x <- x %||% "none"
#' x <- x %||% NA
#' }
#'
#' @name if_null_then
`%||%` <- function(x, y) if (is.null(x)) y else x
is_not_class <- function(x, class) {
  # TRUE when x is neither NULL nor an instance of `class`.
  # inherits() and is.null() both return length-1 logicals, so use the
  # scalar, short-circuiting || rather than the element-wise |.
  !(inherits(x, class) || is.null(x))
}
is_true_false <- function(x) {
  # TRUE only for a single, non-missing logical scalar (TRUE or FALSE).
  if (!is.logical(x) || length(x) != 1L) {
    return(FALSE)
  }
  !is.na(x)
}
do_call_params <- function(fun, params, ..., .map = FALSE) {
  # Call `fun` with the explicit arguments in `...` plus whichever entries of
  # `params` match a formal argument of `fun`; non-matching entries of
  # `params` are silently dropped.
  # With .map = TRUE the call is vectorized over the arguments via mapply().
  fun <- match.fun(fun)
  call_params <- c(list(...), params[names(params) %in% names(formals(fun))])
  # Wrap S3/S4 objects in a one-element list so do.call()/mapply() pass them
  # through as single arguments instead of iterating over their elements.
  call_params <- lapply(call_params, function(x) if (is.object(x)) list(x) else x)
  call_fun <- if (.map) {
    # element-wise application; SIMPLIFY = FALSE keeps a list result
    function(...) mapply(fun, ..., MoreArgs = NULL, SIMPLIFY = FALSE, USE.NAMES = FALSE)
  } else {
    fun
  }
  do.call(call_fun, call_params)
}
# Temporarily widen numeric printing for writing values out.
# options() invisibly returns the previous settings, so the caller can
# restore them later with options(old).
get_set_options <- function() {
  options(
    scipen = 200,  # large scipen penalty avoids scientific notation
    OutDec = ".",
    digits = 22
  )
}
#' helper function to create a temporary xlsx file path for testing purposes
#' @param name stem for the temp file name (an underscore suffix is appended)
#' @export
temp_xlsx <- function(name = "temp_xlsx") {
  prefix <- paste0(name, "_")
  tempfile(pattern = prefix, fileext = ".xlsx")
}
|
/R/utils.R
|
no_license
|
cran/openxlsx
|
R
| false
| false
| 1,351
|
r
|
#' If NULL then ...
#'
#' Replace NULL
#'
#' @param x A value to check
#' @param y A value to substitute if x is null
#' @examples
#' \dontrun{
#' x <- NULL
#' x <- x %||% "none"
#' x <- x %||% NA
#' }
#'
#' @name if_null_then
`%||%` <- function(x, y) if (is.null(x)) y else x
is_not_class <- function(x, class) {
  # TRUE when x is neither NULL nor an instance of `class`.
  # inherits() and is.null() both return length-1 logicals, so use the
  # scalar, short-circuiting || rather than the element-wise |.
  !(inherits(x, class) || is.null(x))
}
is_true_false <- function(x) {
is.logical(x) && length(x) == 1L && !is.na(x)
}
do_call_params <- function(fun, params, ..., .map = FALSE) {
fun <- match.fun(fun)
call_params <- c(list(...), params[names(params) %in% names(formals(fun))])
call_params <- lapply(call_params, function(x) if (is.object(x)) list(x) else x)
call_fun <- if (.map) {
function(...) mapply(fun, ..., MoreArgs = NULL, SIMPLIFY = FALSE, USE.NAMES = FALSE)
} else {
fun
}
do.call(call_fun, call_params)
}
# sets temporary options
# option() returns the original values
get_set_options <- function() {
options(
# increase scipen to avoid writing in scientific
scipen = 200,
OutDec = ".",
digits = 22
)
}
#' helper function to create tempory directory for testing purpose
#' @param name for the temp file
#' @export
temp_xlsx <- function(name = "temp_xlsx") {
tempfile(pattern = paste0(name, "_"), fileext = ".xlsx")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bind_miss.R
\name{mindx}
\alias{mindx}
\title{Missing index}
\usage{
mindx(data, drop_empty = TRUE)
}
\arguments{
\item{data}{a data set with missing values.}
\item{drop_empty}{a logical value. If \code{TRUE}, columns in \code{data} without any
missing values will be dropped from the output. If \code{FALSE}, all column
names in \code{data} will be present in the output.}
}
\value{
a list with indices of missing values in \code{data}.
}
\description{
Missing index
}
\examples{
dat <- data.frame(a = c(1, NA), b = c(NA, 2), c = c(1,2))
mindx(dat)
mindx(dat, drop_empty = FALSE)
}
|
/man/mindx.Rd
|
permissive
|
bcjaeger/ipa
|
R
| false
| true
| 666
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bind_miss.R
\name{mindx}
\alias{mindx}
\title{Missing index}
\usage{
mindx(data, drop_empty = TRUE)
}
\arguments{
\item{data}{a data set with missing values.}
\item{drop_empty}{a logical value. If \code{TRUE}, columns in \code{data} without any
missing values will be dropped from the output. If \code{FALSE}, all column
names in \code{data} will be present in the output.}
}
\value{
a list with indices of missing values in \code{data}.
}
\description{
Missing index
}
\examples{
dat <- data.frame(a = c(1, NA), b = c(NA, 2), c = c(1,2))
mindx(dat)
mindx(dat, drop_empty = FALSE)
}
|
# Author: Sharan Naribole
# Filename: helpers.R
# H-1B Visa Petitions Dashboard web application to enable exploratory data analysis
# on H-1B Visa applications disclosure data in the period 2011-2016
require(lazyeval)
require(dplyr)
job_filter <- function(df, input_vec) {
  # Filter rows of the H-1B dataframe whose JOB_TITLE matches any of the
  # job-type inputs (case-insensitive substring match).
  # Inputs:
  #   df        : H-1B dataset dataframe
  #   input_vec : character vector of job-type inputs
  # Output: filtered dataframe with a JOB_INPUT_CLASS column identifying the
  # matched job type. A row matching several inputs appears once per match,
  # each with its own JOB_INPUT_CLASS. Returns an empty dataframe when
  # nothing matches, and the full dataframe (JOB_INPUT_CLASS = JOB_TITLE)
  # when input_vec is empty.
  if (length(input_vec) == 0) {
    return(df %>%
             mutate(JOB_INPUT_CLASS = JOB_TITLE))
  }
  # Build one filtered frame per input and bind once at the end; this avoids
  # the quadratic copying of growing a data.frame with rbind() in a loop.
  matches <- lapply(input_vec, function(value) {
    df %>%
      filter(regexpr(value, JOB_TITLE, ignore.case = TRUE) != -1) %>%
      mutate(JOB_INPUT_CLASS = toupper(value))
  })
  unique(do.call(rbind, matches))
}
employer_filter <- function(df, input_vec) {
  # Filter rows of the H-1B dataframe whose EMPLOYER_NAME matches any of the
  # inputs (case-insensitive substring match).
  # Inputs:
  #   df        : H-1B dataset dataframe
  #   input_vec : character vector of employer inputs
  # Output: filtered dataframe. Returns df unchanged when input_vec is
  # empty, an empty dataframe when nothing matches. Unlike job_filter(),
  # no new column is created.
  if (length(input_vec) == 0) {
    return(df)
  }
  # Bind all per-input matches in one pass instead of growing with rbind().
  matches <- lapply(input_vec, function(value) {
    df %>%
      filter(regexpr(value, EMPLOYER_NAME, ignore.case = TRUE) != -1)
  })
  unique(do.call(rbind, matches))
}
find_top <- function(df,x_feature,metric, Ntop = 3) {
  # Function to find the top values in x_feature based on metric value
  # Inputs:
  # df : filtered dataframe from job_type, location, employer and year range inputs
  # x_feature : the column in df against which the metric is plotted for e.g., EMPLOYER_NAME
  # metric : metric for data comparison
  # Ntop : number of top values to return (default 3)
  # Output : list of top values in x_feature based on metric
  # NOTE(review): group_by_() and arrange_() are deprecated dplyr SE verbs;
  # modern dplyr would use group_by(across(all_of(x_feature))) etc.
  # NOTE(review): Share divides by a hard-coded 850 -- presumably a fixed
  # denominator related to the H-1B cap; confirm with the data owner.
  arrange_criteria <- interp(~ desc(x), x = as.name(metric))
  df %>%
    group_by_(x_feature) %>%
    mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
    summarise(TotalApps = n(),
              Wage = median(PREVAILING_WAGE),
              CertiApps = sum(certified),
              Share = CertiApps/850) %>%
    arrange_(arrange_criteria) -> top_df
  # guard against fewer than Ntop distinct values
  top_len <- min(dim(top_df)[1],Ntop)
  return(top_df[1:top_len,1])
}
plot_input <- function(df, x_feature, fill_feature, metric,filter = FALSE, ...) {
  # Function to transform the filtered dataframe to one with computed metrics
  # Inputs:
  # df : filtered dataframe from job_type, location, employer and year range inputs
  # x_feature : the column in df against which the metric is plotted for e.g., EMPLOYER_NAME
  # fill_feature : additional level of classification; for e.g., Year
  # metric : metric for data comparison
  # filter : logical operator that filters only the rows with x_feature value belonging to top_find() output
  # ... : forwarded to find_top() (e.g. Ntop)
  # Output : dataframe grouped by x_feature and fill_feature with metrics as columns
  #Finding out the top across the entire range independent of the fill_feature e.g. Year
  top_x <- unlist(find_top(df,x_feature,metric, ...))
  # lazyeval package interp () generates expression that interprets x_feature and metric arguments
  # this is fed into filter_ and arrange_ accordingly
  # Source: https://cran.r-project.org/web/packages/lazyeval/vignettes/lazyeval.html
  # NOTE(review): filter_()/group_by_() and lazyeval::interp() are deprecated;
  # modern dplyr would use .data[[x_feature]] with all_of().
  filter_criteria <- interp(~x %in% y, .values = list(x = as.name(x_feature), y = top_x))
  # NOTE(review): arrange_criteria is computed but never used in this function.
  arrange_criteria <- interp(~ desc(x), x = as.name(metric))
  if(filter == TRUE) {
    df %>%
      filter_(filter_criteria) -> df
  }
  #Grouping by not just x_feature but also fill_feature
  # NOTE(review): Share uses the same hard-coded 850 denominator as find_top().
  return(df %>%
           group_by_(.dots=c(x_feature,fill_feature)) %>%
           mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
           summarise(TotalApps = n(),
                     CertiApps = sum(certified),
                     Wage = median(PREVAILING_WAGE),
                     Share = CertiApps/850))
}
plot_output <- function(df, x_feature, fill_feature, metric, xlabb, ylabb) {
  # Render a dodged, horizontal bar chart of `metric` against `x_feature`,
  # colored by `fill_feature`, from the summarised output of plot_input().
  #   xlabb / ylabb : axis labels
  # Returns a ggplot object.
  # Keep axis labels out of scientific notation.
  options(scipen = 999)
  ggplot(df, aes_string(x = x_feature, y = metric)) +
    geom_bar(aes_string(fill = fill_feature), stat = "identity", position = "dodge") +
    coord_flip() +
    xlab(xlabb) +
    ylab(ylabb) +
    get_theme()
}
map_gen <- function(df,metric,USA,...) {
  # Function to generate map plot for given metric in df
  # This is laid on top of USA map
  # Inputs:
  # df : dataframe with metrics, lat, lon, WORKSITE columns
  # metric : metric for data comparison
  # USA : dataframe for US maps with lat, long columns. map_data(map = "usa") from ggplot2
  # ... : forwarded to find_top() (e.g. Ntop)
  # Output : ggplot object
  # Creating Map Dataframe: per-worksite totals, certified counts and median wage
  df %>%
    mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
    group_by(WORKSITE,lat,lon) %>%
    summarise(TotalApps = n(),CertiApps = sum(certified), Wage = median(PREVAILING_WAGE)) -> map_df
  # # Lat-Long Limits
  # df %>%
  #   summarise(lat_min = min(lat,na.rm=TRUE),
  #             lat_max = max(lat,na.rm=TRUE),
  #             long_min = min(lon,na.rm=TRUE),
  #             long_max = max(lon,na.rm=TRUE)) -> geo_coord
  # Finding top Locations for metric
  top_locations <- unlist(find_top(df,"WORKSITE",metric, ...))
  # First layer : USA Map
  # Second layer : geom_point() with point alpha and size varying with metric
  # Third layer : points mapping to top locations using ggrepel package
  # NOTE(review): `label` inside geom_point()'s aes_string is not a point
  # aesthetic (ggplot2 warns about it); only the repel layer uses labels.
  g <- ggplot(USA, aes(x=long, y=lat)) +
    geom_polygon() + xlab("Longitude (deg)") + ylab("Latitude(deg)") +
    geom_point(data=map_df, aes_string(x="lon", y="lat", label = "WORKSITE", alpha = metric, size = metric), color="yellow") +
    geom_label_repel(data=map_df %>% filter(WORKSITE %in% top_locations),aes_string(x="lon", y="lat",label = "WORKSITE"),
                     fontface = 'bold', color = 'black',
                     box.padding = unit(0.0, "lines"),
                     point.padding = unit(1.0, "lines"),
                     segment.color = 'grey50',
                     force = 3) +
    # Zoom into the specific location input
    #coord_map(ylim = c(max(geo_coord$lat_min - 5,23), min(geo_coord$lat_max - 5,50)),xlim=c(max(geo_coord$long_min - 5,-130),min(geo_coord$long_max + 5,-65))) +
    # Using the whole USA map
    coord_map(ylim = c(23,50),xlim=c(-130,-65)) +
    get_theme()
  return(g)
}
get_theme <- function() {
  # Shared ggplot2 theme: enlarged axis/legend text, bold tick labels,
  # legend placed on the right.
  big_text <- element_text(size = rel(1.5))
  bold_ticks <- element_text(size = rel(1.5), face = "bold")
  theme(
    axis.title = big_text,
    legend.position = "right",
    legend.text = big_text,
    legend.title = big_text,
    axis.text.y = bold_ticks,
    axis.text.x = bold_ticks
  )
}
split_first <- function(word, split = " ") {
  # Return the first token of `word` after splitting on `split`.
  #   word  : string to split
  #   split : separator, passed through to strsplit()
  tokens <- strsplit(word, split = split)[[1]]
  tokens[1]
}
|
/Project1-ExploreVis&Shiny/H1B_Sharan/helpers.R
|
no_license
|
yunweidashuju/online_bootcamp_project
|
R
| false
| false
| 8,111
|
r
|
# Author: Sharan Naribole
# Filename: helpers.R
# H-1B Visa Petitions Dashboard web application to enable exploratory data analysis
# on H-1B Visa applications disclosure data in the period 2011-2016
require(lazyeval)
require(dplyr)
job_filter <- function(df, input_vec) {
  # Filter rows of the H-1B dataframe whose JOB_TITLE matches any of the
  # job-type inputs (case-insensitive substring match).
  # Inputs:
  #   df        : H-1B dataset dataframe
  #   input_vec : character vector of job-type inputs
  # Output: filtered dataframe with a JOB_INPUT_CLASS column identifying the
  # matched job type. A row matching several inputs appears once per match,
  # each with its own JOB_INPUT_CLASS. Returns an empty dataframe when
  # nothing matches, and the full dataframe (JOB_INPUT_CLASS = JOB_TITLE)
  # when input_vec is empty.
  if (length(input_vec) == 0) {
    return(df %>%
             mutate(JOB_INPUT_CLASS = JOB_TITLE))
  }
  # Build one filtered frame per input and bind once at the end; this avoids
  # the quadratic copying of growing a data.frame with rbind() in a loop.
  matches <- lapply(input_vec, function(value) {
    df %>%
      filter(regexpr(value, JOB_TITLE, ignore.case = TRUE) != -1) %>%
      mutate(JOB_INPUT_CLASS = toupper(value))
  })
  unique(do.call(rbind, matches))
}
employer_filter <- function(df, input_vec) {
  # Filter rows of the H-1B dataframe whose EMPLOYER_NAME matches any of the
  # inputs (case-insensitive substring match).
  # Inputs:
  #   df        : H-1B dataset dataframe
  #   input_vec : character vector of employer inputs
  # Output: filtered dataframe. Returns df unchanged when input_vec is
  # empty, an empty dataframe when nothing matches. Unlike job_filter(),
  # no new column is created.
  if (length(input_vec) == 0) {
    return(df)
  }
  # Bind all per-input matches in one pass instead of growing with rbind().
  matches <- lapply(input_vec, function(value) {
    df %>%
      filter(regexpr(value, EMPLOYER_NAME, ignore.case = TRUE) != -1)
  })
  unique(do.call(rbind, matches))
}
find_top <- function(df,x_feature,metric, Ntop = 3) {
# Function to find the top values in x_feature based on metric value
# Inputs:
# df : filtered dataframe from job_type, location, employer and year range inputs
# x_feature : the column in df against which the metric is plotted for e.g., EMPLOYER_NAME
# metric : metric for data comparison
# Output : list of top values in x_feature based on metric
arrange_criteria <- interp(~ desc(x), x = as.name(metric))
df %>%
group_by_(x_feature) %>%
mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
summarise(TotalApps = n(),
Wage = median(PREVAILING_WAGE),
CertiApps = sum(certified),
Share = CertiApps/850) %>%
arrange_(arrange_criteria) -> top_df
top_len <- min(dim(top_df)[1],Ntop)
return(top_df[1:top_len,1])
}
plot_input <- function(df, x_feature, fill_feature, metric,filter = FALSE, ...) {
# Function to transform the filtered dataframe to one with computed metrics
# Inputs:
# df : filtered dataframe from job_type, location, employer and year range inputs
# x_feature : the column in df against which the metric is plotted for e.g., EMPLOYER_NAME
# fill_feature : additional level of classification; for e.g., Year
# metric : metric for data comparison
# filter : logical operator that filters only the rows with x_feature value belonging to top_find() output
# Output : dataframe grouped by x_feature and fill_feature with metrics as columns
#Finding out the top across the entire range independent of the fill_feature e.g. Year
top_x <- unlist(find_top(df,x_feature,metric, ...))
# lazyeval package interp () generates expression that interprets x_feature and metric arguments
# this is fed into filter_ and arrange_ accordingly
# Source: https://cran.r-project.org/web/packages/lazyeval/vignettes/lazyeval.html
filter_criteria <- interp(~x %in% y, .values = list(x = as.name(x_feature), y = top_x))
arrange_criteria <- interp(~ desc(x), x = as.name(metric))
if(filter == TRUE) {
df %>%
filter_(filter_criteria) -> df
}
#Grouping by not just x_feature but also fill_feature
return(df %>%
group_by_(.dots=c(x_feature,fill_feature)) %>%
mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
summarise(TotalApps = n(),
CertiApps = sum(certified),
Wage = median(PREVAILING_WAGE),
Share = CertiApps/850))
}
plot_output <- function(df, x_feature,fill_feature,metric, xlabb,ylabb) {
# Function to plot output
# Inputs:
# df : dataframe output of plot_input()
# x_feature : the column in df against which the metric is plotted for e.g., EMPLOYER_NAME
# fill_feature : additional level of classification; for e.g., Year
# metric : metric for data comparison
# xlabb : x label
# ylabb : y label
# Output : ggplot object
# Prevents numbers on plot transforming into scientific notation
options(scipen = 999)
g <- ggplot(df, aes_string(x=x_feature,y=metric)) +
geom_bar(stat = "identity", aes_string(fill = fill_feature), position = "dodge") +
coord_flip() + xlab(xlabb) + ylab(ylabb) + get_theme()
return(g)
}
map_gen <- function(df,metric,USA,...) {
# Function to generate map plot for given metric in df
# This is laid on top of USA map
# Inputs:
# df : dataframe with metrics, lat, lon, WORKSITE columns
# metric : metric for data comparison
# USA : dataframe for US maps with lat, long columns. map_data(map = "usa") from ggplot2
# Output : ggplot object
# Creating Map Dataframe
df %>%
mutate(certified =ifelse(CASE_STATUS == "CERTIFIED",1,0)) %>%
group_by(WORKSITE,lat,lon) %>%
summarise(TotalApps = n(),CertiApps = sum(certified), Wage = median(PREVAILING_WAGE)) -> map_df
# # Lat-Long Limits
# df %>%
# summarise(lat_min = min(lat,na.rm=TRUE),
# lat_max = max(lat,na.rm=TRUE),
# long_min = min(lon,na.rm=TRUE),
# long_max = max(lon,na.rm=TRUE)) -> geo_coord
# Finding top Locations for metric
top_locations <- unlist(find_top(df,"WORKSITE",metric, ...))
# First layer : USA Map
# Second layer : geom_point() with point alpha and size varying with metric
# Third layer : points mapping to top locations using ggrepel package
g <- ggplot(USA, aes(x=long, y=lat)) +
geom_polygon() + xlab("Longitude (deg)") + ylab("Latitude(deg)") +
geom_point(data=map_df, aes_string(x="lon", y="lat", label = "WORKSITE", alpha = metric, size = metric), color="yellow") +
geom_label_repel(data=map_df %>% filter(WORKSITE %in% top_locations),aes_string(x="lon", y="lat",label = "WORKSITE"),
fontface = 'bold', color = 'black',
box.padding = unit(0.0, "lines"),
point.padding = unit(1.0, "lines"),
segment.color = 'grey50',
force = 3) +
# Zoom into the specific location input
#coord_map(ylim = c(max(geo_coord$lat_min - 5,23), min(geo_coord$lat_max - 5,50)),xlim=c(max(geo_coord$long_min - 5,-130),min(geo_coord$long_max + 5,-65))) +
# Using the whole USA map
coord_map(ylim = c(23,50),xlim=c(-130,-65)) +
get_theme()
return(g)
}
get_theme <- function() {
# Function for ggplot2 graphics parameters
return(
theme(axis.title = element_text(size = rel(1.5)),
legend.position = "right",
legend.text = element_text(size = rel(1.5)),
legend.title = element_text(size=rel(1.5)),
axis.text.y = element_text(size=rel(1.5),face="bold"),
axis.text.x = element_text(size=rel(1.5),face="bold"))
)
}
split_first <- function(word, split = " ") {
  # Return the first token of `word` after splitting on `split`.
  #   word  : string to split
  #   split : separator, passed through to strsplit()
  tokens <- strsplit(word, split = split)[[1]]
  tokens[1]
}
|
model <- function(t, state, parms) {
  # Fir / moose / wolf ODE right-hand side (func(t, state, parms) convention):
  # logistic growth for fir and moose, fir browsed by moose, type-II
  # (saturating) predation of moose by wolves, wolf growth proportional to
  # intake minus a constant death rate.
  v <- as.list(c(state, parms))
  predation <- v$e * v$MOOSE * v$WOLF / (v$h + v$MOOSE)
  dFIR   <- v$rFIR   * v$FIR   * (1 - v$FIR   / v$K1) - v$p * v$FIR * v$MOOSE
  dMOOSE <- v$rMOOSE * v$MOOSE * (1 - v$MOOSE / v$K2) - predation
  dWOLF  <- v$c * predation - v$deathWOLF * v$WOLF
  list(c(dFIR, dMOOSE, dWOLF))
}
# Parameter and state vectors; names must match those referenced in model().
p <- c(rFIR=10,rMOOSE=0.5,K1=20,K2=5.6,c=1/15,e=10,h=4,deathWOLF=1/6,p=1.8,z=0.7,deathMOOSE = 1/10)
s <- c(FIR=13,MOOSE=1.68,WOLF=0.1)
# NOTE(review): run(), plane() and newton() are not defined in this file --
# presumably Grind helpers that read `model`, `p` and `s` from the global
# environment; confirm the script sources grind.R first.
# The `after` hook injects discrete events during integration: a wolf
# decline plus parameter change at t = 12, a moose die-off at t = 26, and a
# wolf reintroduction (restored parameters, +1/535 density) at t = 28.
run(tmax=50,tstep=0.1,after="
if(t == 12) {
state[\"WOLF\"] = 0.2 * state[\"WOLF\"];
parms[\"deathWOLF\"] = 1/3; parms[\"c\"] = 1/30;
};
if(t == 26) {
state[\"MOOSE\"] = 0.4 * state[\"MOOSE\"];
};
if(t == 28) {
parms[\"deathWOLF\"] = 1/6;
state[\"WOLF\"] = state[\"WOLF\"] + 1 / 535;
parms[\"c\"] = 1/15
}"
,ymax=1,xlab = "Time (years)", ylab="Density per km2")
# Phase-plane view with a trajectory, then solve for a wolf-free equilibrium.
plane(xmax=20,ymax=10,eps=-0.01);f<-run(traject=T)
newton(c(FIR=10,MOOSE=3.605551,WOLF=0),plot=T)
#Heb de elk en beer per km2 voor wolven ingevuld in de starthoeveelheden.
#FIRen komen maar op 0.4 tot 0.8 % van het park voor. 0.6 x 1554 = 9.324 km2 Nu alleen nog schatten hoeveel bomen per km2.
# Kunnen we FIRen op de een of andere manier scalen? Maakt de absolute hoeveelheid uit voor de dinamiek?
# Heb de kill rate aangepast want die kan niet boven de 1 zijn.
# Heb de saturatieconstate van de beer gezet op iets meer dan de helft van de elk. Dus bij onze begin hoeveelheden zou de groei dus meer dan half maximaal zijn
# De start hoeveelheden elk en beer en wolf komen letterlijk uit de literatuur.
# De c's e's en h's heb ik uitgezocht wat een beetje werkt, terwijl ik enigszins realistisch probeerde te blijven. De hoge e en hoge c's zijn misschien wel een probleem. Even over nadenken.
# life expectancy wolves: https://www.yellowstonepark.com/things-to-do/wildlife/wolves
# life expectancy black bears, grizzly bear respectively: https://www.nps.gov/yell/learn/nature/black-bear.htm https://www.nps.gov/yell/learn/yellowstone-grizzly-bear-facts.htm
# life expectancy willow: https://nl.wikipedia.org/wiki/FIR#KnotFIR <- TODO not sure about this one
# De Elk birth rate is vooral afhankelijk van de hoeveelheid beschikbare vegetatie en de death rate van de hoeveelheid Elk. Elk Population Processes in Yellowstone National Park Under the Policy of Natural Regulation Michael B. Coughenour and Francis J. Singer Ecological Applications Vol. 6, No. 2 (May, 1996), pp. 573-593
# De Carrying capacity van elk zonder de aanwezigheid van wolven in de Northern Range was rond de 16400 +-2500 afhankelijk van de hoeveelheid consumeerbare vegetatie.Elk Population Processes in Yellowstone National Park Under the Policy of Natural Regulation Michael B. Coughenour and Francis J. Singer Ecological Applications Vol. 6, No. 2 (May, 1996), pp. 573-59
# In 1995 werden wolven geintroduceerd. In 2004 had het aantal elk afgenomen tot 8335 in de Northern Range. In deze periode word de jacht geschat op 27 +- 5% per jaar.In 2003 doden wolven zo'n 1000 Elk per jaar. Dat is meer dan er gejaagd werd. De wolvenpopulatie groeide nog steeds op dit punt.Northern Yellowstone Elk after Wolf Restoration P. J. White and Robert A. Garrott Wildlife Society Bulletin (1973-2006) Vol. 33, No. 3 (Autumn, 2005), pp. 942-955
# 14 wolves werden geintroducedeerd in 1995, 17 in 1996.Nog 10 in 1997. In totaal dus 41. Er werden geen complete packs in gezet. Kunnen wij zorgen dat er precies die hoeveelheden wolven bijkomen in ons model op dat moment? https://www.nps.gov/yell/learn/nature/wolves.htm
# Willow komt voor op ongeveer 0.4 tot 0.8 van yellowstone's Northern Range. Hiermee kunnen wij k uitrekenen als wij ook de hoeveelheid willows per m2 weten en de totale m2 van de northern range. Willow on Yellowstone's Northern Range: Evidence for a Trophic Cascade? Hawthorne L. Beyer, Evelyn H. Merrill, Nathan Varley and Mark S. Boyce Ecological Applications Vol. 17, No. 6 (Sep., 2007), pp. 1563-1571
# De Northern Range van yellowstone is 1553.99287 km2
# Ik kan geen goede wetenschappelijke bron vinden maar ik schat zo'n 500000 tot 1 miljoen FIRen per km2. Ik weet dat dit belachelijk hoog lijkt maar is echt zo.Grootste deel is kleine zaadlingen. Zoek aub een bron.
# We moeten p schatten zodat er zonder wolven vrijwel geen groei van FIRen plaatsvind maar met wolven een beetje.
# Denk niet dat we die conversiefactor van elk naar beren uit de literatuur gaan halen, dus laten we maar een redelijke schatting maken.
# The introduction of wolves has had a positive effect on the amount of bears. Should we simulate that? Impact of Wolf Reintroduction on Bison and Grizzly Bear Populations and Trophic Cascades in Yellowstone National Park Galina Lipkin Department of Biology Lake Forest College Lake Forest, Illinois
# Tegen de tijd dat wolven geintroduceerd werden was de beren populatie al sterk gegroeid tot zo'n 760 grizzly's en 550 zwarte beren. Hamlin et al. 2009, Barber-Meyer et al. 2008
# Heb kill rates e1 en e2 toegevoegd.
|
/IsleRoyale2.R
|
no_license
|
PieterKnops/BiologicalModelling
|
R
| false
| false
| 4,957
|
r
|
model <- function(t, state, parms) {
  # Fir / moose / wolf ODE right-hand side (func(t, state, parms) convention):
  # logistic growth for fir and moose, fir browsed by moose, type-II
  # (saturating) predation of moose by wolves, wolf growth proportional to
  # intake minus a constant death rate.
  v <- as.list(c(state, parms))
  predation <- v$e * v$MOOSE * v$WOLF / (v$h + v$MOOSE)
  dFIR   <- v$rFIR   * v$FIR   * (1 - v$FIR   / v$K1) - v$p * v$FIR * v$MOOSE
  dMOOSE <- v$rMOOSE * v$MOOSE * (1 - v$MOOSE / v$K2) - predation
  dWOLF  <- v$c * predation - v$deathWOLF * v$WOLF
  list(c(dFIR, dMOOSE, dWOLF))
}
p <- c(rFIR=10,rMOOSE=0.5,K1=20,K2=5.6,c=1/15,e=10,h=4,deathWOLF=1/6,p=1.8,z=0.7,deathMOOSE = 1/10)
s <- c(FIR=13,MOOSE=1.68,WOLF=0.1)
run(tmax=50,tstep=0.1,after="
if(t == 12) {
state[\"WOLF\"] = 0.2 * state[\"WOLF\"];
parms[\"deathWOLF\"] = 1/3; parms[\"c\"] = 1/30;
};
if(t == 26) {
state[\"MOOSE\"] = 0.4 * state[\"MOOSE\"];
};
if(t == 28) {
parms[\"deathWOLF\"] = 1/6;
state[\"WOLF\"] = state[\"WOLF\"] + 1 / 535;
parms[\"c\"] = 1/15
}"
,ymax=1,xlab = "Time (years)", ylab="Density per km2")
plane(xmax=20,ymax=10,eps=-0.01);f<-run(traject=T)
newton(c(FIR=10,MOOSE=3.605551,WOLF=0),plot=T)
#Heb de elk en beer per km2 voor wolven ingevuld in de starthoeveelheden.
#FIRen komen maar op 0.4 tot 0.8 % van het park voor. 0.6 x 1554 = 9.324 km2 Nu alleen nog schatten hoeveel bomen per km2.
# Kunnen we FIRen op de een of andere manier schalen? Maakt de absolute hoeveelheid uit voor de dynamiek?
# Heb de kill rate aangepast want die kan niet boven de 1 zijn.
# Heb de saturatieconstante van de beer gezet op iets meer dan de helft van de elk. Dus bij onze begin hoeveelheden zou de groei dus meer dan half maximaal zijn
# De start hoeveelheden elk en beer en wolf komen letterlijk uit de literatuur.
# De c's e's en h's heb ik uitgezocht wat een beetje werkt, terwijl ik enigszins realistisch probeerde te blijven. De hoge e en hoge c's zijn misschien wel een probleem. Even over nadenken.
# life expectancy wolves: https://www.yellowstonepark.com/things-to-do/wildlife/wolves
# life expectancy black bears, grizzly bear respectively: https://www.nps.gov/yell/learn/nature/black-bear.htm https://www.nps.gov/yell/learn/yellowstone-grizzly-bear-facts.htm
# life expectancy willow: https://nl.wikipedia.org/wiki/FIR#KnotFIR <- TODO not sure about this one
# De Elk birth rate is vooral afhankelijk van de hoeveelheid beschikbare vegetatie en de death rate van de hoeveelheid Elk. Elk Population Processes in Yellowstone National Park Under the Policy of Natural Regulation Michael B. Coughenour and Francis J. Singer Ecological Applications Vol. 6, No. 2 (May, 1996), pp. 573-593
# De Carrying capacity van elk zonder de aanwezigheid van wolven in de Northern Range was rond de 16400 +-2500 afhankelijk van de hoeveelheid consumeerbare vegetatie.Elk Population Processes in Yellowstone National Park Under the Policy of Natural Regulation Michael B. Coughenour and Francis J. Singer Ecological Applications Vol. 6, No. 2 (May, 1996), pp. 573-59
# In 1995 werden wolven geintroduceerd. In 2004 had het aantal elk afgenomen tot 8335 in de Northern Range. In deze periode word de jacht geschat op 27 +- 5% per jaar.In 2003 doden wolven zo'n 1000 Elk per jaar. Dat is meer dan er gejaagd werd. De wolvenpopulatie groeide nog steeds op dit punt.Northern Yellowstone Elk after Wolf Restoration P. J. White and Robert A. Garrott Wildlife Society Bulletin (1973-2006) Vol. 33, No. 3 (Autumn, 2005), pp. 942-955
# 14 wolves werden geintroducedeerd in 1995, 17 in 1996.Nog 10 in 1997. In totaal dus 41. Er werden geen complete packs in gezet. Kunnen wij zorgen dat er precies die hoeveelheden wolven bijkomen in ons model op dat moment? https://www.nps.gov/yell/learn/nature/wolves.htm
# Willow komt voor op ongeveer 0.4 tot 0.8 van yellowstone's Northern Range. Hiermee kunnen wij k uitrekenen als wij ook de hoeveelheid willows per m2 weten en de totale m2 van de northern range. Willow on Yellowstone's Northern Range: Evidence for a Trophic Cascade? Hawthorne L. Beyer, Evelyn H. Merrill, Nathan Varley and Mark S. Boyce Ecological Applications Vol. 17, No. 6 (Sep., 2007), pp. 1563-1571
# De Northern Range van yellowstone is 1553.99287 km2
# Ik kan geen goede wetenschappelijke bron vinden maar ik schat zo'n 500000 tot 1 miljoen FIRen per km2. Ik weet dat dit belachelijk hoog lijkt maar is echt zo.Grootste deel is kleine zaadlingen. Zoek aub een bron.
# We moeten p schatten zodat er zonder wolven vrijwel geen groei van FIRen plaatsvindt maar met wolven een beetje.
# Denk niet dat we die conversiefactor van elk naar beren uit de literatuur gaan halen, dus laten we maar een redelijke schatting maken.
# The introduction of wolves has had a positive effect on the amount of bears. Should we simulate that? Impact of Wolf Reintroduction on Bison and Grizzly Bear Populations and Trophic Cascades in Yellowstone National Park Galina Lipkin Department of Biology Lake Forest College Lake Forest, Illinois
# Tegen de tijd dat wolven geintroduceerd werden was de beren populatie al sterk gegroeid tot zo'n 760 grizzly's en 550 zwarte beren. Hamlin et al. 2009, Barber-Meyer et al. 2008
# Heb kill rates e1 en e2 toegevoegd.
|
# Figure S9: one acetoacetate histogram panel per strain group, with
# mean +/- sd bars of the per-group log10 medians overlaid for the
# 0 mM (grey) and 10 mM (pink) AcAc levels.
source('./process_data.R')
source('./plotting.R')
par(mfrow=c(2,3))          # 2 x 3 grid: one panel per group below
par(mar=c(4.5,5,3,1))
setwd('../data/nissle')
address <- getwd()
data <- ProcessData(address)
medians <- GetMedians(data)
groups <- c('nissle','og241','oxb19','patol','loxb19','lpatol')
letters <- c('A)','B)','C)','D)','E)','F)')
# Draw a point at the mean of log10(mds) at height y, with a horizontal
# +/- sd bar and vertical end caps of half-height `epsilon`. Extracted to
# remove the points()/segments() block that was duplicated (including a
# repeated `epsilon` assignment) for the two AcAc levels.
DrawMedianBar <- function(mds, y, col, epsilon = 0.1) {
  m <- mean(log10(mds))
  s <- sd(log10(mds))
  points(m, y, col = col, pch = 20, cex = 1.5)
  segments(m - s, y, m + s, y, col = col)
  segments(m - s, y - epsilon, m - s, y + epsilon, col = col)
  segments(m + s, y - epsilon, m + s, y + epsilon, col = col)
}
count <- 1
for (item in groups){
  group <- item
  # The oxb19-based strains need a shifted x-axis range.
  if (item=='oxb19' || item=='loxb19'){range<-c(3,6,0,3)}
  else {range<-c(1.5,5,0,3)}
  PlotHist(data,group,'acetoacetate','mM',range,'none',c('black','red'))
  # 0 mM level (grey) drawn at y = 2.5; 10 mM level (pink) at y = 2.
  DrawMedianBar(medians[medians$group==group & medians$level==0,]$median, 2.5, 'grey')
  DrawMedianBar(medians[medians$group==group & medians$level==10,]$median, 2, 'pink')
  par(new=FALSE)
  par(adj=0)               # left-align the panel letter
  title(letters[count],cex.main=1.5)
  par(adj=0.5)
  count <- count + 1
  # Draw the legend once, on the first panel only.
  if (count==2){legend('topright',1,c('0mM AcAc','10mM AcAc'),col=c('black','red'),box.lty=0,bty='n',lty=c(1,1),lwd=2)}
}
|
/R/figure_S9.R
|
no_license
|
davidtgonzales/Bayesian-Fitting
|
R
| false
| false
| 1,604
|
r
|
# Figure S9: one acetoacetate histogram panel per strain group, with
# mean +/- sd bars of the per-group log10 medians overlaid for the
# 0 mM (grey) and 10 mM (pink) AcAc levels.
source('./process_data.R')
source('./plotting.R')
par(mfrow=c(2,3))          # 2 x 3 grid: one panel per group below
par(mar=c(4.5,5,3,1))
setwd('../data/nissle')
address <- getwd()
data <- ProcessData(address)
medians <- GetMedians(data)
groups <- c('nissle','og241','oxb19','patol','loxb19','lpatol')
letters <- c('A)','B)','C)','D)','E)','F)')
# Draw a point at the mean of log10(mds) at height y, with a horizontal
# +/- sd bar and vertical end caps of half-height `epsilon`. Extracted to
# remove the points()/segments() block that was duplicated (including a
# repeated `epsilon` assignment) for the two AcAc levels.
DrawMedianBar <- function(mds, y, col, epsilon = 0.1) {
  m <- mean(log10(mds))
  s <- sd(log10(mds))
  points(m, y, col = col, pch = 20, cex = 1.5)
  segments(m - s, y, m + s, y, col = col)
  segments(m - s, y - epsilon, m - s, y + epsilon, col = col)
  segments(m + s, y - epsilon, m + s, y + epsilon, col = col)
}
count <- 1
for (item in groups){
  group <- item
  # The oxb19-based strains need a shifted x-axis range.
  if (item=='oxb19' || item=='loxb19'){range<-c(3,6,0,3)}
  else {range<-c(1.5,5,0,3)}
  PlotHist(data,group,'acetoacetate','mM',range,'none',c('black','red'))
  # 0 mM level (grey) drawn at y = 2.5; 10 mM level (pink) at y = 2.
  DrawMedianBar(medians[medians$group==group & medians$level==0,]$median, 2.5, 'grey')
  DrawMedianBar(medians[medians$group==group & medians$level==10,]$median, 2, 'pink')
  par(new=FALSE)
  par(adj=0)               # left-align the panel letter
  title(letters[count],cex.main=1.5)
  par(adj=0.5)
  count <- count + 1
  # Draw the legend once, on the first panel only.
  if (count==2){legend('topright',1,c('0mM AcAc','10mM AcAc'),col=c('black','red'),box.lty=0,bty='n',lty=c(1,1),lwd=2)}
}
|
\name{svystdres}
\alias{svystdres}
\title{
Standardized residuals for models fitted with complex survey data
}
\description{
Compute standardized residuals for fixed effects, linear regression models fitted with data collected from one- and two-stage complex survey designs.
}
\usage{
svystdres(mobj, stvar=NULL, clvar=NULL, doplot=FALSE)
}
\arguments{
\item{mobj}{
model object produced by \code{svyglm} in the \code{survey} package
}
\item{stvar}{
name of the stratification variable in the \code{svydesign} object used to fit the model
}
\item{clvar}{
name of the cluster variable in the \code{svydesign} object used to fit the model}
\item{doplot}{
if \code{TRUE}, plot the standardized residuals vs. their sequence number in data set. Reference lines are drawn at +/-3}
}
\details{
\code{svystdres} computes the standardized residuals, i.e., the residuals divided by an estimate of the model standard deviation of the residuals. Residuals are used from a model object created by \code{svyglm} in the R \code{survey} package. The output is a vector of the standardized residuals and a scatterplot of them versus the sequence number of the sample element used in fitting the model. By default, \code{svyglm} uses only complete cases (i.e., ones for which the dependent variable and all independent variables are non-missing) to fit the model. The rows of the data frame used in fitting the model can be retrieved from the \code{svyglm} object via \code{as.numeric(names(mobj$y))}. The data for those rows is in \code{mobj$data}.
}
\value{
List object with values:
\item{stdresids}{Numeric vector whose names are the rows of the data frame in the \code{svydesign} object that were used in fitting the model}
\item{n}{number of sample clusters}
\item{mbar}{average number of non-missing, sample elements per cluster}
\item{rtsighat}{estimate of the square root of the model variance of the residuals, \eqn{\sqrt{\sigma^2}}}
\item{rhohat}{estimate of the intracluster correlation of the residuals, \eqn{\rho}}
}
\references{
Li, J., and Valliant, R. (2011). Linear regression diagnostics for unclustered survey data. \emph{Journal of Official Statistics}, 27, 99-119.
Li, J., and Valliant, R. (2015). Linear regression diagnostics in cluster samples. \emph{Journal of Official Statistics}, 31, 61-75.
Lumley, T. (2010). \emph{Complex Surveys}. New York: John Wiley & Sons.
Lumley, T. (2014). survey: analysis of complex survey samples. R package version 3.30.
}
\author{
Richard Valliant
}
\seealso{
\code{\link{svyhat}}, \code{\link{svyCooksD}}
}
\examples{
require(survey)
data(api)
# unstratified, single-stage design
d0 <- svydesign(id=~1,strata=NULL, weights=~pw, data=apistrat)
m0 <- svyglm(api00 ~ ell + meals + mobility, design=d0)
svystdres(mobj=m0, stvar=NULL, clvar=NULL)
# stratified cluster design
require(NHANES)
data(NHANESraw)
dnhanes <- svydesign(id=~SDMVPSU, strata=~SDMVSTRA, weights=~WTINT2YR, nest=TRUE, data=NHANESraw)
m1 <- svyglm(BPDiaAve ~ as.factor(Race1) + BMI + AlcoholYear, design = dnhanes)
svystdres(mobj=m1, stvar= "SDMVSTRA", clvar="SDMVPSU")
}
\keyword{methods}
\keyword{survey}
|
/man/svystdres.Rd
|
no_license
|
nicholaskarlson/svydiags
|
R
| false
| false
| 3,235
|
rd
|
\name{svystdres}
\alias{svystdres}
\title{
Standardized residuals for models fitted with complex survey data
}
\description{
Compute standardized residuals for fixed effects, linear regression models fitted with data collected from one- and two-stage complex survey designs.
}
\usage{
svystdres(mobj, stvar=NULL, clvar=NULL, doplot=FALSE)
}
\arguments{
\item{mobj}{
model object produced by \code{svyglm} in the \code{survey} package
}
\item{stvar}{
name of the stratification variable in the \code{svydesign} object used to fit the model
}
\item{clvar}{
name of the cluster variable in the \code{svydesign} object used to fit the model}
\item{doplot}{
if \code{TRUE}, plot the standardized residuals vs. their sequence number in data set. Reference lines are drawn at +/-3}
}
\details{
\code{svystdres} computes the standardized residuals, i.e., the residuals divided by an estimate of the model standard deviation of the residuals. Residuals are used from a model object created by \code{svyglm} in the R \code{survey} package. The output is a vector of the standardized residuals and a scatterplot of them versus the sequence number of the sample element used in fitting the model. By default, \code{svyglm} uses only complete cases (i.e., ones for which the dependent variable and all independent variables are non-missing) to fit the model. The rows of the data frame used in fitting the model can be retrieved from the \code{svyglm} object via \code{as.numeric(names(mobj$y))}. The data for those rows is in \code{mobj$data}.
}
\value{
List object with values:
\item{stdresids}{Numeric vector whose names are the rows of the data frame in the \code{svydesign} object that were used in fitting the model}
\item{n}{number of sample clusters}
\item{mbar}{average number of non-missing, sample elements per cluster}
\item{rtsighat}{estimate of the square root of the model variance of the residuals, \eqn{\sqrt{\sigma^2}}}
\item{rhohat}{estimate of the intracluster correlation of the residuals, \eqn{\rho}}
}
\references{
Li, J., and Valliant, R. (2011). Linear regression diagnostics for unclustered survey data. \emph{Journal of Official Statistics}, 27, 99-119.
Li, J., and Valliant, R. (2015). Linear regression diagnostics in cluster samples. \emph{Journal of Official Statistics}, 31, 61-75.
Lumley, T. (2010). \emph{Complex Surveys}. New York: John Wiley & Sons.
Lumley, T. (2014). survey: analysis of complex survey samples. R package version 3.30.
}
\author{
Richard Valliant
}
\seealso{
\code{\link{svyhat}}, \code{\link{svyCooksD}}
}
\examples{
require(survey)
data(api)
# unstratified, single-stage design
d0 <- svydesign(id=~1,strata=NULL, weights=~pw, data=apistrat)
m0 <- svyglm(api00 ~ ell + meals + mobility, design=d0)
svystdres(mobj=m0, stvar=NULL, clvar=NULL)
# stratified cluster design
require(NHANES)
data(NHANESraw)
dnhanes <- svydesign(id=~SDMVPSU, strata=~SDMVSTRA, weights=~WTINT2YR, nest=TRUE, data=NHANESraw)
m1 <- svyglm(BPDiaAve ~ as.factor(Race1) + BMI + AlcoholYear, design = dnhanes)
svystdres(mobj=m1, stvar= "SDMVSTRA", clvar="SDMVPSU")
}
\keyword{methods}
\keyword{survey}
|
#library(jsonlite)
library(dplyr)
library(stringr)
library(ggplot2)
library(magrittr)
# Question 1: find occupations whose entry-level university-graduate salary
# rose by more than 5% between ROC year 103 and year 106, and count the
# qualifying occupations per industry.
edu_103_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/103年各教育程度別初任人員經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_104_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/104年各教育程度別初任人員經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_105_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/105年各教育程度別初任人員每人每月經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_106_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/106年各教育程度別初任人員每人每月經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
# Join year 103 and year 106 on occupation (大職業別); the university-salary
# column becomes 大學.薪資.x (year 103) and 大學.薪資.y (year 106).
join_103_106 <- inner_join(edu_103_full, edu_106_full, by = "大職業別")
# The salary columns are character in the raw CSV; coerce to numeric
# (unparseable entries become NA with a warning).
join_103_106$大學.薪資.x <- as.numeric(join_103_106$大學.薪資.x)
join_103_106$大學.薪資.y <- as.numeric(join_103_106$大學.薪資.y)
# Growth ratio: year-106 salary relative to year-103 salary.
join_103_106$percent <- join_103_106$大學.薪資.y / join_103_106$大學.薪資.x
# Print every occupation with any growth, then those with > 5% growth.
join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1) %>% select(2)
join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1.05) %>% select(2)
# BUG FIX: `filter_105` was referenced below while its definition existed
# only as a commented-out line, so the next statement failed with
# "object 'filter_105' not found". Define it explicitly.
filter_105 <- join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1.05)
career <- as.character(filter_105$大職業別)  # occupations whose salary rose > 5%
# Labels look like "industry-job"; split on "-" and keep the industry part.
tmp <- strsplit(career, "-")
# Frequency of qualifying occupations per industry. Renamed from `table` /
# `df` to avoid shadowing base::table (neither name is reused below).
industry_counts <- table(unlist(lapply(tmp, "[", 1)))
industry_counts_df <- data.frame(industry_counts)
# Question 2: female-to-male starting-salary ratio for university graduates
# (College_f_m_*), per occupation, for ROC years 103-106. Values > 100 mean
# women out-earn men; 0 marks missing data (NA replaced below).
# How to create a data frame
# https://dzone.com/articles/learn-r-how-create-data-frames
# Column 2 is the occupation label; column 12 is the female/male ratio.
df_103 <- edu_103_full[,c(2,12)]
df_104 <- edu_104_full[,c(2,12)]
df_105 <- edu_105_full[,c(2,12)]
df_106 <- edu_106_full[,c(2,12)]
# Chain inner joins so only occupations present in all four years remain.
joined_df_103_to_106 <- inner_join(df_103, df_104, by="大職業別")
joined_df_103_to_106 <- inner_join(joined_df_103_to_106, df_105, by="大職業別")
joined_df_103_to_106 <- inner_join(joined_df_103_to_106, df_106, by="大職業別")
# ===
# re-order the data frame
# example : `data <- data[c(1,3,2)]`
#joined_df_103_to_106 <- joined_df_103_to_106[c(2,1,3,4,5,6,7,8,9)]
# rename column names
# http://rprogramming.net/rename-columns-in-r/
# names(data) <- c("new_name", "another_new_name")
names(joined_df_103_to_106) <- c("大職業別","College_f_m_103","College_f_m_104","College_f_m_105","College_f_m_106")
# The ratio columns are character in the raw data; coerce each to numeric.
#joined_df_103_to_106[2:5] <- as.numeric(joined_df_103_to_106[2:5])
joined_df_103_to_106$College_f_m_103 <- as.numeric(joined_df_103_to_106$College_f_m_103)
joined_df_103_to_106$College_f_m_104 <- as.numeric(joined_df_103_to_106$College_f_m_104)
joined_df_103_to_106$College_f_m_105 <- as.numeric(joined_df_103_to_106$College_f_m_105)
joined_df_103_to_106$College_f_m_106 <- as.numeric(joined_df_103_to_106$College_f_m_106)
# replace `NA` to `0`
# https://bbs.pinggu.org/thread-3589221-1-1.html
#dat[is.na(dat)] <- 0
joined_df_103_to_106[is.na(joined_df_103_to_106)] <- 0
# Rank occupations where women out-earn men, largest ratio first
# (original note: order the table (女生>男生) (由大至小)).
joined_df_103_to_106 %>% arrange(. ,desc(College_f_m_103)) %>% filter(. ,College_f_m_103 > 0) %>% head(10)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_103))
head(joined_df_103_to_106,10)
# Recorded per-year results (occupation names kept in the original language):
# 103 年度:礦業及土石採取業-技術員及助理專業人員(100), 用水供應及污染整治業-服務及銷售工作人員(100), 營造業-服務及銷售工作人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_104))
head(joined_df_103_to_106,10)
# 104 年度:專業_科學及技術服務業-技藝_機械設備操作及組裝人員(100.26), 用水供應及污染整治業-服務及銷售工作人員(100),
# 醫療保健服務業-服務及銷售工作人員(100), 其他服務業-專業人員(100), 不動產業-技藝_機械設備操作及組裝人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_105))
head(joined_df_103_to_106,10)
# 105年度:金融及保險業-專業人員(100.11), 用水供應及污染整治業-服務及銷售工作人員(100), 醫療保健服務業-技藝_機械設備操作及組裝人員(100),
# 藝術_娛樂及休閒服務業-技術員及助理專業人員(100), 教育服務業-服務及銷售工作人員(100), 礦業及土石採取業-服務及銷售工作人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_106))
head(joined_df_103_to_106,10)
# 106年度:**資訊及通訊傳播業-服務及銷售工作人員(100.33)**, 用水供應及污染整治業-服務及銷售工作人員(100), 金融及保險業-技藝_機械設備操作及組裝人員(100),
# 專業_科學及技術服務業-技藝_機械設備操作及組裝人員(100), 不動產業-專業人員(100), 礦業及土石採取業-技術員及助理專業人員(100),
# 不動產業-服務及銷售工作人員(100), 資訊及通訊傳播業-技藝_機械設備操作及組裝人員(100), 不動產業-技藝_機械設備操作及組裝人員(100)
# Now the reverse ranking: occupations where men out-earn women, smallest
# ratio first (original note: order the table (男生>女生) (由小至大)).
joined_df_103_to_106 %>% filter(., College_f_m_103 > 0 & College_f_m_104 > 0, College_f_m_105 > 0, College_f_m_106 > 0,
College_f_m_103 < 100 & College_f_m_104 < 100, College_f_m_105 < 100, College_f_m_106 < 100)
joined_df_103_to_106 %>% arrange(. ,College_f_m_103) %>% filter(. ,College_f_m_103 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_104) %>% filter(. ,College_f_m_104 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_105) %>% filter(. ,College_f_m_105 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_106) %>% filter(. ,College_f_m_106 > 0) %>% head(10)
# Output
# 103年度:礦業及土石採取業-技藝_機械設備操作及組裝人員(84.97), 教育服務業-技藝_機械設備操作及組裝人員(88.49), 其他服務業-技術員及助理專業人員(89.36)
# 104年度:電力及燃氣供應業-技藝_機械設備操作及組裝人員(91.69), 教育服務業-服務及銷售工作人員(91.90), 礦業及土石採取業-技術員及助理專業人員(92.42)
# 105年度:不動產業-技藝_機械設備操作及組裝人員(91.38), 醫療保健服務業-專業人員(94.98), 用水供應及污染整治業-事務支援人員(95.04)
# 106年度:電力及燃氣供應業-技藝_機械設備操作及組裝人員(95.51), 營造業-服務及銷售工作人員(95.93), 其他服務業-事務支援人員(96.23)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_103)
# head(joined_df_103_to_106
# [joined_df_103_to_106$College_f_m_103 > 0,]
# , 10)
#head(filter(joined_df_103_to_106 , College_f_m_103 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_104)
#head(filter(joined_df_103_to_106 , College_f_m_104 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_105)
#head(filter(joined_df_103_to_106 , College_f_m_105 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_106)
#head(filter(joined_df_103_to_106 , College_f_m_106 > 0), 10)
# drawing plots
# https://zhuanlan.zhihu.com/p/30706019
# ggplot(data = df, mapping = aes(x = factor(Year), y = Weight, group = 1)) + geom_line() + xlab('Year')
# NOTE(review): aes(x="大職業別") maps a literal string, so every occupation
# lands on the same x position -- presumably aes(x=大職業別) (the column) was
# intended; confirm before relying on this plot.
ggplot(data = joined_df_103_to_106, mapping=aes(x="大職業別",y=College_f_m_103))+geom_line()
# Question 3: ratio of graduate-school to university starting salary
# (raw-table columns 11 and 13) per occupation, years 103-106.
# process the data
df_1_103 <- edu_103_full[,c(2,11,13)]
df_1_104 <- edu_104_full[,c(2,11,13)]
df_1_105 <- edu_105_full[,c(2,11,13)]
df_1_106 <- edu_106_full[,c(2,11,13)]
# Keep only occupations present in all four years.
joined_df_1_103_to_106 <- inner_join(df_1_103, df_1_104, by="大職業別")
joined_df_1_103_to_106 <- inner_join(joined_df_1_103_to_106, df_1_105, by="大職業別")
joined_df_1_103_to_106 <- inner_join(joined_df_1_103_to_106, df_1_106, by="大職業別")
names(joined_df_1_103_to_106) <- c("大職業別","College103","Graduate103","College104","Graduate104","College105","Graduate105","College106","Graduate106")
#dim(joined_df_1_103_to_106)[2]
#output : 9
# Coerce every value column to numeric and zero out parse failures (NA).
for (i in 2:dim(joined_df_1_103_to_106)[2]) {
joined_df_1_103_to_106[,i] <- as.numeric(joined_df_1_103_to_106[,i])
joined_df_1_103_to_106[,i][is.na(joined_df_1_103_to_106[,i])] <- 0
}
# Graduate / college salary ratio per year, rounded to 2 decimals.
joined_df_1_103_to_106$devide103 <- round(joined_df_1_103_to_106$Graduate103 / joined_df_1_103_to_106$College103, 2)
joined_df_1_103_to_106$devide104 <- round(joined_df_1_103_to_106$Graduate104 / joined_df_1_103_to_106$College104, 2)
joined_df_1_103_to_106$devide105 <- round(joined_df_1_103_to_106$Graduate105 / joined_df_1_103_to_106$College105, 2)
joined_df_1_103_to_106$devide106 <- round(joined_df_1_103_to_106$Graduate106 / joined_df_1_103_to_106$College106, 2)
# Process the value `NaN` caused by 0 / 0. dim() is re-evaluated here, so
# this second loop also covers the four new devide* columns, where 0/0
# produced NaN (is.na(NaN) is TRUE).
for (i in 2:dim(joined_df_1_103_to_106)[2]) {
joined_df_1_103_to_106[,i][is.na(joined_df_1_103_to_106[,i])] <- 0
}
# get the data which is by year 106
joined_df_1_103_to_106 %>% arrange(. ,desc(devide106)) %>% filter(. ,devide106 > 0) %>% select("大職業別","devide106") %>%head(10)
# Recorded output. NOTE(review): the values below carry 6 decimals, which is
# inconsistent with round(..., 2) above -- presumably captured before the
# rounding was added; verify.
# 大職業別 devide106
# 礦業及土石採取業-事務支援人員 1.208946
# 專業_科學及技術服務業 1.202982
# 其他服務業-技術員及助理專業人員 1.199470
# 專業_科學及技術服務業-事務支援人員 1.192306
# 批發及零售業 1.191916
# 製造業 1.188350
# 藝術_娛樂及休閒服務業-事務支援人員 1.187705
# 工業部門 1.183455
# 工業及服務業部門 1.182345
# 服務業部門 1.181334
# Question 4: year-106 graduate vs. university starting salary for four
# selected occupations, picked by hard-coded row indices.
# NOTE(review): the indices c(2,9,23,79) are fragile -- they only match the
# labels below if the join result keeps this exact row order; confirm.
#資訊及通訊傳播業-專業人員
#工業及服務業部門-專業人員
#工業部門-專業人員
#製造業-專業人員
#joined_df_1_103_to_106[c(2,9,23,79),]
# Columns kept: occupation label, College106, Graduate106, devide106.
job <- joined_df_1_103_to_106[c(2,9,23,79),c(1,8,9,13)]
# Absolute salary gap in year 106: graduate minus university pay.
job$minus106 <- (job$Graduate106 - job$College106)
# Show occupation label (col 1) and the gap (col 5, minus106).
job[1:4,c(1,5)]
|
/DataAnalysis.R
|
no_license
|
CGUIM-BigDataAnalysis/107bigdatacguimhw1-jason19970210
|
R
| false
| false
| 10,324
|
r
|
#library(jsonlite)
library(dplyr)
library(stringr)
library(ggplot2)
library(magrittr)
# Question 1: find occupations whose entry-level university-graduate salary
# rose by more than 5% between ROC year 103 and year 106, and count the
# qualifying occupations per industry.
edu_103_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/103年各教育程度別初任人員經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_104_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/104年各教育程度別初任人員經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_105_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/105年各教育程度別初任人員每人每月經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
edu_106_full <- read.csv("/Users/macbook/Desktop/三下/107bigdatacguimhw1-jason19970210/Input/106年各教育程度別初任人員每人每月經常性薪資─按大職類分.csv", stringsAsFactors = FALSE)
# Join year 103 and year 106 on occupation (大職業別); the university-salary
# column becomes 大學.薪資.x (year 103) and 大學.薪資.y (year 106).
join_103_106 <- inner_join(edu_103_full, edu_106_full, by = "大職業別")
# The salary columns are character in the raw CSV; coerce to numeric
# (unparseable entries become NA with a warning).
join_103_106$大學.薪資.x <- as.numeric(join_103_106$大學.薪資.x)
join_103_106$大學.薪資.y <- as.numeric(join_103_106$大學.薪資.y)
# Growth ratio: year-106 salary relative to year-103 salary.
join_103_106$percent <- join_103_106$大學.薪資.y / join_103_106$大學.薪資.x
# Print every occupation with any growth, then those with > 5% growth.
join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1) %>% select(2)
join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1.05) %>% select(2)
# BUG FIX: `filter_105` was referenced below while its definition existed
# only as a commented-out line, so the next statement failed with
# "object 'filter_105' not found". Define it explicitly.
filter_105 <- join_103_106 %>% arrange(desc(percent)) %>% filter(percent > 1.05)
career <- as.character(filter_105$大職業別)  # occupations whose salary rose > 5%
# Labels look like "industry-job"; split on "-" and keep the industry part.
tmp <- strsplit(career, "-")
# Frequency of qualifying occupations per industry. Renamed from `table` /
# `df` to avoid shadowing base::table (neither name is reused below).
industry_counts <- table(unlist(lapply(tmp, "[", 1)))
industry_counts_df <- data.frame(industry_counts)
# Question 2: female-to-male starting-salary ratio for university graduates
# (College_f_m_*), per occupation, for ROC years 103-106. Values > 100 mean
# women out-earn men; 0 marks missing data (NA replaced below).
# How to create a data frame
# https://dzone.com/articles/learn-r-how-create-data-frames
# Column 2 is the occupation label; column 12 is the female/male ratio.
df_103 <- edu_103_full[,c(2,12)]
df_104 <- edu_104_full[,c(2,12)]
df_105 <- edu_105_full[,c(2,12)]
df_106 <- edu_106_full[,c(2,12)]
# Chain inner joins so only occupations present in all four years remain.
joined_df_103_to_106 <- inner_join(df_103, df_104, by="大職業別")
joined_df_103_to_106 <- inner_join(joined_df_103_to_106, df_105, by="大職業別")
joined_df_103_to_106 <- inner_join(joined_df_103_to_106, df_106, by="大職業別")
# ===
# re-order the data frame
# example : `data <- data[c(1,3,2)]`
#joined_df_103_to_106 <- joined_df_103_to_106[c(2,1,3,4,5,6,7,8,9)]
# rename column names
# http://rprogramming.net/rename-columns-in-r/
# names(data) <- c("new_name", "another_new_name")
names(joined_df_103_to_106) <- c("大職業別","College_f_m_103","College_f_m_104","College_f_m_105","College_f_m_106")
# The ratio columns are character in the raw data; coerce each to numeric.
#joined_df_103_to_106[2:5] <- as.numeric(joined_df_103_to_106[2:5])
joined_df_103_to_106$College_f_m_103 <- as.numeric(joined_df_103_to_106$College_f_m_103)
joined_df_103_to_106$College_f_m_104 <- as.numeric(joined_df_103_to_106$College_f_m_104)
joined_df_103_to_106$College_f_m_105 <- as.numeric(joined_df_103_to_106$College_f_m_105)
joined_df_103_to_106$College_f_m_106 <- as.numeric(joined_df_103_to_106$College_f_m_106)
# replace `NA` to `0`
# https://bbs.pinggu.org/thread-3589221-1-1.html
#dat[is.na(dat)] <- 0
joined_df_103_to_106[is.na(joined_df_103_to_106)] <- 0
# Rank occupations where women out-earn men, largest ratio first
# (original note: order the table (女生>男生) (由大至小)).
joined_df_103_to_106 %>% arrange(. ,desc(College_f_m_103)) %>% filter(. ,College_f_m_103 > 0) %>% head(10)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_103))
head(joined_df_103_to_106,10)
# Recorded per-year results (occupation names kept in the original language):
# 103 年度:礦業及土石採取業-技術員及助理專業人員(100), 用水供應及污染整治業-服務及銷售工作人員(100), 營造業-服務及銷售工作人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_104))
head(joined_df_103_to_106,10)
# 104 年度:專業_科學及技術服務業-技藝_機械設備操作及組裝人員(100.26), 用水供應及污染整治業-服務及銷售工作人員(100),
# 醫療保健服務業-服務及銷售工作人員(100), 其他服務業-專業人員(100), 不動產業-技藝_機械設備操作及組裝人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_105))
head(joined_df_103_to_106,10)
# 105年度:金融及保險業-專業人員(100.11), 用水供應及污染整治業-服務及銷售工作人員(100), 醫療保健服務業-技藝_機械設備操作及組裝人員(100),
# 藝術_娛樂及休閒服務業-技術員及助理專業人員(100), 教育服務業-服務及銷售工作人員(100), 礦業及土石採取業-服務及銷售工作人員(100)
joined_df_103_to_106 <- arrange(joined_df_103_to_106,desc(College_f_m_106))
head(joined_df_103_to_106,10)
# 106年度:**資訊及通訊傳播業-服務及銷售工作人員(100.33)**, 用水供應及污染整治業-服務及銷售工作人員(100), 金融及保險業-技藝_機械設備操作及組裝人員(100),
# 專業_科學及技術服務業-技藝_機械設備操作及組裝人員(100), 不動產業-專業人員(100), 礦業及土石採取業-技術員及助理專業人員(100),
# 不動產業-服務及銷售工作人員(100), 資訊及通訊傳播業-技藝_機械設備操作及組裝人員(100), 不動產業-技藝_機械設備操作及組裝人員(100)
# Now the reverse ranking: occupations where men out-earn women, smallest
# ratio first (original note: order the table (男生>女生) (由小至大)).
joined_df_103_to_106 %>% filter(., College_f_m_103 > 0 & College_f_m_104 > 0, College_f_m_105 > 0, College_f_m_106 > 0,
College_f_m_103 < 100 & College_f_m_104 < 100, College_f_m_105 < 100, College_f_m_106 < 100)
joined_df_103_to_106 %>% arrange(. ,College_f_m_103) %>% filter(. ,College_f_m_103 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_104) %>% filter(. ,College_f_m_104 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_105) %>% filter(. ,College_f_m_105 > 0) %>% head(10)
joined_df_103_to_106 %>% arrange(. ,College_f_m_106) %>% filter(. ,College_f_m_106 > 0) %>% head(10)
# Output
# 103年度:礦業及土石採取業-技藝_機械設備操作及組裝人員(84.97), 教育服務業-技藝_機械設備操作及組裝人員(88.49), 其他服務業-技術員及助理專業人員(89.36)
# 104年度:電力及燃氣供應業-技藝_機械設備操作及組裝人員(91.69), 教育服務業-服務及銷售工作人員(91.90), 礦業及土石採取業-技術員及助理專業人員(92.42)
# 105年度:不動產業-技藝_機械設備操作及組裝人員(91.38), 醫療保健服務業-專業人員(94.98), 用水供應及污染整治業-事務支援人員(95.04)
# 106年度:電力及燃氣供應業-技藝_機械設備操作及組裝人員(95.51), 營造業-服務及銷售工作人員(95.93), 其他服務業-事務支援人員(96.23)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_103)
# head(joined_df_103_to_106
# [joined_df_103_to_106$College_f_m_103 > 0,]
# , 10)
#head(filter(joined_df_103_to_106 , College_f_m_103 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_104)
#head(filter(joined_df_103_to_106 , College_f_m_104 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_105)
#head(filter(joined_df_103_to_106 , College_f_m_105 > 0), 10)
#joined_df_103_to_106 <- arrange(joined_df_103_to_106,College_f_m_106)
#head(filter(joined_df_103_to_106 , College_f_m_106 > 0), 10)
# drawing plots
# https://zhuanlan.zhihu.com/p/30706019
# ggplot(data = df, mapping = aes(x = factor(Year), y = Weight, group = 1)) + geom_line() + xlab('Year')
# NOTE(review): aes(x="大職業別") maps a literal string, so every occupation
# lands on the same x position -- presumably aes(x=大職業別) (the column) was
# intended; confirm before relying on this plot.
ggplot(data = joined_df_103_to_106, mapping=aes(x="大職業別",y=College_f_m_103))+geom_line()
# Question 3: ratio of graduate-school to university starting salary
# (raw-table columns 11 and 13) per occupation, years 103-106.
# process the data
df_1_103 <- edu_103_full[,c(2,11,13)]
df_1_104 <- edu_104_full[,c(2,11,13)]
df_1_105 <- edu_105_full[,c(2,11,13)]
df_1_106 <- edu_106_full[,c(2,11,13)]
# Keep only occupations present in all four years.
joined_df_1_103_to_106 <- inner_join(df_1_103, df_1_104, by="大職業別")
joined_df_1_103_to_106 <- inner_join(joined_df_1_103_to_106, df_1_105, by="大職業別")
joined_df_1_103_to_106 <- inner_join(joined_df_1_103_to_106, df_1_106, by="大職業別")
names(joined_df_1_103_to_106) <- c("大職業別","College103","Graduate103","College104","Graduate104","College105","Graduate105","College106","Graduate106")
#dim(joined_df_1_103_to_106)[2]
#output : 9
# Coerce every value column to numeric and zero out parse failures (NA).
for (i in 2:dim(joined_df_1_103_to_106)[2]) {
joined_df_1_103_to_106[,i] <- as.numeric(joined_df_1_103_to_106[,i])
joined_df_1_103_to_106[,i][is.na(joined_df_1_103_to_106[,i])] <- 0
}
# Graduate / college salary ratio per year, rounded to 2 decimals.
joined_df_1_103_to_106$devide103 <- round(joined_df_1_103_to_106$Graduate103 / joined_df_1_103_to_106$College103, 2)
joined_df_1_103_to_106$devide104 <- round(joined_df_1_103_to_106$Graduate104 / joined_df_1_103_to_106$College104, 2)
joined_df_1_103_to_106$devide105 <- round(joined_df_1_103_to_106$Graduate105 / joined_df_1_103_to_106$College105, 2)
joined_df_1_103_to_106$devide106 <- round(joined_df_1_103_to_106$Graduate106 / joined_df_1_103_to_106$College106, 2)
# Process the value `NaN` caused by 0 / 0. dim() is re-evaluated here, so
# this second loop also covers the four new devide* columns, where 0/0
# produced NaN (is.na(NaN) is TRUE).
for (i in 2:dim(joined_df_1_103_to_106)[2]) {
joined_df_1_103_to_106[,i][is.na(joined_df_1_103_to_106[,i])] <- 0
}
# get the data which is by year 106
joined_df_1_103_to_106 %>% arrange(. ,desc(devide106)) %>% filter(. ,devide106 > 0) %>% select("大職業別","devide106") %>%head(10)
# Recorded output. NOTE(review): the values below carry 6 decimals, which is
# inconsistent with round(..., 2) above -- presumably captured before the
# rounding was added; verify.
# 大職業別 devide106
# 礦業及土石採取業-事務支援人員 1.208946
# 專業_科學及技術服務業 1.202982
# 其他服務業-技術員及助理專業人員 1.199470
# 專業_科學及技術服務業-事務支援人員 1.192306
# 批發及零售業 1.191916
# 製造業 1.188350
# 藝術_娛樂及休閒服務業-事務支援人員 1.187705
# 工業部門 1.183455
# 工業及服務業部門 1.182345
# 服務業部門 1.181334
# Question 4: year-106 graduate vs. university starting salary for four
# selected occupations, picked by hard-coded row indices.
# NOTE(review): the indices c(2,9,23,79) are fragile -- they only match the
# labels below if the join result keeps this exact row order; confirm.
#資訊及通訊傳播業-專業人員
#工業及服務業部門-專業人員
#工業部門-專業人員
#製造業-專業人員
#joined_df_1_103_to_106[c(2,9,23,79),]
# Columns kept: occupation label, College106, Graduate106, devide106.
job <- joined_df_1_103_to_106[c(2,9,23,79),c(1,8,9,13)]
# Absolute salary gap in year 106: graduate minus university pay.
job$minus106 <- (job$Graduate106 - job$College106)
# Show occupation label (col 1) and the gap (col 5, minus106).
job[1:4,c(1,5)]
|
#' Data from Experiment 2 in Gauvrit, Singmann, Soler-Toscano & Zenil
#'
#' Responses of one participant (42 years old) to 200 randomly generated strings of length 10 from an alphabet of 6 symbols.
#' For each string, the participant was asked to indicate whether or not the string appeared random.
#'
#' @docType data
#' @keywords dataset
#' @name exp2
#' @usage exp2
#' @format A data.frame with 200 rows and 2 variables.
#' @source Gauvrit, Singmann, Soler-Toscano & Zenil (submitted). Complexity for psychology. A user-friendly implementation of the coding theorem method.
#'
#' @example examples/examples.exp2.R
#'
#'
NULL
|
/R/exp2-data.R
|
no_license
|
singmann/acss
|
R
| false
| false
| 635
|
r
|
#' Data from Experiment 2 in Gauvrit, Singmann, Soler-Toscano & Zenil
#'
#' Responses of one participant (42 years old) to 200 randomly generated strings of length 10 from an alphabet of 6 symbols.
#' For each string, the participant was asked to indicate whether or not the string appeared random.
#'
#' @docType data
#' @keywords dataset
#' @name exp2
#' @usage exp2
#' @format A data.frame with 200 rows and 2 variables.
#' @source Gauvrit, Singmann, Soler-Toscano & Zenil (submitted). Complexity for psychology. A user-friendly implementation of the coding theorem method.
#'
#' @example examples/examples.exp2.R
#'
#'
NULL
|
#load libraries
#install.packages("ggpubr")
library(tidyr)
library(ggplot2)
library(ggpubr)
library(nlme)
library(dplyr)
library(lme4)
library(lmerTest)
# MOLT SIZES
# set up working directory
# NOTE(review): hard-coded absolute path; the script only runs on this machine.
setwd("C:/Users/emma.reinhardt/Desktop/Research/Size/Data")
# read in file (character columns, e.g. Group codes, become factors)
allMolts <- read.csv("molts.csv", stringsAsFactors = TRUE)
# get rid of empty rows and keep only the width columns
# (columns were renamed in the .csv file to make this easier)
mw <- subset(allMolts, Group != "")
mw <- subset(mw, select = c(Crab, Group, J1, J2, J3, J4, J5, J6))
# reshape wide -> long: one row per crab x molt stage
mw <- gather(mw, key = Stage, value = moltWidth, J1, J2, J3, J4, J5, J6)
# coerce molt widths to numeric (non-numeric entries become NA).
# NOTE(review): if the J* columns were read as factors, as.numeric on the
# gathered values depends on gather()'s coercion behaviour -- confirm the
# widths survive as the intended values, not factor level codes.
mw$moltWidth <- as.numeric(mw$moltWidth)
# add in CO2 treatments based on the group code prefix (H = High, L = Low)
mw$Treatment <- ""
mw$Treatment[mw$Group == "HA" | mw$Group == "HB" | mw$Group == "HC"] <- "High"
mw$Treatment[mw$Group == "LA" | mw$Group == "LB" | mw$Group == "LC"] <- "Low"
# repeat all above steps for a separate lengths dataframe
# (cleanUp below fixes the stage names for merging)
ml <- subset(allMolts, Group != "")
ml <- subset(ml, select = c(Crab, Group, J1_L, J2_L, J3_L, J4_L, J5_L, J6_L))
ml <- gather(ml, key = StageL, value = moltLength, J1_L, J2_L, J3_L, J4_L, J5_L, J6_L)
cleanUp <- function(dd){
dd$Stage = ""
dd$Stage[dd$StageL == "J1_L"] <- "J1"
dd$Stage[dd$StageL == "J2_L"] <- "J2"
dd$Stage[dd$StageL == "J3_L"] <- "J3"
dd$Stage[dd$StageL == "J4_L"] <- "J4"
dd$Stage[dd$StageL == "J5_L"] <- "J5"
dd$Stage[dd$StageL == "J6_L"] <- "J6"
dd$Stage[dd$StageL == "J7_L"] <- "J7"
dd <- subset(dd, select = c(Crab, Group, Stage, moltLength))
return(dd)
}
ml <- cleanUp(ml)
ml$moltLength <- as.numeric(ml$moltLength)
ml$Treatment <- ""
ml$Treatment[ml$Group == "HA" | ml$Group == "HB" | ml$Group == "HC"] <- "High"
ml$Treatment[ml$Group == "LA" | ml$Group == "LB" | ml$Group == "LC"] <- "Low"
#merge the consolidated width and length dataframes
#THIS IS THE SOLE MOLT DF
molt <- merge(ml, mw)
#BODY SIZES
#do the same thing to prep all of the size data
d <- read.csv("sizes.csv", stringsAsFactors = FALSE)
#fix data frame
d <- subset(d, Group != "")
d <- subset(d, select = c(Crab, Group, J1, J2, J3, J4, J5, J6))
#short-wide to long-skinny
dg <- gather(d, key = Stage, value = carapaceWidth, J1, J2, J3, J4, J5, J6)
#convert variable from character to number
dg$carapaceWidth <- as.numeric(dg$carapaceWidth)
#adg Treatment
dg$Treatment <- ""
dg$Treatment[dg$Group == "HA" | dg$Group == "HB" | dg$Group == "HC"] <- "High"
dg$Treatment[dg$Group == "LA" | dg$Group == "LB" | dg$Group == "LC"] <- "Low"
#combine width data frames
#THIS IS MOLT + LIVE CRAB WIDTHS
#total <- data.frame(mw$Crab, mw$Group, mw$Stage, mw$Treatment, mw$moltWidth, dg$carapaceWidth)
total <- merge(mw, dg)
### MOLT + BODY PLOTS
#pretty title
legendTitle = expression("CO"[2]*" Treatment")
#linear fit of molts vs bodies
ggplot(total, aes(x = moltWidth, y = carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
theme_bw() +
geom_smooth(aes(fill = Treatment), method = lm, se = TRUE) +
labs(x = "Molt Width (mm)", y = "Carapace Width (mm)", title = "Molt vs. Carapace Width Across Stages",
legend, fill = legendTitle, color = "Stage", shape = legendTitle)
#linear fit of molts vs bodies across stages
ggplot(total, aes(moltWidth, carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
geom_smooth(aes(fill = Treatment), method = lm, se=TRUE) +
theme_bw() +
facet_wrap(~Stage, ncol = 2) +
labs(x = "Molt Width (mm)", y = "Carapace Width (mm)", title = "Molt vs. Carapace Width Separated By Stage",
fill = legendTitle, shape = legendTitle, stage = "Stage")
## MOLT PLOTS
#overview of molt widths across stages
ggplot(mw, aes(x = Stage, y = moltWidth)) +
labs(x = "Stage", y = "Molt Widths (mm)", title = "Molt Widths Across Stage", color = legendTitle) +
geom_boxplot(aes(colour = Treatment), position= position_dodge(0.9)) +
geom_jitter(aes(color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.8, alpha = 0.3,
position = position_jitterdodge(0.4)) +
theme_bw()
#plots separated based on stage
ggplot(mw, aes(x = Stage, y = moltWidth)) +
labs(x = "Stage", y = "Molt Widths (mm)", title = "Molt Widths Across Stage") +
geom_dotplot(aes(fill = Treatment, color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.8, alpha = 0.3,
position = position_dodge(0.8)) +
geom_boxplot(aes(color = Treatment), width = 0.5, size = 0.4,
position = position_dodge(0.8)) +
facet_wrap(~Stage, ncol = 2) +
theme_bw(base_size = 10)
#molt length vs. width separated by stage
ggplot(molt, aes(x = moltWidth, y = moltLength)) +
labs(x = "Molt Width (mm)", y = "Molt Length (mm)", title = "Length vs. Width of Molts Across Stages",
color = legendTitle, fill = legendTitle) +
geom_point(aes(color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.3, alpha = 0.5,
position = position_jitter(0.2)) +
theme_bw() +
# facet_wrap(~Stage, ncol=2) +
geom_smooth(method = lm, aes(fill = Treatment))
#plots of body sizes vs. molt sizes
#how to get y axis scale to accommodate body & molt measurements?
ggplot(total, aes(x = moltWidth, y = carapaceWidth)) +
geom_point(aes(shape = Treatment, color = Stage)) +
geom_smooth(aes(shape = Treatment), method=lm, se = TRUE) +
theme_bw() +
labs(x = "Molt Width (mm)", y = "Living Crab Width (mm)", title = "Living Crab Width vs. Molt Width Across Stages",
shape = legendTitle, color = "Stage")
ggplot(total, aes(moltWidth, carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
geom_smooth(aes(fill = Treatment), method = lm, se=TRUE) +
theme_bw() +
facet_wrap(~Stage, ncol = 2) +
labs(title = "Living Crab Width vs. Molt Width By Stage", x = "Molt Width (mm)", y = "Living Crab Width (mm)",
color = "Stage", fill = legendTitle, shape = legendTitle)
#Function to calc stand errors and CI for error bars
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
summaryForErrorBars <- summarySE(data=molt, measurevar = "moltWidth",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$moltWidth + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$moltWidth - summaryForErrorBars$ci
moltW <- ggplot(summaryForErrorBars, aes(x = Treatment, y = moltWidth, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar))+
geom_point(size = 5) +
geom_jitter(data=molt, aes(x = Treatment, y = moltWidth, colour = Stage), alpha = 0.6,
position = position_jitter(0.4)) +
theme_bw(base_size = 18) +
# scale_color_manual(values = myColors) +
labs(x = legendTitle, y = "Molt Width (mm)", title = "Treatment Comparison of Molt Widths") +
scale_color_discrete(breaks=c("J6","J5","J4","J3","J2","J1"))
myColors <- c("#f26d54", "#36bddc","#337d40", "#b6471d", "#00c23f", "#0231d1")
#### SAME AS ABOVE BUT FOR MOLT LENGTHS, NOT WIDTHS
summaryForErrorBars <- summarySE(data=molt, measurevar = "moltLength",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$moltLength + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$moltLength - summaryForErrorBars$ci
moltL <- ggplot(summaryForErrorBars, aes(x = Treatment, y = moltLength, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar)) +
geom_point(size = 5) +
geom_jitter(data=molt, aes(x = Treatment, y = moltLength, colour = Stage), alpha = 0.4,
position = position_jitter(0.4)) +
theme_bw(base_size = 18) +
labs(x = legendTitle, y = "Molt Length (mm)", title = "Treatment Comparison of Molt Lengths")
ggarrange(moltW, moltL,
labels = c("A", "B"),
common.legend=TRUE,
legend="bottom")
##### MOLT STATS
###helpful functions
#isolate stages to compare each one
isolate <- function(dd, stageName){
single <- dd[dd$Stage == stageName,]
return(single)
}
#function for finding live crab width : molt width ratio
ratio <- function(dataName) {
slant <- dataName$carapaceWidth/dataName$moltWidth
return(slant)
}
#function to run for each stage
fitComp <- function(dd){
stat <- lme(Ratio~Stage + Treatment + Stage*Treatment, random = ~1|(Crab), data = dd, na.action = na.omit)
return(summary(stat))
}
#function to compare each stage for ONLY molt sizes
#for molt widths
statisticW <- function(dataName){
fitW <- lm(moltWidth ~ Treatment, data = dataName, na.action = na.omit)
return(summary(fitW))
}
#for molt lengths
statisticL <- function(dataName){
fitL <- lm(moltLength ~ Treatment, data = dataName, na.action = na.omit)
return(summary(fitL))
}
### MOLT WIDTHS AND LENGTHS
#overall
moltStatW <- lmer(moltWidth ~ Treatment + (1|Crab) + (1|Stage), data = molt)
summary(moltStatW)
#p value = 0.00019
moltStatL <- lmer(moltLength ~ Treatment + (1|Crab) + (1|Stage), data = molt)
summary(moltStatL)
#p value = 0.0146
#by stage
j1M <- isolate(molt, "J1")
j2M <- isolate(molt, "J2")
j3M <- isolate(molt, "J3")
j4M <- isolate(molt, "J4")
j5M <- isolate(molt, "J5")
j6M <- isolate(molt, "J6")
#compare j1
statisticW(j1M)
statisticL(j1M)
#compare j2
statisticW(j2M)
statisticL(j2M)
#compare j3
statisticW(j3M) #significant p value = 0.0044
statisticL(j3M)
#compare j4
statisticW(j4M) #significant p value = 0.0023
statisticL(j4M) #significant p value = 0.0025
#compare j5
statisticW(j5M) #significant p value = 0.0003
statisticL(j5M) #significant p value = 0.0005
#compare j6
statisticW(j6M)
statisticL(j6M)
### MOLTS & BODIES STATISTIC --> mostly body/molt ratio
#insert ratios into dataframe
total$Ratio <- ""
all <- ratio(total)
total$Ratio <- all
### AIC (overall)
fitAllT <- lmer(Ratio~Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllT, tol=1e-05) #TRUE
fitAllS <- lmer(Ratio~Stage + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllS, tol=1e-05) #TRUE
fitAllTS <- lmer(Ratio~Stage + Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllTS, tol=1e-05) #TRUE
fitAllTSTS <- lmer(Ratio~Stage + Treatment + Stage*Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllTSTS, tol=1e-05) #TRUE
AIC(fitAllT, fitAllS, fitAllTS, fitAllTSTS, k=2)
#fitAllTSTS has lowest AIC value
#run with treatment + stage + TR*ST parameter?
#### FOR BODY WIDTHS + MOLT WIDTHS
summaryForErrorBars <- summarySE(data=total, measurevar = "Ratio",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$Ratio + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$Ratio - summaryForErrorBars$ci
ggplot(summaryForErrorBars, aes(x = Treatment, y = Ratio, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar)) +
geom_point(size = 5) +
geom_jitter(data=total, aes(x = Treatment, y = Ratio, colour = Stage), alpha = 0.4,
position = position_jitter(0.4)) +
theme_bw(base_size = 15) +
labs(x = legendTitle, y = "Body Width to Molt Width Ratio", title = "Treatment Comparison of Molt Lengths")
### per stage statistics
#sorry, you have to isolate the stages again (for the dataframe with LIVE crab widths, not MOLT LENGTHS)
#another isolation function until I can fix the weird nomenclature in 'total'
isolate2 <- function(dd, stageName){
single <- dd[dd$Stage == stageName,]
return(single)
}
j1T <- isolate2(total, "J1")
j2T <- isolate2(total, "J2")
j3T <- isolate2(total, "J3")
j4T <- isolate2(total, "J4")
j5T <- isolate2(total, "J5")
j6T <- isolate2(total, "J6")
#mixed model for each stage
fitComp(j1T)
stat1 <- lme(Ratio ~ Stage + Treatment + Stage*Treatment, random = ~1|Crab, data = j1T, na.action = na.omit)
|
/moltAnalysis.R
|
no_license
|
KROVINSKI/19-Hollings-Size-Data
|
R
| false
| false
| 14,016
|
r
|
#load libraries
#install.packages("ggpubr")
library(tidyr)
library(ggplot2)
library(ggpubr)
library(nlme)
library(dplyr)
library(lme4)
library(lmerTest)
# MOLT SIZES
#set up working directory
setwd("C:/Users/emma.reinhardt/Desktop/Research/Size/Data")
#read in file
allMolts <- read.csv("molts.csv", stringsAsFactors = TRUE)
#get rid of empty rows/columns and compile all width columns
#renamed columns .csv file to make this easier
mw <- subset(allMolts, Group != "")
mw <- subset(mw, select = c(Crab, Group, J1, J2, J3, J4, J5, J6))
#turn dataset into long/skinny
mw <- gather(mw, key = Stage, value = moltWidth, J1, J2, J3, J4, J5, J6)
#turn characters into factors
mw$moltWidth <- as.numeric(mw$moltWidth)
#adg in treatments
mw$Treatment <- ""
mw$Treatment[mw$Group == "HA" | mw$Group == "HB" | mw$Group == "HC"] <- "High"
mw$Treatment[mw$Group == "LA" | mw$Group == "LB" | mw$Group == "LC"] <- "Low"
#repeat all above steps for a separate lengths dataframe (fixing the stage names for merging)
ml <- subset(allMolts, Group != "")
ml <- subset(ml, select = c(Crab, Group, J1_L, J2_L, J3_L, J4_L, J5_L, J6_L))
ml <- gather(ml, key = StageL, value = moltLength, J1_L, J2_L, J3_L, J4_L, J5_L, J6_L)
cleanUp <- function(dd){
dd$Stage = ""
dd$Stage[dd$StageL == "J1_L"] <- "J1"
dd$Stage[dd$StageL == "J2_L"] <- "J2"
dd$Stage[dd$StageL == "J3_L"] <- "J3"
dd$Stage[dd$StageL == "J4_L"] <- "J4"
dd$Stage[dd$StageL == "J5_L"] <- "J5"
dd$Stage[dd$StageL == "J6_L"] <- "J6"
dd$Stage[dd$StageL == "J7_L"] <- "J7"
dd <- subset(dd, select = c(Crab, Group, Stage, moltLength))
return(dd)
}
ml <- cleanUp(ml)
ml$moltLength <- as.numeric(ml$moltLength)
ml$Treatment <- ""
ml$Treatment[ml$Group == "HA" | ml$Group == "HB" | ml$Group == "HC"] <- "High"
ml$Treatment[ml$Group == "LA" | ml$Group == "LB" | ml$Group == "LC"] <- "Low"
#merge the consolidated width and length dataframes
#THIS IS THE SOLE MOLT DF
molt <- merge(ml, mw)
#BODY SIZES
#do the same thing to prep all of the size data
d <- read.csv("sizes.csv", stringsAsFactors = FALSE)
#fix data frame
d <- subset(d, Group != "")
d <- subset(d, select = c(Crab, Group, J1, J2, J3, J4, J5, J6))
#short-wide to long-skinny
dg <- gather(d, key = Stage, value = carapaceWidth, J1, J2, J3, J4, J5, J6)
#convert variable from character to number
dg$carapaceWidth <- as.numeric(dg$carapaceWidth)
#adg Treatment
dg$Treatment <- ""
dg$Treatment[dg$Group == "HA" | dg$Group == "HB" | dg$Group == "HC"] <- "High"
dg$Treatment[dg$Group == "LA" | dg$Group == "LB" | dg$Group == "LC"] <- "Low"
#combine width data frames
#THIS IS MOLT + LIVE CRAB WIDTHS
#total <- data.frame(mw$Crab, mw$Group, mw$Stage, mw$Treatment, mw$moltWidth, dg$carapaceWidth)
total <- merge(mw, dg)
### MOLT + BODY PLOTS
#pretty title
legendTitle = expression("CO"[2]*" Treatment")
#linear fit of molts vs bodies
ggplot(total, aes(x = moltWidth, y = carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
theme_bw() +
geom_smooth(aes(fill = Treatment), method = lm, se = TRUE) +
labs(x = "Molt Width (mm)", y = "Carapace Width (mm)", title = "Molt vs. Carapace Width Across Stages",
legend, fill = legendTitle, color = "Stage", shape = legendTitle)
#linear fit of molts vs bodies across stages
ggplot(total, aes(moltWidth, carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
geom_smooth(aes(fill = Treatment), method = lm, se=TRUE) +
theme_bw() +
facet_wrap(~Stage, ncol = 2) +
labs(x = "Molt Width (mm)", y = "Carapace Width (mm)", title = "Molt vs. Carapace Width Separated By Stage",
fill = legendTitle, shape = legendTitle, stage = "Stage")
## MOLT PLOTS
#overview of molt widths across stages
ggplot(mw, aes(x = Stage, y = moltWidth)) +
labs(x = "Stage", y = "Molt Widths (mm)", title = "Molt Widths Across Stage", color = legendTitle) +
geom_boxplot(aes(colour = Treatment), position= position_dodge(0.9)) +
geom_jitter(aes(color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.8, alpha = 0.3,
position = position_jitterdodge(0.4)) +
theme_bw()
#plots separated based on stage
ggplot(mw, aes(x = Stage, y = moltWidth)) +
labs(x = "Stage", y = "Molt Widths (mm)", title = "Molt Widths Across Stage") +
geom_dotplot(aes(fill = Treatment, color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.8, alpha = 0.3,
position = position_dodge(0.8)) +
geom_boxplot(aes(color = Treatment), width = 0.5, size = 0.4,
position = position_dodge(0.8)) +
facet_wrap(~Stage, ncol = 2) +
theme_bw(base_size = 10)
#molt length vs. width separated by stage
ggplot(molt, aes(x = moltWidth, y = moltLength)) +
labs(x = "Molt Width (mm)", y = "Molt Length (mm)", title = "Length vs. Width of Molts Across Stages",
color = legendTitle, fill = legendTitle) +
geom_point(aes(color = Treatment), trim = FALSE,
binaxis = 'y', stackdir = 'center', dotsize = 0.3, alpha = 0.5,
position = position_jitter(0.2)) +
theme_bw() +
# facet_wrap(~Stage, ncol=2) +
geom_smooth(method = lm, aes(fill = Treatment))
#plots of body sizes vs. molt sizes
#how to get y axis scale to accommodate body & molt measurements?
ggplot(total, aes(x = moltWidth, y = carapaceWidth)) +
geom_point(aes(shape = Treatment, color = Stage)) +
geom_smooth(aes(shape = Treatment), method=lm, se = TRUE) +
theme_bw() +
labs(x = "Molt Width (mm)", y = "Living Crab Width (mm)", title = "Living Crab Width vs. Molt Width Across Stages",
shape = legendTitle, color = "Stage")
ggplot(total, aes(moltWidth, carapaceWidth)) +
geom_point(aes(color = Stage, shape = Treatment)) +
geom_smooth(aes(fill = Treatment), method = lm, se=TRUE) +
theme_bw() +
facet_wrap(~Stage, ncol = 2) +
labs(title = "Living Crab Width vs. Molt Width By Stage", x = "Molt Width (mm)", y = "Living Crab Width (mm)",
color = "Stage", fill = legendTitle, shape = legendTitle)
#Function to calc stand errors and CI for error bars
## Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
## data: a data frame.
## measurevar: the name of a column that contains the variable to be summariezed
## groupvars: a vector containing names of columns that contain grouping variables
## na.rm: a boolean that indicates whether to ignore NA's
## conf.interval: the percent range of the confidence interval (default is 95%)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
summaryForErrorBars <- summarySE(data=molt, measurevar = "moltWidth",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$moltWidth + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$moltWidth - summaryForErrorBars$ci
moltW <- ggplot(summaryForErrorBars, aes(x = Treatment, y = moltWidth, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar))+
geom_point(size = 5) +
geom_jitter(data=molt, aes(x = Treatment, y = moltWidth, colour = Stage), alpha = 0.6,
position = position_jitter(0.4)) +
theme_bw(base_size = 18) +
# scale_color_manual(values = myColors) +
labs(x = legendTitle, y = "Molt Width (mm)", title = "Treatment Comparison of Molt Widths") +
scale_color_discrete(breaks=c("J6","J5","J4","J3","J2","J1"))
myColors <- c("#f26d54", "#36bddc","#337d40", "#b6471d", "#00c23f", "#0231d1")
#### SAME AS ABOVE BUT FOR MOLT LENGTHS, NOT WIDTHS
summaryForErrorBars <- summarySE(data=molt, measurevar = "moltLength",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$moltLength + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$moltLength - summaryForErrorBars$ci
moltL <- ggplot(summaryForErrorBars, aes(x = Treatment, y = moltLength, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar)) +
geom_point(size = 5) +
geom_jitter(data=molt, aes(x = Treatment, y = moltLength, colour = Stage), alpha = 0.4,
position = position_jitter(0.4)) +
theme_bw(base_size = 18) +
labs(x = legendTitle, y = "Molt Length (mm)", title = "Treatment Comparison of Molt Lengths")
ggarrange(moltW, moltL,
labels = c("A", "B"),
common.legend=TRUE,
legend="bottom")
##### MOLT STATS
###helpful functions
#isolate stages to compare each one
isolate <- function(dd, stageName){
single <- dd[dd$Stage == stageName,]
return(single)
}
#function for finding live crab width : molt width ratio
ratio <- function(dataName) {
slant <- dataName$carapaceWidth/dataName$moltWidth
return(slant)
}
#function to run for each stage
fitComp <- function(dd){
stat <- lme(Ratio~Stage + Treatment + Stage*Treatment, random = ~1|(Crab), data = dd, na.action = na.omit)
return(summary(stat))
}
#function to compare each stage for ONLY molt sizes
#for molt widths
statisticW <- function(dataName){
fitW <- lm(moltWidth ~ Treatment, data = dataName, na.action = na.omit)
return(summary(fitW))
}
#for molt lengths
statisticL <- function(dataName){
fitL <- lm(moltLength ~ Treatment, data = dataName, na.action = na.omit)
return(summary(fitL))
}
### MOLT WIDTHS AND LENGTHS
#overall
moltStatW <- lmer(moltWidth ~ Treatment + (1|Crab) + (1|Stage), data = molt)
summary(moltStatW)
#p value = 0.00019
moltStatL <- lmer(moltLength ~ Treatment + (1|Crab) + (1|Stage), data = molt)
summary(moltStatL)
#p value = 0.0146
#by stage
j1M <- isolate(molt, "J1")
j2M <- isolate(molt, "J2")
j3M <- isolate(molt, "J3")
j4M <- isolate(molt, "J4")
j5M <- isolate(molt, "J5")
j6M <- isolate(molt, "J6")
#compare j1
statisticW(j1M)
statisticL(j1M)
#compare j2
statisticW(j2M)
statisticL(j2M)
#compare j3
statisticW(j3M) #significant p value = 0.0044
statisticL(j3M)
#compare j4
statisticW(j4M) #significant p value = 0.0023
statisticL(j4M) #significant p value = 0.0025
#compare j5
statisticW(j5M) #significant p value = 0.0003
statisticL(j5M) #significant p value = 0.0005
#compare j6
statisticW(j6M)
statisticL(j6M)
### MOLTS & BODIES STATISTIC --> mostly body/molt ratio
#insert ratios into dataframe
total$Ratio <- ""
all <- ratio(total)
total$Ratio <- all
### AIC (overall)
fitAllT <- lmer(Ratio~Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllT, tol=1e-05) #TRUE
fitAllS <- lmer(Ratio~Stage + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllS, tol=1e-05) #TRUE
fitAllTS <- lmer(Ratio~Stage + Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllTS, tol=1e-05) #TRUE
fitAllTSTS <- lmer(Ratio~Stage + Treatment + Stage*Treatment + (1|Crab), data = total, na.action = na.omit)
isSingular(fitAllTSTS, tol=1e-05) #TRUE
AIC(fitAllT, fitAllS, fitAllTS, fitAllTSTS, k=2)
#fitAllTSTS has lowest AIC value
#run with treatment + stage + TR*ST parameter?
#### FOR BODY WIDTHS + MOLT WIDTHS
summaryForErrorBars <- summarySE(data=total, measurevar = "Ratio",
groupvars = c("Stage", "Treatment"), na.rm = TRUE)
summaryForErrorBars$upperBar <- summaryForErrorBars$Ratio + summaryForErrorBars$ci
summaryForErrorBars$lowerBar <- summaryForErrorBars$Ratio - summaryForErrorBars$ci
ggplot(summaryForErrorBars, aes(x = Treatment, y = Ratio, colour = Stage)) +
geom_errorbar(aes(ymin = lowerBar, ymax = upperBar)) +
geom_point(size = 5) +
geom_jitter(data=total, aes(x = Treatment, y = Ratio, colour = Stage), alpha = 0.4,
position = position_jitter(0.4)) +
theme_bw(base_size = 15) +
labs(x = legendTitle, y = "Body Width to Molt Width Ratio", title = "Treatment Comparison of Molt Lengths")
### per stage statistics
#sorry, you have to isolate the stages again (for the dataframe with LIVE crab widths, not MOLT LENGTHS)
#another isolation function until I can fix the weird nomenclature in 'total'
isolate2 <- function(dd, stageName){
single <- dd[dd$Stage == stageName,]
return(single)
}
j1T <- isolate2(total, "J1")
j2T <- isolate2(total, "J2")
j3T <- isolate2(total, "J3")
j4T <- isolate2(total, "J4")
j5T <- isolate2(total, "J5")
j6T <- isolate2(total, "J6")
#mixed model for each stage
fitComp(j1T)
stat1 <- lme(Ratio ~ Stage + Treatment + Stage*Treatment, random = ~1|Crab, data = j1T, na.action = na.omit)
|
#1
library(MASS)
library(dplyr)
rm(list=ls())
#2
sum(is.na(survey))
#3
newsurvey=na.omit(survey)
#4
table(newsurvey$Sex)
#5
table(newsurvey$W.Hnd)
#6
freq=table(newsurvey$W.Hnd)
rel=freq/nrow(newsurvey)
round(rel,2)
#7
c=newsurvey[which(newsurvey$W.Hnd=='Left'),]
cbind(table(c$Sex))
#8
c=newsurvey[which(newsurvey$Sex=='Male'& newsurvey$W.Hnd=='Left'),]
d=c[which(c$Smoke=='Never'),]
d
perc=nrow(d)/nrow(c)*100
perc
#9
range(newsurvey$Age)
#10
round(range(newsurvey$Age))
breaks<-round(seq(17,70,by=10))
Age_split<-cut(newsurvey$Age,breaks,right=F)
Age_split_freq<-table(Age_split)
Age_split_freq
#11
df<-cbind(Age_split_freq)
df
#12
which.max(Age_split_freq)
#13
range(newsurvey$Wr.Hnd)
breaks<-round(seq(13.0,23.2,by=1.0))
Wr.Hnd_split<-cut(newsurvey$Wr.Hnd,breaks,right=F)
Wr.Hnd_split_freq<-table(Wr.Hnd_split)
df<-cbind(Wr.Hnd_split_freq)
df
#14
rel=Wr.Hnd_split_freq/nrow(newsurvey)
cbind(round(rel,3))
#15
mean(newsurvey$Age)
#16
sd(newsurvey$Height)
var(newsurvey$Height)
#17
quantile(newsurvey$Wr.Hnd)
#18
cor(newsurvey$Wr.Hnd, newsurvey$Pulse)
#19
newsurvey%>%
group_by(Exer)%>%
summarise(mean(Age))
#20
newsurvey%>%
group_by(NW.Hnd)%>%
summarise(sd(Height))
#21
summary(newsurvey$Pulse)
|
/RStudio/SEM I/Lab6/6.R
|
no_license
|
r-harini/Code
|
R
| false
| false
| 1,231
|
r
|
#1
library(MASS)
library(dplyr)
rm(list=ls())
#2
sum(is.na(survey))
#3
newsurvey=na.omit(survey)
#4
table(newsurvey$Sex)
#5
table(newsurvey$W.Hnd)
#6
freq=table(newsurvey$W.Hnd)
rel=freq/nrow(newsurvey)
round(rel,2)
#7
c=newsurvey[which(newsurvey$W.Hnd=='Left'),]
cbind(table(c$Sex))
#8
c=newsurvey[which(newsurvey$Sex=='Male'& newsurvey$W.Hnd=='Left'),]
d=c[which(c$Smoke=='Never'),]
d
perc=nrow(d)/nrow(c)*100
perc
#9
range(newsurvey$Age)
#10
round(range(newsurvey$Age))
breaks<-round(seq(17,70,by=10))
Age_split<-cut(newsurvey$Age,breaks,right=F)
Age_split_freq<-table(Age_split)
Age_split_freq
#11
df<-cbind(Age_split_freq)
df
#12
which.max(Age_split_freq)
#13
range(newsurvey$Wr.Hnd)
breaks<-round(seq(13.0,23.2,by=1.0))
Wr.Hnd_split<-cut(newsurvey$Wr.Hnd,breaks,right=F)
Wr.Hnd_split_freq<-table(Wr.Hnd_split)
df<-cbind(Wr.Hnd_split_freq)
df
#14
rel=Wr.Hnd_split_freq/nrow(newsurvey)
cbind(round(rel,3))
#15
mean(newsurvey$Age)
#16
sd(newsurvey$Height)
var(newsurvey$Height)
#17
quantile(newsurvey$Wr.Hnd)
#18
cor(newsurvey$Wr.Hnd, newsurvey$Pulse)
#19
newsurvey%>%
group_by(Exer)%>%
summarise(mean(Age))
#20
newsurvey%>%
group_by(NW.Hnd)%>%
summarise(sd(Height))
#21
summary(newsurvey$Pulse)
|
##' class method bind data
##'
##' binds data
##'
##'
##' @keywords internal
##' @export bind
"bind" <- function(a,...)
{
UseMethod("bind")
}
## default is just to use rbind
##' data binding
##'
##' binds data
##'
##'
##' @keywords internal
##' @export
"bind.default" <- function(...)
{
rbind(...)
}
##' bind trackdata
##'
##' binds different trackdata objects together
##'
##'
##' @param \dots trackdata objects
##' @keywords methods
##' @export
"bind.trackdata" <- function(...)
{
## function to combine datasets into one single datasets
## any number of datasets accepted e.g. dcombine(x, y, z)
## where x, y, z are lists of the form $data, $index, $ftime
mat <- NULL
for(j in list(...)) {
if(is.matrix(j$data))
mat$data <- rbind(mat$data, j$data)
else mat$data <- c(mat$data, j$data)
mat$index <- rbind(mat$index, j$index)
if(!is.null(j$ftime))
mat$ftime <- rbind(mat$ftime, j$ftime)
}
## readjust the index times
diffinds <- mat$index[, 2] - mat$index[, 1] + 1
right <- cumsum(diffinds)
first.left <- diffinds - 1
left <- right - first.left
mat$index <- cbind(left, right)
if( version$major >= 5 ) {
oldClass(mat) <- "trackdata"
} else {
class(mat) <- "trackdata"
}
mat
}
|
/R/bind.R
|
no_license
|
IPS-LMU/emuR
|
R
| false
| false
| 1,276
|
r
|
##' class method bind data
##'
##' binds data
##'
##'
##' @keywords internal
##' @export bind
"bind" <- function(a,...)
{
UseMethod("bind")
}
## default is just to use rbind
##' data binding
##'
##' binds data
##'
##'
##' @keywords internal
##' @export
"bind.default" <- function(...)
{
rbind(...)
}
##' bind trackdata
##'
##' binds different trackdata objects together
##'
##'
##' @param \dots trackdata objects
##' @keywords methods
##' @export
"bind.trackdata" <- function(...)
{
## function to combine datasets into one single datasets
## any number of datasets accepted e.g. dcombine(x, y, z)
## where x, y, z are lists of the form $data, $index, $ftime
mat <- NULL
for(j in list(...)) {
if(is.matrix(j$data))
mat$data <- rbind(mat$data, j$data)
else mat$data <- c(mat$data, j$data)
mat$index <- rbind(mat$index, j$index)
if(!is.null(j$ftime))
mat$ftime <- rbind(mat$ftime, j$ftime)
}
## readjust the index times
diffinds <- mat$index[, 2] - mat$index[, 1] + 1
right <- cumsum(diffinds)
first.left <- diffinds - 1
left <- right - first.left
mat$index <- cbind(left, right)
if( version$major >= 5 ) {
oldClass(mat) <- "trackdata"
} else {
class(mat) <- "trackdata"
}
mat
}
|
#' APIS function that assigns with observed data
#'
#' This function performs the APIS procedure
#' @param off.genotype Offspring genotypes | Matrix (n*p)
#' where n = number of individuals
#' p = number of markers
#' rownames(offspring) = labels of offspring
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param sire.genotype Sire genotypes | Matrix (n*p)
#' where n = number of individuals
#' p = number of markers
#' rownames(sire) = labels of sires
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam.genotype Dam genotypes | Matrix (n*p)
#' where n = number of individuals
#' p = number of markers
#' rownames(dam) = labels of dams
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param error (default: 0) The assignment error rate accepted by the user
#' @param exclusion.threshold (default: ncol(off.genotype)) Threshold for exclusion (number of mismatches allowed)
#' @param preselect.Parent (default: FALSE) Preselection of parents. Can be FALSE, an integer or a vector of two integers (number of sires, numbers of dams)
#' @param nb.cores (default: 2) Number of cores to use. If you have more than 2 cores, you can use the "parallel" function detectCores()
#' @keywords assignment APIS
#' @return pedigree
#' @return a log file
#' @useDynLib APIS
#' @import doSNOW
#' @import foreach
#' @import parallel
#' @import doParallel
#' @import ggplot2
#' @import gridExtra
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' result <- APIS(off.genotype = APIS_offspring,
#' sire.genotype = APIS_sire,
#' dam.genotype = APIS_dam,
#' error = 0.05)
#' @export
APIS <- function(off.genotype, sire.genotype, dam.genotype, error = 0,
                 exclusion.threshold = ncol(off.genotype), preselect.Parent = FALSE, nb.cores = 2) {
  # Run the full APIS parentage-assignment procedure: validate the inputs,
  # report the theoretical assignment power of the marker panel, compute the
  # likelihood of every candidate parent pair (assignmentFortran) and apply
  # the automatic acceptance threshold (setThreshold).
  #
  # Returns (invisibly, matching the original behavior of ending on an
  # assignment) a list(pedigree, log, error).
  # ---- Input checks ----
  # The three genotype matrices must describe the same marker panel
  if (ncol(off.genotype) == ncol(sire.genotype) && ncol(off.genotype) == ncol(dam.genotype)) {
    cat("genotype matrices : OK")
    cat('\n')
  } else {
    stop("Your genotype matrices do not have the same number of markers")
  }
  # The number of allowed mismatches must lie in [0, number of markers]
  if ((0 <= exclusion.threshold) && (exclusion.threshold <= ncol(off.genotype))) {
    cat("exclusion threshold : OK")
    cat('\n')
  } else {
    stop("The exclusion threshold is greater than the number of markers")
  }
  # The accepted assignment error rate must be numeric in [0, 100];
  # is.numeric() is checked first so a non-numeric value never reaches the
  # comparisons (string comparison would otherwise succeed silently)
  if (is.numeric(error) && (0 <= error) && (error <= 100)) {
    cat("assignment error rate : OK")
    cat('\n')
  } else {
    stop("The assignment error rate limit is NEGATIVE")
  }
  # Drop markers for which no offspring is genotyped at all. The same columns
  # are removed from the parents so the three matrices stay aligned (the
  # previous code trimmed only the offspring, desynchronizing the panels).
  marker.allMissing <- apply(off.genotype, 2, function(X) all(X == "NA/NA"))
  marker_nonGeno <- which(marker.allMissing)
  if (length(marker_nonGeno) == 0) {
    cat("All the markers are genotyped")
    cat('\n')
  } else {
    off.genotype <- off.genotype[, -marker_nonGeno, drop = FALSE]
    sire.genotype <- sire.genotype[, -marker_nonGeno, drop = FALSE]
    dam.genotype <- dam.genotype[, -marker_nonGeno, drop = FALSE]
    cat(paste0("marker(s) ", paste(marker_nonGeno, collapse = ", "), " have no genotypes"))
    cat('\n')
  }
  # ---- Theoretical assignment power ----
  P <- assignmentPower(sire = sire.genotype, dam = dam.genotype)
  P2 <- substr(as.character(100 * P), 1, 6)
  cat("The assignment power of your marker set is ", P2, "%", sep = "")
  cat('\n')
  if (P >= 0.99) {
    cat("Theoretical assignment power : OK")
    cat('\n')
  } else {
    message("WARNING! Your marker set is not powerful enough!")
  }
  # ---- Assignment with APIS ----
  assignResult <- assignmentFortran(offspring = off.genotype,
                                    sire = sire.genotype,
                                    dam = dam.genotype,
                                    thresh = exclusion.threshold,
                                    preselect.Parent = preselect.Parent,
                                    nb.cores = nb.cores)
  # NOTE: the original passed assignResult$exclu, which does not partial-match
  # the list element 'log.exclu' and therefore silently passed NULL
  apisResult <- setThreshold(ped.log = assignResult$log.mendel, ped.exclu = assignResult$log.exclu,
                             nb.mrk = assignResult$nb.mrk, error = error)
  pedigree <- apisResult$pedigree
  log <- apisResult$log
  # ---- Summary ----
  cat('--------------------------------------', sep = '\n')
  cat('             APIS SUMMARY             ', sep = '\n')
  cat('--------------------------------------', sep = '\n')
  cat('Theoretical assignment power of the marker set : ', P2, "%", sep = "")
  cat('\n')
  cat('Assignment error rate accepted : ', error)
  cat('\n')
  # Share of offspring with an assigned sire (sire and dam are set together)
  assignmentRate <- length(pedigree$sire[which(is.na(pedigree$sire) == FALSE)]) / nrow(pedigree)
  AR <- substr(as.character(100 * assignmentRate), 1, 6)
  cat('Assignment rate : ', AR, '%', sep = "")
  cat('\n')
  # Return invisibly, as an assignment-as-last-expression did originally
  output <- list(pedigree = pedigree, log = log, error = error)
  invisible(output)
}
# ----------------------------------------------------------------------------------------------------------------
#' Assignment function to obtain the average Mendelian transmission probabilities using a Fortran library
#'
#' This function calculates the average Mendelian transmission probabilities
#' @param offspring Offspring genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#' rownames(offspring) = labels of offspring
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param sire Sire genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#' rownames(sire) = labels of sires
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam Dam genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#' rownames(dam) = labels of dams
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param thresh (default: ncol(offspring)) Threshold for exclusion (number of mismatches allowed)
#' @param preselect.Parent (default: FALSE) Preselection of parents. Can be FALSE, an integer or a vector of two integers (number of sires, numbers of dams)
#' @param nb.cores (default: 2) Number of cores to use. If you have more than 2 cores, you can use the "parallel" function detectCores()
#' @keywords assignment
#' @return intermediate pedigree
#' @return log file for Mendelian transmission probabilities
#' @return log file for exclusion
#' @useDynLib APIS
#' @import doSNOW
#' @import foreach
#' @import parallel
#' @import doParallel
#' @importFrom "stats" "median" "quantile"
#' @importFrom "utils" "setTxtProgressBar" "txtProgressBar"
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' assignment <- assignmentFortran(APIS_offspring, APIS_sire, APIS_dam)
#' @export
assignmentFortran <- function(offspring, sire, dam, thresh = ncol(offspring),
                              preselect.Parent = FALSE, nb.cores = 2) {
  # For every offspring, compute the average Mendelian transmission
  # probability and the mismatch count of every candidate sire x dam pair
  # (likelihoods computed by the compiled Fortran routine
  # "likelihoodCalculation"), and record the three best pairs under each
  # criterion.
  #
  # NOTE(review): 'thresh' is kept for interface compatibility but is not
  # used here; mismatch filtering happens downstream in setThreshold().
  #
  # Returns list(pedigree, log.mendel, log.exclu, nb.mrk).
  # Stop if the three genotype matrices do not share the same marker panel
  # (the original used '&', which let partial mismatches through)
  if (ncol(offspring) != ncol(sire) || ncol(offspring) != ncol(dam))
    stop('Genotypes must have the same number of markers')
  offspring.name <- rownames(offspring)
  # Number of genotyped (non-missing) markers per offspring; used later to
  # normalize the log-likelihood into an average per-marker probability
  genotyped.mrk <- as.numeric(apply(offspring, 1, function(X) {length(X[X != 'NA/NA'])}))
  # Dummy bindings for the foreach iteration variables (R CMD check)
  i <- NULL
  off <- NULL
  # Estimate allele frequencies from the offspring population
  cat('Estimation of allele frequencies')
  cat('\n')
  Freq <- allFreq(offspring)
  x.col <- ncol(offspring)
  iterations <- nrow(offspring)
  # Stacked parent genotypes/sexes, used when pre-selecting likely parents
  parent.genotype <- rbind(sire, dam)
  parent.sex <- c(rep("M", times = nrow(sire)), rep("F", times = nrow(dam)))
  cat('Recoding datasets')
  cat('\n')
  # Recode one genotype "All1/All2" into its two integer allele codes
  recodeFortran <- function(mrk, list.mrk) {
    tmp <- unlist(strsplit(mrk, '/'))
    all1 <- list.mrk[which(list.mrk[,1] == tmp[1]), 2]
    all2 <- list.mrk[which(list.mrk[,1] == tmp[2]), 2]
    return(c(all1, all2))
  }
  # Recode a frequency column name "Freq_X" into the integer code of allele X
  recodeFreq <- function(name.col, list.mrk) {
    tmp <- unlist(strsplit(name.col, '_'))[2]
    return(list.mrk[which(list.mrk[,1] == tmp), 2])
  }
  # Integer code table: every observed allele gets a positive code, "NA" gets 0.
  # Guard the removal: x[-integer(0)] would drop EVERY element when no
  # genotype is missing in the whole data set.
  variant <- unique(unlist(strsplit(as.vector(rbind(offspring, sire, dam)), '/')))
  na.variant <- which(variant == "NA")
  if (length(na.variant) > 0) {
    variant <- variant[-na.variant]
  }
  variant.corres <- data.frame(variant = as.character(variant),
                               recode = c(1:length(variant)))
  variant.corres$variant <- as.character(variant.corres$variant)
  variant.corres <- rbind(variant.corres, c(as.character("NA"), 0))
  cat("Number of cores used :", nb.cores)
  cat("\n")
  cl <- parallel::makeCluster(nb.cores)
  doSNOW::registerDoSNOW(cl)
  # Recode offspring genotypes (parallel, with a progress bar)
  pb.recodeOff <- txtProgressBar(min = 0, max = iterations, char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeOff, n)}
  opts <- list(progress = progress)
  recode.off <- foreach(i = 1:iterations, .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(offspring[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  cat('\n')
  # Recode sire genotypes
  pb.recodeSire <- txtProgressBar(min = 0, max = nrow(sire), char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeSire, n)}
  opts <- list(progress = progress)
  recode.sire <- foreach(i = 1:nrow(sire), .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(sire[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  cat('\n')
  # Recode dam genotypes
  pb.recodeDam <- txtProgressBar(min = 0, max = nrow(dam), char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeDam, n)}
  opts <- list(progress = progress)
  recode.dam <- foreach(i = 1:nrow(dam), .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(dam[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  parallel::stopCluster(cl)
  cat('\n')
  # foreach collapses to a plain vector when there is a single row: coerce
  # back to a one-row matrix. is.matrix() is used because, since R 4.0.0,
  # class(<matrix>) is c("matrix", "array") and 'class(x) != "matrix"' yields
  # a length-2 condition that errors inside if().
  if (!is.matrix(recode.off)) {
    recode.off <- t(as.matrix(recode.off))
  }
  if (!is.matrix(recode.sire)) {
    recode.sire <- t(as.matrix(recode.sire))
  }
  if (!is.matrix(recode.dam)) {
    recode.dam <- t(as.matrix(recode.dam))
  }
  rownames(recode.off) <- rownames(offspring)
  rownames(recode.sire) <- rownames(sire)
  rownames(recode.dam) <- rownames(dam)
  # Reshape the frequency table for Fortran: drop the "NA" frequency column
  # (guarded: it is absent when no genotype is missing), keep only the
  # frequency half of the table, recode column names to integer allele codes,
  # append zero-frequency columns for alleles absent from the offspring, then
  # order columns by allele code and drop the header row
  freq.na <- which(colnames(Freq) == "Freq_NA")
  if (length(freq.na) > 0) {
    Freq <- Freq[, -freq.na]
  }
  Freq <- Freq[, c((floor(ncol(Freq)/2)+2):ncol(Freq))]
  colnames(Freq) <- sapply(colnames(Freq), recodeFreq, list.mrk = variant.corres)
  Freq <- rbind(colnames(Freq), Freq)
  add.freq <- as.numeric(variant.corres$recode[-which(variant.corres$recode %in% Freq[1,])])
  zero.code <- which(add.freq == 0)
  if (length(zero.code) > 0) {
    add.freq <- add.freq[-zero.code]
  }
  add.freqMatrix <- matrix(data = c(add.freq, rep(0, times = length(add.freq) * (nrow(Freq) - 1))),
                           ncol = length(add.freq), nrow = nrow(Freq), byrow = TRUE)
  Freq <- apply(Freq, 2, as.numeric)
  Freq <- cbind(Freq, add.freqMatrix)
  Freq <- Freq[, order(Freq[1,])]
  Freq <- Freq[-1,]
  # ---- Assignment ----
  cat('Assignment')
  cat('\n')
  cl <- parallel::makeCluster(nb.cores)
  doSNOW::registerDoSNOW(cl)
  pb.assignment <- txtProgressBar(min = 0, max = iterations, char = "><(((*> ", style = 3)
  progress <- function(n) {setTxtProgressBar(pb.assignment, n)}
  opts <- list(progress = progress)
  A <- foreach(off = 1:iterations, .multicombine = TRUE,
               .packages = c('foreach', 'doParallel', 'doSNOW'), .options.snow = opts) %dopar% { # For each offspring
    tmp <- recode.off[off,]
    # Candidate parents: either all of them, or a pre-selected subset.
    # isFALSE() is required because preselect.Parent may be a length-2 vector
    # (the original '== F' comparison then errored inside if())
    if (isFALSE(preselect.Parent)) {
      potential.sire <- rownames(sire)
      potential.dam <- rownames(dam)
    } else {
      potential.parents <- selectParents(offspring[off,], parent.genotype = parent.genotype,
                                         parent.sex = parent.sex, n.Parent = preselect.Parent)
      potential.sire <- potential.parents$sire_toKeep
      potential.dam <- potential.parents$dam_toKeep
    }
    tmp.sire <- recode.sire[which(rownames(recode.sire) %in% potential.sire),]
    tmp.dam <- recode.dam[which(rownames(recode.dam) %in% potential.dam),]
    # One result row per sire x dam pair
    res <- matrix(NA, nrow = (length(potential.sire)*length(potential.dam)), ncol = 4)
    colnames(res) <- c('sire', 'dam', 'score_exclu', 'P_mendel')
    res[,1] <- rep(potential.sire, each = length(potential.dam))
    res[,2] <- rep(potential.dam, times = length(potential.sire))
    # Prepare Fortran inputs / output buffers
    nMrk <- as.integer(x.col)
    nSires <- as.integer(length(potential.sire))
    nDams <- as.integer(length(potential.dam))
    nVariant <- as.integer(ncol(Freq))
    output_sires <- vector(mode = 'integer', length = nSires*nDams)
    output_dams <- vector(mode = 'integer', length = nSires*nDams)
    output_score <- vector(mode = 'numeric', length = nSires*nDams)
    output_miss <- vector(mode = 'numeric', length = nSires*nDams)
    outputFortran <- .Fortran("likelihoodCalculation", as.integer(tmp), as.integer(tmp.sire),
                              as.integer(tmp.dam), as.integer(nMrk), as.integer(nVariant), as.integer(nSires), as.integer(nDams),
                              as.double(Freq), output_sires, output_dams, as.double(output_score), as.integer(output_miss))
    # Slot 12 = mismatch counts, slot 11 = log-likelihoods; normalize the
    # likelihood by the offspring's number of genotyped markers
    res[,3] <- as.integer(outputFortran[[12]])
    res[,4] <- exp(as.numeric(as.numeric(outputFortran[[11]]))/genotyped.mrk[off])
    # Convert the character matrix into typed columns
    res <- as.data.frame(res)
    res$sire <- as.character(res$sire)
    res$dam <- as.character(res$dam)
    res$score_exclu <- as.numeric(as.character(res$score_exclu))
    res$P_mendel <- as.numeric(as.character(res$P_mendel))
    # Best three pairs by Mendelian transmission probability
    res2 <- res[order(res[,4], res[,3], decreasing = TRUE),]
    delta_P12 <- res2[1,4] - res2[2,4]
    delta_P23 <- res2[2,4] - res2[3,4]
    p_fin <- res2[1,1]
    m_fin <- res2[1,2]
    out.log <- unlist(c(offspring.name[off], genotyped.mrk[off], p_fin, m_fin, res2[1, 3:4], res2[2,1:4], delta_P12, res2[3,1:4], delta_P23))
    # Best three pairs by number of mismatches (ties broken by likelihood)
    res2 <- res[order(res[,3], -res[,4], decreasing = FALSE),]
    out.exclu <- unlist(c(offspring.name[off], genotyped.mrk[off], res2[1,1:3], res2[2,1:3], res2[3,1:3]))
    a <- list(out.log, out.exclu)
  }
  parallel::stopCluster(cl)
  cat('\n')
  # Collect the per-offspring Mendelian log into a typed data frame
  ped.log <- as.data.frame(t(as.data.frame(lapply(A, function(X) {t <- X[[1]]}))))
  colnames(ped.log) <- c('offspring', 'mrk_genotype', 'sire1', 'dam1', 'mismatch1', 'mendel1',
                         'sire2', 'dam2', 'mismatch2', 'mendel2', 'delta_Pmendel12',
                         'sire3', 'dam3', 'mismatch3', 'mendel3', 'delta_Pmendel23')
  rownames(ped.log) <- c(1:nrow(ped.log))
  ped.log[,] <- sapply(ped.log[,c(1:ncol(ped.log))], as.character)
  ped.log[,c(2, 5:6, 9:11, 14:16)] <- sapply(ped.log[,c(2, 5:6, 9:11, 14:16)], as.numeric)
  # Collect the per-offspring exclusion log
  ped.exclu <- as.data.frame(t(as.data.frame(lapply(A, function(X) {t <- X[[2]]}))))
  colnames(ped.exclu) <- c('off', 'mrk_genotype','sire1', 'dam1', 'mismatch1',
                           'sire2', 'dam2', 'mismatch2',
                           'sire3', 'dam3', 'mismatch3')
  rownames(ped.exclu) <- c(1:nrow(ped.exclu))
  ped.exclu[,c(2,5,8,11)] <- sapply(ped.exclu[,c(2,5,8,11)], as.numeric)
  # Intermediate pedigree: best pair per offspring, before thresholding
  ped <- ped.log[,c(1,3:4)]
  return(list(pedigree = ped, log.mendel = ped.log, log.exclu = ped.exclu, nb.mrk = ncol(offspring)))
}
# -------------------------------------------------------------------------------------------------------------------
#' Set the APIS threshold
#'
#' This function calculates the threshold for APIS
#' @param ped.log log.like for assignment function
#' @param ped.exclu log.exclu for assignment function
#' @param nb.mrk Number of markers
#' @param error (default: NULL) The assignment error rate accepted by the user
#' @keywords assignment
#' @return pedigree
#' @return log file
#' @import ggplot2
#' @import gridExtra
#' @importFrom "stats" "median" "quantile"
#' @export
setThreshold <- function(ped.log, ped.exclu, nb.mrk, error = NULL) {
  # Turn the raw Mendelian-likelihood log of assignmentFortran() into a final
  # pedigree, deriving the acceptance threshold automatically from the
  # user-accepted assignment error rate.
  #
  # ped.log: three best parent pairs per offspring (Mendelian criterion)
  # ped.exclu: three best pairs by exclusion
  #            NOTE(review): accepted for interface compatibility, not used here
  # nb.mrk: number of markers -- NOTE(review): not used in this function body
  # error: accepted assignment error rate; prompted interactively when NULL
  #
  # Returns list(pedigree, log, error) and draws the three diagnostic plots.
  cat('===================================================', sep = '\n')
  cat(' ___ _____ _ _____ ', sep = '\n')
  cat(' / | | _ \\ | | / ___/ ', sep = '\n')
  cat(' / /| | | |_| | | | | |___ ', sep = '\n')
  cat(' / / | | | ___/ | | \\ __ \\ ', sep = '\n')
  cat(' / / | | | | | | ___| | ', sep = '\n')
  cat(' /_/ |_| |_| |_| /_____/ ', sep = '\n')
  cat('\n')
  cat('---------------------------------------------------', sep = '\n')
  cat('AUTO-ADAPTIVE PARENTAGE INFERENCE SOFTWARE', sep = '\n')
  cat('---------------------------------------------------', sep = '\n')
  # Dummy binding for the ggplot2 aesthetic variable (R CMD check)
  P <- NULL
  # Pedigree skeleton: one row per offspring
  ped <- as.data.frame(matrix(NA, ncol = 3, nrow = nrow(ped.log)))
  colnames(ped) <- c('off', 'sire', 'dam')
  ped[,1] <- ped.log[,1]
  # Interleave best/second-best values per offspring for the diagnostic plots
  mendel <- rep(NA, times = 2*nrow(ped.log))
  mendel[seq(1, length(mendel), 2)] <- ped.log$mendel1
  mendel[seq(2, length(mendel), 2)] <- ped.log$mendel2
  data.mendel <- data.frame(mendel = mendel,
                            P = rep(c("P1", "P2"), times = nrow(ped.log)))
  delta <- rep(NA, times = 2*nrow(ped.log))
  delta[seq(1, length(delta), 2)] <- ped.log$delta_Pmendel12
  delta[seq(2, length(delta), 2)] <- ped.log$delta_Pmendel23
  data.delta <- data.frame(delta = delta,
                           P = rep(c("delta1", "delta2"), times = nrow(ped.log)))
  miss <- rep(NA, times = 2*nrow(ped.log))
  miss[seq(1, length(miss), 2)] <- ped.log$mismatch1
  miss[seq(2, length(miss), 2)] <- ped.log$mismatch2
  data.miss <- data.frame(miss = miss,
                          P = rep(c("mismatch1", "mismatch2"), times = nrow(ped.log)))
  # Prompt for the accepted error rate when it was not supplied
  if (is.null(error)) {
    error <- as.numeric(readline(prompt = 'What assignment error rate do you accept : '))
  }
  # Fixed-point estimation of the number of offspring with at least one
  # ungenotyped true parent, from the overlap of the best (P1) and
  # second-best (P2) probability distributions.
  # NOTE(review): heuristic at the core of the APIS method -- verify against
  # the published description before altering any expression below.
  med_0 <- median(ped.log$mendel2)
  mendel2_o <- sort(ped.log$mendel2)
  N1_0 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_0)])
  N0 <- length(ped.log$mendel1)
  while(TRUE) {
    seuil <- min(ped.log$mendel2)
    N2_l <- round(length(ped.log$mendel2[which(ped.log$mendel2<=seuil)]) -
                    (length(ped.log$mendel3[which(ped.log$mendel3<=seuil)]) * ((2*N1_0)/N0)))
    cpt <- 1
    # Raise the cutoff until half of the corrected P2 mass lies below it
    while(N2_l<((N0-(2*N1_0))/2) & cpt<=nrow(ped.log)) {
      seuil <- mendel2_o[cpt]
      N2_l <- round(length(ped.log$mendel2[which(ped.log$mendel2<=seuil)]) -
                      (length(ped.log$mendel3[which(ped.log$mendel3<=seuil)]) * ((2*N1_0)/N0)))
      cpt <- cpt+1
    }
    med_1 <- seuil
    N1_0 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_0)])
    N1_1 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_1)])
    diffN <- N1_1 - N1_0
    # Converged once the estimate stops moving
    if (diffN<=1) {
      break
    } else {
      med_0 <- med_1
    }
  }
  # Lower bound of the estimate, and cap the upper bound at half the offspring
  N1_1min <- length(which(ped.log$mendel1<=median(ped.log$mendel2)))
  N1_1 <- ifelse(test = N1_1>round(nrow(ped.log)/2), yes = round(nrow(ped.log)/2), no = N1_1)
  cat('Estimated number of offspring with at least one missing parent : between',2*N1_1min,'and',2*N1_1)
  cat('\n')
  #####-----------------------------------------------------
  ##### THRESHOLD
  #####-----------------------------------------------------
  if ((2*N1_1)<=round(error*nrow(ped.log))) {
    # Fewer missing-parent offspring than the accepted error: accept every
    # best pair outright
    cat('--------------------------------------', sep = '\n')
    cat('      BEST MENDELIAN PROBABILITY      ', sep = '\n')
    cat('--------------------------------------', sep = '\n')
    ped[,2:3] <- ped.log[,3:4]
    ped$assign <- 'assign'
    thresh.mendel <- min(ped.log$delta_Pmendel12)
  } else {
    # Otherwise accept a pair only when its delta of Mendelian probabilities
    # clears the (1 - error) quantile of the null deltas
    cat('--------------------------------------', sep = '\n')
    cat('    DELTA OF MENDELIAN PROBABILITY    ', sep = '\n')
    cat('--------------------------------------', sep = '\n')
    s.delta23 <- sort(ped.log$delta_Pmendel23, decreasing = TRUE)
    thresh.mendel <- quantile(s.delta23[1:(nrow(ped.log) - 2*N1_1min)], probs = (1-error), type = 5, na.rm = TRUE)
    cat('Threshold for delta :', thresh.mendel)
    cat('\n')
    ped[,2:3] <- ped.log[,c(3:4)]
    ped$assign <- ifelse(test = ped.log$delta_Pmendel12 >= thresh.mendel, yes = 'assign', no = 'no.assign')
  }
  # Blank the parents of non-assigned offspring and drop the helper column
  ped.final <- ped
  ped.final[which(ped.final$assign == 'no.assign'), 2:3] <- c(NA, NA)
  ped.final <- ped.final[,-4]
  # Diagnostic plots: probability, delta and mismatch distributions
  plot_mendel <- ggplot2::ggplot(data = data.mendel, aes(x = mendel, fill = P)) +
    geom_histogram(data = subset(data.mendel, P == 'P2'), bins = 30) +
    geom_histogram(data = subset(data.mendel, P == 'P1'), alpha = 0.8, bins = 30) +
    xlab(label = "average Mendelian tranmission probability") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Mendelian probability"))
  plot_delta <- ggplot2::ggplot(data = data.delta, aes(x = delta, fill = P)) +
    geom_histogram(data = subset(data.delta, P == 'delta2'), bins = 30) +
    geom_histogram(data = subset(data.delta, P == 'delta1'), alpha = 0.8, bins = 30) +
    geom_vline(xintercept = thresh.mendel) +
    xlab(label = "delta") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Delta"))
  plot_miss <- ggplot2::ggplot(data = data.miss, aes(x = miss, fill = P)) +
    geom_histogram(data = subset(data.miss, P == 'mismatch2'), bins = 30) +
    geom_histogram(data = subset(data.miss, P == 'mismatch1'), alpha = 0.8, bins = 30) +
    xlab(label = "number of mismatches") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "missmatches"))
  gridExtra::grid.arrange(plot_delta, plot_mendel, plot_miss, nrow = 3, ncol = 1)
  return(list(pedigree = ped.final, log = ped.log, error = error))
}
# -------------------------------------------------------------------------------------------------------------------
#' Estimate the allele frequencies
#'
#' This function estimates allele frequencies
#' @param genotype A matrix of genotypes (n*p)
#' n = number of individuals
#' p = number of markers (coded as "All1/All2", ex: "A/A" or "NA/NA" for missing genotype)
#' @keywords allele frequencies
#' @examples
#' data("APIS_offspring")
#' freq <- allFreq(APIS_offspring)
#' @return allele frequencies
#' @export
allFreq <- function(genotype) {
  # Estimate allele counts and frequencies from a genotype matrix whose cells
  # are coded "All1/All2" ("NA/NA" for a missing genotype).
  #
  # Returns a matrix with one row per marker containing, in order: the count
  # of each observed allele (including the "NA" pseudo-allele when present),
  # the total allele count ('tot'), and the allele frequencies ('Freq_*')
  # computed over genotyped alleles only.
  # Split each "All1/All2" genotype into two one-allele columns
  mat.geno <- matrix(NA, nrow = nrow(genotype), ncol = 2*ncol(genotype))
  imp <- seq(1, ncol(mat.geno), 2)
  for (i in seq_len(ncol(genotype))) {
    tmp <- strsplit(genotype[,i], split = '/', fixed = TRUE)
    # Each genotype splits into exactly two alleles, so rbind gives an n x 2 block
    mat.geno[,(imp[i]:(imp[i]+1))] <- do.call(rbind, tmp)
  }
  # All distinct alleles observed anywhere in the matrix
  variant <- sort(unique(unlist(as.list(apply(mat.geno, 2, unique)))))
  # Allele counts: one row per marker, one column per allele
  mat.res <- matrix(0, nrow = ncol(genotype), ncol = length(variant))
  rownames(mat.res) <- colnames(genotype)
  colnames(mat.res) <- variant
  for (n in seq_len(nrow(mat.res))) {
    tmp <- table(mat.geno[,(imp[n]:(imp[n]+1))])
    mat.res[n, match(names(tmp), colnames(mat.res))] <- tmp
  }
  # Frequencies over genotyped alleles only ("NA" excluded from the
  # denominator); drop = FALSE keeps the subset a matrix when a single
  # non-missing allele column remains (rowSums on a vector would error)
  mat.freq <- mat.res/(rowSums(mat.res[, which(colnames(mat.res) != 'NA'), drop = FALSE]))
  colnames(mat.freq) <- paste0('Freq_', colnames(mat.res))
  # counts | total | frequencies
  res <- cbind(mat.res, tot = rowSums(mat.res), mat.freq)
  return(res)
}
# ------------------------------------------------------------------------------------------------------------------
#' Calculate the theoretical assignment power
#'
#' This function calculates the theoretical assignment power of the marker set
#' @param sire Sire genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#' rownames(sire) = labels of sires
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam Dam genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#' rownames(dam) = labels of dams
#' marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @return Theoretical assignment power of the marker set
#' @examples
#' data("APIS_sire")
#' data("APIS_dam")
#' assignmentPower(APIS_sire, APIS_dam)
#' @keywords assignment exclusion power
#' @export
assignmentPower <- function(sire, dam) {
  # Theoretical assignment power of the marker panel, computed from parental
  # allele frequencies as proposed in Vandeputte, M. (2012).
  #
  # sire, dam: parent genotype matrices coded "All1/All2" ("NA/NA" = missing)
  # Returns a single numeric value in [0, 1].
  parents <- rbind(sire, dam)
  # Allele frequencies estimated over the whole parental population
  freq.table <- allFreq(as.matrix(parents))
  tot.col <- which(colnames(freq.table) == 'tot')
  freq.calc <- as.data.frame(freq.table[, ((tot.col + 1):ncol(freq.table))])
  # Missing genotypes do not contribute to the power computation
  na.col <- which(colnames(freq.calc) == "Freq_NA")
  if (length(na.col) != 0) {
    freq.calc <- freq.calc[, -na.col]
  }
  mcol <- ncol(freq.calc)
  # Per-marker sums of powers of the allele frequencies
  s2 <- rowSums(freq.calc[, 1:mcol]^2)
  s3 <- rowSums(freq.calc[, 1:mcol]^3)
  s4 <- rowSums(freq.calc[, 1:mcol]^4)
  s5 <- rowSums(freq.calc[, 1:mcol]^5)
  s6 <- rowSums(freq.calc[, 1:mcol]^6)
  # Per-marker exclusion probabilities: one parent (Q1i) and parent pair (Q3i)
  q1i <- 1 - 2*s2 + s3 + 2*s4 - 2*s2^2 - 3*s5 + 3*s3*s2
  q3i <- 1 + 4*s4 - 4*s5 - 3*s6 - 8*s2^2 + 2*s3^2 + 8*s3*s2
  # Combine over markers, then over the candidate parent population
  Q1 <- 1 - prod(1 - q1i)
  Q3 <- 1 - prod(1 - q3i)
  Pu <- Q1^(nrow(dam) + nrow(sire) - 2) * Q3^((nrow(dam) - 1) * (nrow(sire) - 1))
  return(Pu)
}
# ------------------------------------------------------------------------------------------------------------------
#' Establish personal threshold
#'
#' This function allows the user to set up his own threshold
#' @param APIS.result APIS function output
#' @param method the method for the new threshold | 'delta' for deltas,
#' 'Pmendel' for Mendelian probabilities, 'exclusion' for mismatches
#' @param threshold personal threshold | default values are implemented
#' @return new pedigree from the new threshold
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' result <- APIS(off.genotype = APIS_offspring,
#' sire.genotype = APIS_sire,
#' dam.genotype = APIS_dam,
#' error = 0.05)
#'
#' new.result <- personalThreshold(result, method = 'Pmendel')
#' @keywords assignment APIS threshold
#' @import ggplot2
#' @import gridExtra
#' @importFrom "stats" "median" "quantile"
#' @export
personalThreshold <- function(APIS.result, method, threshold = NULL) {
  # Re-run the assignment decision of an APIS() result with a user-chosen
  # threshold instead of the automatic one, redraw the diagnostic plots and
  # print a summary.
  #
  # APIS.result: output of APIS() -- list with $pedigree, $log, $error
  # method: 'delta' (delta of Mendelian probabilities), 'Pmendel' (Mendelian
  #         transmission probability) or 'exclusion' (mismatch count)
  # threshold: user threshold; when NULL a per-method default is derived
  #
  # Returns list(pedigree, log, error, threshold).
  # Unpack the APIS result
  pedigree <- APIS.result$pedigree
  log <- APIS.result$log
  error <- APIS.result$error
  # Pick the log column the threshold is compared against
  # (column positions follow the log layout built in assignmentFortran)
  if (method == "delta") {
    # Column 11 = delta_Pmendel12 (gap between best and second-best pair)
    col_toKeep <- 11
    if (is.null(threshold)) {
      # No default exists for 'delta': return the APIS result unchanged
      return(APIS.result)
    } else {
      threshold <- threshold
    }
  } else if (method == "Pmendel") {
    # Column 6 = mendel1 (best average Mendelian transmission probability);
    # default threshold: (1 - error) quantile of column 10 (mendel2),
    # i.e. of the second-best probabilities
    col_toKeep <- 6
    if (is.null(threshold)) {
      threshold <- quantile(x = log[,10], probs = (1 - error), type = 5)
    } else {
      threshold <- threshold
    }
  } else if (method == "exclusion") {
    # Column 5 = mismatch1 (mismatches of the best pair);
    # default threshold: 5% of the largest number of genotyped markers
    col_toKeep <- 5
    if (is.null(threshold)) {
      threshold <- ceiling(0.05 * max(log$mrk_genotype))
    } else {
      threshold <- threshold
    }
  } else {
    stop("Invalid method")
  }
  # Re-assign the offspring: keep the best pair only when it clears the
  # threshold (mismatches: at most the threshold; otherwise: at least it)
  tmp <- log[,c(1,3:4)]
  if (method == 'exclusion') {
    tmp$assign <- ifelse(test = log[,col_toKeep] <= threshold, yes = 'assign', no = 'no.assign')
  } else {
    tmp$assign <- ifelse(test = log[,col_toKeep] >= threshold, yes = 'assign', no = 'no.assign')
  }
  tmp[which(tmp$assign == 'no.assign'), 2:3] <- c(NA, NA)
  pedigree <- tmp[,1:3]
  # Interleave best/second-best values per offspring for the plots
  mendel <- rep(NA, times = 2*nrow(log))
  mendel[seq(1, length(mendel), 2)] <- log$mendel1
  mendel[seq(2, length(mendel), 2)] <- log$mendel2
  data.mendel <- data.frame(mendel = mendel,
                            P = rep(c("P1", "P2"), times = nrow(log)))
  delta <- rep(NA, times = 2*nrow(log))
  delta[seq(1, length(delta), 2)] <- log$delta_Pmendel12
  delta[seq(2, length(delta), 2)] <- log$delta_Pmendel23
  data.delta <- data.frame(delta = delta,
                           P = rep(c("delta1", "delta2"), times = nrow(log)))
  miss <- rep(NA, times = 2*nrow(log))
  miss[seq(1, length(miss), 2)] <- log$mismatch1
  miss[seq(2, length(miss), 2)] <- log$mismatch2
  data.miss <- data.frame(miss = miss,
                          P = rep(c("mismatch1", "mismatch2"), times = nrow(log)))
  # Distribution plots (same layout as setThreshold)
  plot_mendel <- ggplot2::ggplot(data = data.mendel, aes(x = mendel, fill = P)) +
    geom_histogram(data = subset(data.mendel, P == 'P2'), bins = 30) +
    geom_histogram(data = subset(data.mendel, P == 'P1'), alpha = 0.8, bins = 30) +
    xlab(label = "average Mendelian tranmission probability") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Mendelian probability"))
  plot_delta <- ggplot2::ggplot(data = data.delta, aes(x = delta, fill = P)) +
    geom_histogram(data = subset(data.delta, P == 'delta2'), bins = 30) +
    geom_histogram(data = subset(data.delta, P == 'delta1'), alpha = 0.8, bins = 30) +
    xlab(label = "delta") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Delta"))
  plot_miss <- ggplot2::ggplot(data = data.miss, aes(x = miss, fill = P)) +
    geom_histogram(data = subset(data.miss, P == 'mismatch2'), bins = 30) +
    geom_histogram(data = subset(data.miss, P == 'mismatch1'), alpha = 0.8, bins = 30) +
    xlab(label = "number of mismatches") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "missmatches"))
  # Draw the user threshold on the plot matching the chosen method
  if (method == 'delta') {
    plot_delta <- plot_delta +
      geom_vline(xintercept = threshold)
  } else if (method == 'Pmendel') {
    plot_mendel <- plot_mendel +
      geom_vline(xintercept = threshold)
  } else {
    plot_miss <- plot_miss +
      geom_vline(xintercept = threshold)
  }
  gridExtra::grid.arrange(plot_delta, plot_mendel, plot_miss, nrow = 3, ncol = 1)
  # Print the summary of the re-assignment
  cat('--------------------------------------', sep = '\n')
  cat('             APIS SUMMARY             ', sep = '\n')
  cat('--------------------------------------', sep = '\n')
  cat("Method for personal threshold : ", method, sep = "")
  cat('\n')
  cat('Threshold : ', threshold, sep = "")
  cat('\n')
  # Share of offspring with an assigned sire (sire and dam are set together)
  assignmentRate <- length(pedigree$sire[which(is.na(pedigree$sire) == F)]) / nrow(pedigree)
  AR <- substr(as.character(100 * assignmentRate), 1, 6)
  cat('Assignment rate : ', AR, '%', sep = "")
  cat('\n')
  return(list(pedigree = pedigree, log = log, error = error, threshold = threshold))
}
# ------------------------------------------------------------------------------------------------------------------
#' Select the most likely parents for parent pair testing
#'
#' This function allows the selection of the most likely parents for assignment, reducing computation time
#' @param off.genotype genotype of one offspring
#' @param parent.genotype genotype matrix of parent genotypes
#' @param parent.sex vector of parents sex
#' @param n.Parent vector of number of sires and dams to select
#' @return list of potential sires and dams
#' @keywords assignment APIS threshold
#' @export
selectParents <- function(off.genotype, parent.genotype, parent.sex, n.Parent) {
  # Rank every candidate parent by its number of genotype incompatibilities
  # with one offspring and keep the most likely sires and dams, reducing the
  # number of sire x dam pairs evaluated downstream.
  #
  # off.genotype: character vector of one offspring's genotypes ("All1/All2")
  # parent.genotype: matrix of parent genotypes, rownames = parent labels
  # parent.sex: "M"/"F" vector aligned with the rows of parent.genotype
  # n.Parent: number of candidates to keep; a single value applies to both
  #           sexes, a length-2 vector is (number of sires, number of dams)
  #
  # Returns list(sire_toKeep, dam_toKeep): labels of the retained candidates.
  if (length(n.Parent) == 1) {
    sire.keep <- n.Parent
    dam.keep <- n.Parent
  } else {
    sire.keep <- n.Parent[1]
    dam.keep <- n.Parent[2]
  }
  # One score per parent; 'mismatch' accumulates the incompatibility count
  output <- data.frame(parent = rownames(parent.genotype),
                       sex = parent.sex,
                       mismatch = NA)
  output$parent <- as.character(output$parent)
  # Per-marker incompatibility lookup tables. For a homozygous offspring the
  # 4 entries cover parent A/A, heterozygous-with-A, no-shared-allele, NA/NA;
  # for a heterozygous offspring the 7 entries cover A/A, A/B, B/B, A/C, B/C,
  # C/C, NA/NA. A value of 1 flags an incompatible parent genotype.
  off.geno <- strsplit(off.genotype, split = '/')
  probability_table <- vector('list', length(off.genotype))
  for (m in seq_along(off.genotype)) {
    if (off.geno[[m]][1] == off.geno[[m]][2]) {
      # Offspring homozygous
      probability_table[[m]] <- c(0, 0, 1, NA)
    } else {
      # Offspring heterozygous
      probability_table[[m]] <- c(0, 0, 0, 0, 0, 1, NA)
    }
  }
  # Score every candidate parent against the offspring
  for (i in seq_len(nrow(output))) {
    p <- output$parent[i]
    p.geno <- parent.genotype[which(rownames(parent.genotype) %in% p),]
    p.geno <- strsplit(p.geno, split = '/')
    parent_probability <- rep(NA, length(p.geno))
    for (m in seq_along(off.genotype)) {
      off.mrk <- off.geno[[m]]
      p.mrk <- p.geno[[m]]
      if (off.mrk[1] == off.mrk[2] && off.mrk[1] == 'NA') {
        # Offspring not genotyped: constant contribution, never changes ranking
        parent_probability[m] <- 1
      } else if (off.mrk[1] == off.mrk[2] && off.mrk[1] != 'NA') {
        # Offspring homozygous: inspect the parent genotype
        if (p.mrk[1] == p.mrk[2] && p.mrk[1] == 'NA') { # parent NA/NA
          parent_probability[m] <- probability_table[[m]][4]
        } else if (p.mrk[1] == p.mrk[2] && p.mrk[1] != 'NA') {
          if (p.mrk[1] == off.mrk[1]) { # parent A/A
            parent_probability[m] <- probability_table[[m]][1]
          } else { # parent C/C: no shared allele -> incompatible
            parent_probability[m] <- probability_table[[m]][3]
          }
        } else { # parent heterozygous
          if ((p.mrk[1] != off.mrk[1] && p.mrk[2] != off.mrk[1]) && (p.mrk[1] != off.mrk[2] && p.mrk[2] != off.mrk[2])) {
            # no allele in common -> incompatible
            parent_probability[m] <- probability_table[[m]][3]
          } else {
            parent_probability[m] <- probability_table[[m]][2]
          }
        }
      } else {
        # Offspring heterozygous
        if (p.mrk[1] == p.mrk[2]) { # parent homozygous
          if (p.mrk[1] == 'NA') { # parent NA/NA
            parent_probability[m] <- probability_table[[m]][7]
          } else if (p.mrk[1] == off.mrk[1]) { # parent A/A
            parent_probability[m] <- probability_table[[m]][1]
          } else if (p.mrk[1] == off.mrk[2]) { # parent B/B
            parent_probability[m] <- probability_table[[m]][3]
          } else { # parent C/C -> incompatible
            parent_probability[m] <- probability_table[[m]][6]
          }
        } else { # parent heterozygous
          if ((p.mrk[1] == off.mrk[1] || p.mrk[2] == off.mrk[1]) && (p.mrk[1] == off.mrk[2] || p.mrk[2] == off.mrk[2])) {
            # parent A/B
            parent_probability[m] <- probability_table[[m]][2]
          } else if ((p.mrk[1] == off.mrk[1] || p.mrk[2] == off.mrk[1]) && (p.mrk[1] != off.mrk[2] || p.mrk[2] != off.mrk[2])) {
            # parent A/C
            parent_probability[m] <- probability_table[[m]][4]
          } else if ((p.mrk[1] != off.mrk[1] || p.mrk[2] != off.mrk[1]) && (p.mrk[1] == off.mrk[2] || p.mrk[2] == off.mrk[2])) {
            # parent B/C
            parent_probability[m] <- probability_table[[m]][5]
          } else {
            # parent C/C -> incompatible
            parent_probability[m] <- probability_table[[m]][6]
          }
        }
      }
    }
    # Total incompatibility score of this parent (uninformative markers ignored)
    output[i, 3] <- sum(parent_probability, na.rm = TRUE)
  }
  # Rank parents by incompatibility, most compatible first (order() is stable)
  output <- output[order(output$mismatch),]
  # Keep every sire within 2 mismatches of the overall best parent, then top
  # up with the next-best sires. The top-up is bounded by select.sire (the
  # request clamped to the candidate count): the original compared against
  # the raw request, producing reversed ranges, NA rows and duplicated
  # parents whenever the request exceeded the number of candidates.
  sire.candidates <- output[which(output$sex == 'M'), ]
  s.keep <- sire.candidates[which(sire.candidates$mismatch >= min(output$mismatch) & sire.candidates$mismatch <= (min(output$mismatch) + 2)),]
  select.sire <- ifelse(test = sire.keep > nrow(sire.candidates), yes = nrow(sire.candidates), no = sire.keep)
  if (nrow(s.keep) < select.sire) {
    s.keep <- rbind(s.keep, sire.candidates[c((nrow(s.keep) + 1):select.sire),])
  }
  sire_toKeep <- s.keep$parent
  # Same selection for the dams
  dam.candidates <- output[which(output$sex == 'F'), ]
  d.keep <- dam.candidates[which(dam.candidates$mismatch >= min(output$mismatch) & dam.candidates$mismatch <= (min(output$mismatch) + 2)),]
  select.dam <- ifelse(test = dam.keep > nrow(dam.candidates), yes = nrow(dam.candidates), no = dam.keep)
  if (nrow(d.keep) < select.dam) {
    d.keep <- rbind(d.keep, dam.candidates[c((nrow(d.keep) + 1):select.dam),])
  }
  dam_toKeep <- d.keep$parent
  # Labels of the most likely parents, per sex
  return(list(sire_toKeep = sire_toKeep, dam_toKeep = dam_toKeep))
}
|
/R/APIS_functions.R
|
no_license
|
rgriot/APIS
|
R
| false
| false
| 38,689
|
r
|
#' APIS function that assigns with observed data
#'
#' This function performs the APIS procedure
#' @param off.genotype Offspring genotypes | Matrix (n*p)
#'        where n = number of individuals, p = number of markers
#'        rownames(off.genotype) = labels of offspring
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param sire.genotype Sire genotypes | Matrix (n*p)
#'        where n = number of individuals, p = number of markers
#'        rownames(sire.genotype) = labels of sires
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam.genotype Dam genotypes | Matrix (n*p)
#'        where n = number of individuals, p = number of markers
#'        rownames(dam.genotype) = labels of dams
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param error (default: 0) The assignment error rate accepted by the user
#' @param exclusion.threshold (default: ncol(off.genotype)) Threshold for exclusion (number of mismatches allowed)
#' @param preselect.Parent (default: FALSE) Preselection of parents. Can be FALSE, an integer or a vector of two integers (number of sires, number of dams)
#' @param nb.cores (default: 2) Number of cores to use. If you have more than 2 cores, you can use the "parallel" function detectCores()
#' @keywords assignment APIS
#' @return pedigree
#' @return a log file
#' @useDynLib APIS
#' @import doSNOW
#' @import foreach
#' @import parallel
#' @import doParallel
#' @import ggplot2
#' @import gridExtra
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' result <- APIS(off.genotype = APIS_offspring,
#'                sire.genotype = APIS_sire,
#'                dam.genotype = APIS_dam,
#'                error = 0.05)
#' @export
APIS <- function(off.genotype, sire.genotype, dam.genotype, error = 0,
                 exclusion.threshold = ncol(off.genotype), preselect.Parent = FALSE, nb.cores = 2) {
  # ---- Input checks ----
  # All three genotype matrices must describe the same set of markers.
  if (ncol(off.genotype) == ncol(sire.genotype) && ncol(off.genotype) == ncol(dam.genotype)) {
    cat("genotype matrices : OK")
    cat('\n')
  } else {
    stop("Your genotype matrices do not have the same number of markers")
  }
  # The exclusion threshold (max mismatches) must lie in [0, number of markers].
  if ((0 <= exclusion.threshold) && (exclusion.threshold <= ncol(off.genotype))) {
    cat("exclusion threshold : OK")
    cat('\n')
  } else {
    stop("The exclusion threshold is greater than the number of markers")
  }
  # The accepted assignment error rate must be numeric and non-negative.
  if ((0 <= error) && (error <= 100) && (is.numeric(error))) {
    cat("assignment error rate : OK")
    cat('\n')
  } else {
    stop("The assignment error rate limit is NEGATIVE")
  }
  # Drop markers at which no offspring is genotyped at all.
  offspring.markerGeno <- apply(off.genotype, 2, function(X) {
    all(X == "NA/NA")
  })
  marker_nonGeno <- which(offspring.markerGeno)
  if (length(marker_nonGeno) == 0) {
    cat("All the markers are genotyped")
    cat('\n')
  } else {
    # drop = FALSE keeps a matrix even when a single marker remains.
    off.genotype <- off.genotype[, -marker_nonGeno, drop = FALSE]
    # FIX: the original paste0() was vectorized over marker_nonGeno and
    # emitted one fragment per marker; collapse the indices into one message.
    cat(paste0("marker(s) ", paste(marker_nonGeno, collapse = ", "), " have no genotypes"))
    cat('\n')
  }
  # ---- Theoretical assignment power of the marker set ----
  P <- assignmentPower(sire = sire.genotype, dam = dam.genotype)
  P2 <- substr(as.character(100 * P), 1, 6)
  cat("The assignment power of your marker set is ", P2, "%", sep = "")
  cat('\n')
  if (P >= 0.99) {
    cat("Theoretical assignment power : OK")
    cat('\n')
  } else {
    # FIX: corrected the grammar of the user-facing warning.
    message("WARNING! Your marker set is not powerful enough!")
  }
  # ---- Assign with APIS ----
  assignResult <- assignmentFortran(offspring = off.genotype,
                                    sire = sire.genotype,
                                    dam = dam.genotype,
                                    thresh = exclusion.threshold,
                                    preselect.Parent = preselect.Parent,
                                    nb.cores = nb.cores)
  # FIX: the original passed assignResult$exclu, which does not partial-match
  # the element 'log.exclu' ('exclu' is not a prefix) and silently passed NULL.
  apisResult <- setThreshold(ped.log = assignResult$log.mendel, ped.exclu = assignResult$log.exclu,
                             nb.mrk = assignResult$nb.mrk, error = error)
  pedigree <- apisResult$pedigree
  log <- apisResult$log
  # ---- Summary for the user ----
  cat('--------------------------------------', sep = '\n')
  cat('             APIS SUMMARY', sep = '\n')
  cat('--------------------------------------', sep = '\n')
  cat('Theoretical assignment power of the marker set : ', P2, "%", sep = "")
  cat('\n')
  cat('Assignment error rate accepted : ', error)
  cat('\n')
  # Share of offspring that received a parent pair.
  assignmentRate <- sum(!is.na(pedigree$sire)) / nrow(pedigree)
  AR <- substr(as.character(100 * assignmentRate), 1, 6)
  cat('Assignment rate : ', AR, '%', sep = "")
  cat('\n')
  # Return the results explicitly (the original returned them invisibly
  # through a trailing assignment).
  return(list(pedigree = pedigree, log = log, error = error))
}
# ----------------------------------------------------------------------------------------------------------------
#' Assignment function to obtain the average Mendelian transmission probabilities using a Fortran library
#'
#' This function calculates, for every offspring, the average Mendelian
#' transmission probability and the mismatch count of every candidate
#' sire x dam pair (likelihoods computed in compiled Fortran), and keeps
#' the three most likely pairs per offspring.
#' @param offspring Offspring genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#'        rownames(offspring) = labels of offspring
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param sire Sire genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#'        rownames(sire) = labels of sires
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam Dam genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#'        rownames(dam) = labels of dams
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param thresh (default: ncol(offspring)) Threshold for exclusion (number of mismatches allowed)
#' @param preselect.Parent (default: FALSE) Preselection of parents. Can be FALSE, an integer or a vector of two integers (number of sires, number of dams)
#' @param nb.cores (default: 2) Number of cores to use. If you have more than 2 cores, you can use the "parallel" function detectCores()
#' @keywords assignment
#' @return intermediate pedigree
#' @return log file for Mendelian transmission probabilities
#' @return log file for exclusion
#' @useDynLib APIS
#' @import doSNOW
#' @import foreach
#' @import parallel
#' @import doParallel
#' @importFrom "stats" "median" "quantile"
#' @importFrom "utils" "setTxtProgressBar" "txtProgressBar"
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' assignment <- assignmentFortran(APIS_offspring, APIS_sire, APIS_dam)
#' @export
assignmentFortran <- function(offspring, sire, dam, thresh = ncol(offspring),
                              preselect.Parent = FALSE, nb.cores = 2) {
  # All three genotype matrices must describe the same set of markers.
  # FIX: the original test used `&`, so it stopped only when offspring
  # differed from BOTH sire and dam; `||` stops when either differs.
  if (ncol(offspring) != ncol(sire) || ncol(offspring) != ncol(dam))
    stop('Genotypes must have the same number of markers')
  # Offspring labels and, per offspring, the number of genotyped markers
  # (used to average the log-likelihood returned by Fortran).
  offspring.name <- rownames(offspring)
  genotyped.mrk <- as.numeric(apply(offspring, 1, function(X) {length(X[X != 'NA/NA'])}))
  # Parameters
  e <- 0.01 # Genotyping error of 1% (NOTE(review): not used in this body — confirm whether the Fortran routine hard-codes it)
  # NULL placeholders silence R CMD check notes for the foreach variables.
  i <- NULL
  off <- NULL
  P <- NULL
  # Estimate allele frequencies from the offspring genotypes
  cat('Estimation of allele frequencies')
  cat('\n')
  Freq <- allFreq(offspring)
  x.col <- ncol(offspring)
  iterations <- nrow(offspring)
  # Pool parents for the optional preselection step, with sex labels
  parent.genotype <- rbind(sire, dam)
  parent.sex <- c(rep("M", times = nrow(sire)), rep("F", times = nrow(dam)))
  cat('Recoding datasets')
  cat('\n')
  # Recode one "All1/All2" genotype into the two integer allele codes
  # expected by the Fortran subroutine.
  recodeFortran <- function(mrk, list.mrk) {
    tmp <- unlist(strsplit(mrk, '/'))
    all1 <- list.mrk[which(list.mrk[,1] == tmp[1]), 2]
    all2 <- list.mrk[which(list.mrk[,1] == tmp[2]), 2]
    return(c(all1, all2))
  }
  # Map a "Freq_<allele>" column name to the allele's integer code.
  recodeFreq <- function(name.col, list.mrk) {
    tmp <- unlist(strsplit(name.col, '_'))[2]
    return(list.mrk[which(list.mrk[,1] == tmp), 2])
  }
  # Integer code table: each observed allele gets 1..k, missing ("NA") gets 0.
  variant <- unique(unlist(strsplit(as.vector(rbind(offspring, sire, dam)), '/')))
  # FIX: `variant[-which(variant == "NA")]` emptied the vector when no
  # genotype was missing anywhere; logical subsetting is safe in both cases.
  variant <- variant[variant != "NA"]
  variant.corres <- data.frame(variant = as.character(variant),
                               recode = c(1:length(variant)))
  variant.corres$variant <- as.character(variant.corres$variant)
  variant.corres <- rbind(variant.corres, c(as.character("NA"), 0))
  cat("Number of cores used :", nb.cores)
  cat("\n")
  # Recode the three datasets in parallel, with a progress bar for each.
  cl <- parallel::makeCluster(nb.cores)
  doSNOW::registerDoSNOW(cl)
  pb.recodeOff <- txtProgressBar(min = 0, max = iterations, char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeOff, n)}
  opts <- list(progress = progress)
  recode.off <- foreach(i = 1:iterations, .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(offspring[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  cat('\n')
  pb.recodeSire <- txtProgressBar(min = 0, max = nrow(sire), char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeSire, n)}
  opts <- list(progress = progress)
  recode.sire <- foreach(i = 1:nrow(sire), .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(sire[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  cat('\n')
  pb.recodeDam <- txtProgressBar(min = 0, max = nrow(dam), char = '><> ', style = 3)
  progress <- function(n) {setTxtProgressBar(pb.recodeDam, n)}
  opts <- list(progress = progress)
  recode.dam <- foreach(i = 1:nrow(dam), .combine = rbind, .options.snow = opts) %dopar% {
    tmp <- as.numeric(as.vector(sapply(dam[i,, drop = FALSE], recodeFortran, list.mrk = variant.corres)))
  }
  parallel::stopCluster(cl)
  cat('\n')
  # With a single input row, foreach/.combine returns a plain vector: coerce
  # back to a one-row matrix.
  # FIX: `class(x) != "matrix"` yields a length-2 logical since R 4.0
  # (class(matrix) is c("matrix", "array")); use is.matrix() instead.
  if (!is.matrix(recode.off)) {
    recode.off <- t(as.matrix(recode.off))
  }
  if (!is.matrix(recode.sire)) {
    recode.sire <- t(as.matrix(recode.sire))
  }
  if (!is.matrix(recode.dam)) {
    recode.dam <- t(as.matrix(recode.dam))
  }
  rownames(recode.off) <- rownames(offspring)
  rownames(recode.sire) <- rownames(sire)
  rownames(recode.dam) <- rownames(dam)
  # Build the allele-frequency matrix expected by Fortran: one column per
  # allele code, ordered by code, with the temporary header row removed.
  # FIX: guard the Freq_NA removal — the unconditional `-which()` emptied
  # Freq when the column was absent (no missing genotype in the data).
  na.col <- which(colnames(Freq) == "Freq_NA")
  if (length(na.col) > 0) {
    Freq <- Freq[, -na.col]
  }
  # Keep only the frequency columns (second half of allFreq's output,
  # after the 'tot' column).
  Freq <- Freq[, c((floor(ncol(Freq)/2)+2):ncol(Freq))]
  colnames(Freq) <- sapply(colnames(Freq), recodeFreq, list.mrk = variant.corres)
  Freq <- rbind(colnames(Freq), Freq)
  # Alleles observed in parents but absent from the offspring get frequency 0.
  # (setdiff is safe when there is nothing to add, unlike `-which()`.)
  add.freq <- as.numeric(setdiff(variant.corres$recode, Freq[1, ]))
  add.freq <- add.freq[add.freq != 0]  # code 0 = missing, never a real allele
  add.freqMatrix <- matrix(data = c(add.freq, rep(0, times = length(add.freq) * (nrow(Freq) - 1))),
                           ncol = length(add.freq), nrow = nrow(Freq), byrow = TRUE)
  Freq <- apply(Freq, 2, as.numeric)
  Freq <- cbind(Freq, add.freqMatrix)
  Freq <- Freq[, order(Freq[1,])]
  Freq <- Freq[-1,]
  # ---- Assignment Process ----
  cat('Assignment')
  cat('\n')
  # Parallel iteration over offspring
  cl <- parallel::makeCluster(nb.cores)
  doSNOW::registerDoSNOW(cl)
  pb.assignment <- txtProgressBar(min = 0, max = iterations, char = "><(((*> ", style = 3)
  progress <- function(n) {setTxtProgressBar(pb.assignment, n)}
  opts <- list(progress = progress)
  A <- foreach(off = 1:iterations, .multicombine = TRUE,
               .packages = c('foreach', 'doParallel', 'doSNOW'), .options.snow = opts) %dopar% { # For each offspring
    tmp <- recode.off[off,]
    # FIX: `preselect.Parent == F` raised an error when preselect.Parent was
    # a length-2 vector (length > 1 condition); isFALSE() handles all inputs.
    if (isFALSE(preselect.Parent)) {
      potential.sire <- rownames(sire)
      potential.dam <- rownames(dam)
    } else {
      potential.parents <- selectParents(offspring[off,], parent.genotype = parent.genotype,
                                         parent.sex = parent.sex, n.Parent = preselect.Parent)
      potential.sire <- potential.parents$sire_toKeep
      potential.dam <- potential.parents$dam_toKeep
    }
    tmp.sire <- recode.sire[which(rownames(recode.sire) %in% potential.sire),]
    tmp.dam <- recode.dam[which(rownames(recode.dam) %in% potential.dam),]
    # One row per candidate sire x dam pair
    res <- matrix(NA, nrow = (length(potential.sire)*length(potential.dam)), ncol = 4)
    colnames(res) <- c('sire', 'dam', 'score_exclu', 'P_mendel')
    res[,1] <- rep(potential.sire, each = length(potential.dam))
    res[,2] <- rep(potential.dam, times = length(potential.sire))
    # Prepare Fortran inputs
    nMrk = as.integer(x.col)
    nSires = as.integer(length(potential.sire))
    nDams = as.integer(length(potential.dam))
    nVariant = as.integer(ncol(Freq))
    output_sires = vector(mode = 'integer', length = nSires*nDams)
    output_dams = vector(mode = 'integer', length = nSires*nDams)
    output_score = vector(mode = 'numeric', length = nSires*nDams)
    output_miss = vector(mode = 'numeric', length = nSires*nDams)
    outputFortran <- .Fortran("likelihoodCalculation", as.integer(tmp), as.integer(tmp.sire),
                              as.integer(tmp.dam), as.integer(nMrk), as.integer(nVariant), as.integer(nSires), as.integer(nDams),
                              as.double(Freq), output_sires, output_dams, as.double(output_score), as.integer(output_miss))
    # Slot 12 = mismatch counts; slot 11 = log-likelihoods, averaged over the
    # offspring's genotyped markers and exponentiated.
    res[,3] <- as.integer(outputFortran[[12]])
    res[,4] <- exp(as.numeric(as.numeric(outputFortran[[11]]))/genotyped.mrk[off])
    # Working on the results
    res <- as.data.frame(res)
    res$sire <- as.character(res$sire)
    res$dam <- as.character(res$dam)
    res$score_exclu <- as.numeric(as.character(res$score_exclu))
    res$P_mendel <- as.numeric(as.character(res$P_mendel))
    # Order by Mendelian transmission probabilities and keep the 3 best pairs
    res2 <- res[order(res[,4], res[,3], decreasing = TRUE),]
    delta_P12 <- res2[1,4] - res2[2,4]
    delta_P23 <- res2[2,4] - res2[3,4]
    p_fin <- res2[1,1]
    m_fin <- res2[1,2]
    out.log <- unlist(c(offspring.name[off], genotyped.mrk[off], p_fin, m_fin, res2[1, 3:4], res2[2,1:4], delta_P12, res2[3,1:4], delta_P23))
    # Order by mismatches (ties broken by highest probability)
    res2 <- res[order(res[,3], -res[,4], decreasing = FALSE),]
    out.exclu <- unlist(c(offspring.name[off], genotyped.mrk[off], res2[1,1:3], res2[2,1:3], res2[3,1:3]))
    a <- list(out.log, out.exclu)
  }
  parallel::stopCluster(cl)
  cat('\n')
  # Mendelian-probability log: the three best pairs per offspring
  ped.log <- as.data.frame(t(as.data.frame(lapply(A, function(X) {t <- X[[1]]}))))
  colnames(ped.log) <- c('offspring', 'mrk_genotype', 'sire1', 'dam1', 'mismatch1', 'mendel1',
                         'sire2', 'dam2', 'mismatch2', 'mendel2', 'delta_Pmendel12',
                         'sire3', 'dam3', 'mismatch3', 'mendel3', 'delta_Pmendel23')
  rownames(ped.log) <- c(1:nrow(ped.log))
  ped.log[,] <- sapply(ped.log[,c(1:ncol(ped.log))], as.character)
  ped.log[,c(2, 5:6, 9:11, 14:16)] <- sapply(ped.log[,c(2, 5:6, 9:11, 14:16)], as.numeric)
  # Exclusion log: the three pairs with the fewest mismatches per offspring
  ped.exclu <- as.data.frame(t(as.data.frame(lapply(A, function(X) {t <- X[[2]]}))))
  colnames(ped.exclu) <- c('off', 'mrk_genotype','sire1', 'dam1', 'mismatch1',
                           'sire2', 'dam2', 'mismatch2',
                           'sire3', 'dam3', 'mismatch3')
  rownames(ped.exclu) <- c(1:nrow(ped.exclu))
  ped.exclu[,c(2,5,8,11)] <- sapply(ped.exclu[,c(2,5,8,11)], as.numeric)
  # Intermediate pedigree: best pair per offspring (no threshold applied yet)
  ped <- ped.log[,c(1,3:4)]
  # Return the output
  return(list(pedigree = ped, log.mendel = ped.log, log.exclu = ped.exclu, nb.mrk = ncol(offspring)))
}
# -------------------------------------------------------------------------------------------------------------------
#' Set the APIS threshold
#'
#' This function calculates the assignment threshold for APIS from the
#' distributions of Mendelian transmission probabilities in the log file.
#' @param ped.log Mendelian-probability log from the assignment function
#' @param ped.exclu exclusion (mismatch) log from the assignment function
#'        (not referenced in this function body)
#' @param nb.mrk Number of markers (not referenced in this function body)
#' @param error (default: NULL) The assignment error rate accepted by the user;
#'        when NULL the user is prompted interactively
#' @keywords assignment
#' @return pedigree
#' @return log file
#' @import ggplot2
#' @import gridExtra
#' @importFrom "stats" "median" "quantile"
#' @export
setThreshold <- function(ped.log, ped.exclu, nb.mrk, error = NULL) {
  # Banner
  cat('===================================================', sep = '\n')
  cat('      ___   _____   _   _____ ', sep = '\n')
  cat('     /   | | _ \\ | | /  ___/ ', sep = '\n')
  cat('    / /| | | |_| | | | | |___ ', sep = '\n')
  cat('   / / | | | ___/ | | \\ __ \\ ', sep = '\n')
  cat('  / /  | | | | | | ___| | ', sep = '\n')
  cat(' /_/ |_| |_| |_| /_____/ ', sep = '\n')
  cat('\n')
  cat('---------------------------------------------------', sep = '\n')
  cat('AUTO-ADAPTIVE PARENTAGE INFERENCE SOFTWARE', sep = '\n')
  cat('---------------------------------------------------', sep = '\n')
  # Placeholder silences the R CMD check note for the ggplot aesthetic below.
  P <- NULL
  # Create the pedigree output (one row per offspring in the log)
  ped <- as.data.frame(matrix(NA, ncol = 3, nrow = nrow(ped.log)))
  colnames(ped) <- c('off', 'sire', 'dam')
  ped[,1] <- ped.log[,1]
  # Plot of Mendelian transmission probability distributions:
  # interleave best-pair values (odd positions) with second-best values
  # (even positions) so both distributions share one data frame.
  mendel <- rep(NA, times = 2*nrow(ped.log))
  mendel[seq(1, length(mendel), 2)] <- ped.log$mendel1
  mendel[seq(2, length(mendel), 2)] <- ped.log$mendel2
  data.mendel <- data.frame(mendel = mendel,
                            P = rep(c("P1", "P2"), times = nrow(ped.log)))
  # Same interleaving for the probability gaps (delta 1-2 and delta 2-3)...
  delta <- rep(NA, times = 2*nrow(ped.log))
  delta[seq(1, length(delta), 2)] <- ped.log$delta_Pmendel12
  delta[seq(2, length(delta), 2)] <- ped.log$delta_Pmendel23
  data.delta <- data.frame(delta = delta,
                           P = rep(c("delta1", "delta2"), times = nrow(ped.log)))
  # ...and for the mismatch counts of the two best pairs.
  miss <- rep(NA, times = 2*nrow(ped.log))
  miss[seq(1, length(miss), 2)] <- ped.log$mismatch1
  miss[seq(2, length(miss), 2)] <- ped.log$mismatch2
  data.miss <- data.frame(miss = miss,
                          P = rep(c("mismatch1", "mismatch2"), times = nrow(ped.log)))
  # Ask for the accepted error rate when it was not supplied.
  if (is.null(error)) {
    error <- as.numeric(readline(prompt = 'What assignment error rate do you accept : '))
  } else {
    error <- error
  }
  # Calculate the median of P2m(:)
  med_0 <- median(ped.log$mendel2)
  mendel2_o <- sort(ped.log$mendel2)
  mendel1_o <- sort(ped.log$mendel1)
  # N1_0: best-pair probabilities falling below the current cut point;
  # N0: total number of offspring.
  N1_0 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_0)])
  N0 <- length(ped.log$mendel1)
  # Fixed-point iteration: refine the cut point until the estimated number
  # of offspring with a missing true parent pair (N1) stabilises.
  # NOTE(review): the estimator mixes the mendel2 and mendel3 distributions
  # scaled by 2*N1_0/N0 — confirm against the APIS publication.
  while(TRUE) {
    seuil <- min(ped.log$mendel2)
    N2_l <- round(length(ped.log$mendel2[which(ped.log$mendel2<=seuil)]) -
                    (length(ped.log$mendel3[which(ped.log$mendel3<=seuil)]) * ((2*N1_0)/N0)))
    vu <- c()  # (unused)
    cpt <- 1
    # Walk up the sorted mendel2 values until the adjusted count reaches
    # half of the offspring assumed to have both parents present.
    while(N2_l<((N0-(2*N1_0))/2) & cpt<=nrow(ped.log)) {
      seuil <- mendel2_o[cpt]
      N2_l <- round(length(ped.log$mendel2[which(ped.log$mendel2<=seuil)]) -
                      (length(ped.log$mendel3[which(ped.log$mendel3<=seuil)]) * ((2*N1_0)/N0)))
      cpt <- cpt+1
    }
    med_1 <- seuil
    N1_0 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_0)])
    N1_1 <- length(ped.log$mendel1[which(ped.log$mendel1<=med_1)])
    diffN <- N1_1 - N1_0
    # Stop once two consecutive estimates agree to within one offspring.
    if (diffN<=1) {
      break
    } else {
      med_0 <- med_1
    }
  }
  # Lower bound of the estimate; upper bound capped at half the offspring.
  N1_1min <- length(which(ped.log$mendel1<=median(ped.log$mendel2)))
  N1_1 <- ifelse(test = N1_1>round(nrow(ped.log)/2), yes = round(nrow(ped.log)/2), no = N1_1)
  cat('Estimated number of offspring with at least one missing parent : between',2*N1_1min,'and',2*N1_1)
  cat('\n')
  #####-----------------------------------------------------
  ##### THRESHOLD
  #####-----------------------------------------------------
  # If the number of offspring with at least one missing parent is LOWER than the user-defined error
  if ((2*N1_1)<=round(error*nrow(ped.log))) {
    cat('--------------------------------------', sep = '\n')
    cat('   BEST MENDELIAN PROBABILITY', sep = '\n')
    cat('--------------------------------------', sep = '\n')
    # Everyone gets the best pair: no filtering is needed.
    ped[,2:3] <- ped.log[,3:4]
    ped$assign <- 'assign'
    thresh.mendel <- min(ped.log$delta_Pmendel12)
  } else {
    # If the number of offspring with at least one missing parent is GREATER than the user-defined error
    cat('--------------------------------------', sep = '\n')
    cat('   DELTA OF MENDELIAN PROBABILITY', sep = '\n')
    cat('--------------------------------------', sep = '\n')
    # Threshold = (1 - error) quantile of the delta 2-3 distribution,
    # restricted to offspring assumed to have both parents present.
    s.delta23 <- sort(ped.log$delta_Pmendel23, decreasing = T)
    thresh.mendel <- quantile(s.delta23[1:(nrow(ped.log) - 2*N1_1min)], probs = (1-error), type = 5, na.rm = T)
    cat('Threshold for delta :', thresh.mendel)
    cat('\n')
    # Keep the best pair only when its lead over the runner-up is large enough.
    ped[,2:3] <- ped.log[,c(3:4)]
    ped$assign <- ifelse(test = ped.log$delta_Pmendel12 >= thresh.mendel, yes = 'assign', no = 'no.assign')
  }
  # Reformat outputs: blank out parents of unassigned offspring,
  # then drop the helper 'assign' column.
  ped.final <- ped
  ped.final[which(ped.final$assign == 'no.assign'), 2:3] <- c(NA, NA)
  ped.final <- ped.final[,-4]
  # Plot the distributions (deltas, probabilities, mismatches)
  plot_mendel <- ggplot2::ggplot(data = data.mendel, aes(x = mendel, fill = P)) +
    geom_histogram(data = subset(data.mendel, P == 'P2'), bins = 30) +
    geom_histogram(data = subset(data.mendel, P == 'P1'), alpha = 0.8, bins = 30) +
    xlab(label = "average Mendelian tranmission probability") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Mendelian probability"))
  plot_delta <- ggplot2::ggplot(data = data.delta, aes(x = delta, fill = P)) +
    geom_histogram(data = subset(data.delta, P == 'delta2'), bins = 30) +
    geom_histogram(data = subset(data.delta, P == 'delta1'), alpha = 0.8, bins = 30) +
    geom_vline(xintercept = thresh.mendel) +
    xlab(label = "delta") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Delta"))
  plot_miss <- ggplot2::ggplot(data = data.miss, aes(x = miss, fill = P)) +
    geom_histogram(data = subset(data.miss, P == 'mismatch2'), bins = 30) +
    geom_histogram(data = subset(data.miss, P == 'mismatch1'), alpha = 0.8, bins = 30) +
    xlab(label = "number of mismatches") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "missmatches"))
  gridExtra::grid.arrange(plot_delta, plot_mendel, plot_miss, nrow = 3, ncol = 1)
  # Return the output
  return(list(pedigree = ped.final, log = ped.log, error = error))
}
# -------------------------------------------------------------------------------------------------------------------
#' Estimate the allele frequencies
#'
#' This function estimates allele frequencies from a genotype matrix.
#' @param genotype A character matrix of genotypes (n*p)
#'        n = number of individuals
#'        p = number of markers (coded as "All1/All2", ex: "A/A" or "NA/NA" for missing genotype)
#' @keywords allele frequencies
#' @examples
#' data("APIS_offspring")
#' freq <- allFreq(APIS_offspring)
#' @return A matrix with one row per marker: the allele counts, the total
#'         allele count ("tot"), and the allele frequencies ("Freq_*"),
#'         each count being divided by the number of non-missing alleles
#'         observed at that marker.
#' @export
allFreq <- function(genotype) {
  # Split every "All1/All2" genotype into two allele columns: marker i
  # occupies columns imp[i] and imp[i] + 1 of mat.geno.
  mat.geno <- matrix(NA, nrow = nrow(genotype), ncol = 2 * ncol(genotype))
  imp <- seq(1, ncol(mat.geno), 2)
  for (i in seq_len(ncol(genotype))) {
    tmp <- strsplit(genotype[, i], split = '/', fixed = TRUE)
    # do.call(rbind, ...) builds the n*2 allele matrix directly
    # (the original used an identity mapply + transpose).
    M <- do.call(rbind, tmp)
    mat.geno[, (imp[i]:(imp[i] + 1))] <- M
  }
  # Sorted list of distinct alleles (the literal string "NA" marks missing).
  variant <- sort(unique(as.vector(mat.geno)))
  # Count matrix: one row per marker, one column per allele.
  mat.res <- matrix(0, nrow = ncol(genotype), ncol = length(variant))
  rownames(mat.res) <- colnames(genotype)
  colnames(mat.res) <- variant
  for (n in seq_len(nrow(mat.res))) {
    tmp <- table(mat.geno[, (imp[n]:(imp[n] + 1))])
    mat.res[n, match(names(tmp), colnames(mat.res))] <- tmp
  }
  # Frequencies: counts divided by the number of non-missing alleles.
  # FIX: drop = FALSE keeps a matrix (and a working rowSums) when only one
  # marker or one non-missing allele is present.
  mat.freq <- mat.res / rowSums(mat.res[, which(colnames(mat.res) != 'NA'), drop = FALSE])
  colnames(mat.freq) <- paste0('Freq_', colnames(mat.res))
  # Merge counts, total and frequencies side by side.
  res <- cbind(mat.res, tot = rowSums(mat.res), mat.freq)
  return(res)
}
# ------------------------------------------------------------------------------------------------------------------
#' Calculate the theoretical assignment power
#'
#' This function calculates the theoretical assignment power of the marker set
#' @param sire Sire genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#'        rownames(sire) = labels of sires
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @param dam Dam genotypes | Matrix (n*p) where n = number of individuals, p = number of markers
#'        rownames(dam) = labels of dams
#'        marker coding = "All1/All2" example: "A/A", "A/B", "NA/NA" (for missing genotype)
#' @return Theoretical assignment power of the marker set
#' @examples
#' data("APIS_sire")
#' data("APIS_dam")
#' assignmentPower(APIS_sire, APIS_dam)
#' @keywords assignment exclusion power
#' @export
assignmentPower <- function(sire, dam) {
  # Theoretical assignment power as proposed in Vandeputte, M. (2012),
  # from allele frequencies estimated over all candidate parents.
  pop <- rbind(sire, dam)
  freq <- allFreq(as.matrix(pop))
  # Frequency columns follow the 'tot' column in allFreq's output.
  col <- which(colnames(freq) == 'tot')
  # drop = FALSE protects the single-column cases (one allele / one marker).
  freq.calc <- as.data.frame(freq[, ((col + 1):ncol(freq)), drop = FALSE])
  # The missing-allele pseudo-frequency must not enter the power sums.
  test.NA <- which(colnames(freq.calc) == "Freq_NA")
  if (length(test.NA) != 0) {
    freq.calc <- freq.calc[, -test.NA, drop = FALSE]
  }
  mcol <- ncol(freq.calc)
  # Per-marker power sums of allele frequencies, computed once and reused
  # (the original recomputed each rowSums up to four times).
  f <- as.matrix(freq.calc[, 1:mcol, drop = FALSE])
  s2 <- rowSums(f^2)
  s3 <- rowSums(f^3)
  s4 <- rowSums(f^4)
  s5 <- rowSums(f^5)
  s6 <- rowSums(f^6)
  # Per-marker exclusion probabilities Q1i and Q3i
  # (NOTE(review): polynomials taken verbatim from the original code;
  # confirm coefficients against Vandeputte 2012).
  freq.calc$Q1i <- 1 - 2*s2 + s3 + 2*s4 - 2*s2^2 - 3*s5 + 3*s3*s2
  freq.calc$Q3i <- 1 + 4*s4 - 4*s5 - 3*s6 - 8*s2^2 + 2*s3^2 + 8*s3*s2
  # Combine over markers: probability that at least one marker excludes.
  Q1 <- 1 - prod(1 - freq.calc$Q1i)
  Q3 <- 1 - prod(1 - freq.calc$Q3i)
  # Global assignment power over all candidate parent pairs.
  Pu <- Q1^(nrow(dam) + nrow(sire) - 2) * Q3^((nrow(dam) - 1) * (nrow(sire) - 1))
  # Return the result
  return(Pu)
}
# ------------------------------------------------------------------------------------------------------------------
#' Establish personal threshold
#'
#' This function allows the user to set up his own assignment threshold
#' @param APIS.result APIS function output (list with pedigree, log, error)
#' @param method the method for the new threshold | 'delta' for deltas,
#'        'Pmendel' for Mendelian probabilities, 'exclusion' for mismatches
#' @param threshold personal threshold | default values are implemented
#' @return new pedigree from the new threshold
#' @examples
#' data("APIS_offspring")
#' data("APIS_sire")
#' data("APIS_dam")
#'
#' result <- APIS(off.genotype = APIS_offspring,
#'                sire.genotype = APIS_sire,
#'                dam.genotype = APIS_dam,
#'                error = 0.05)
#'
#' new.result <- personalThreshold(result, method = 'Pmendel')
#' @keywords assignment APIS threshold
#' @import ggplot2
#' @import gridExtra
#' @importFrom "stats" "median" "quantile"
#' @export
personalThreshold <- function(APIS.result, method, threshold = NULL) {
  # Unpack the APIS() output
  pedigree <- APIS.result$pedigree
  log <- APIS.result$log
  error <- APIS.result$error
  # Resolve the log column to compare against and the default threshold.
  # (FIX: removed the redundant `else { threshold <- threshold }` branches.)
  if (method == "delta") {
    # Column 11 = delta_Pmendel12 (gap between best and second-best pair)
    col_toKeep <- 11
    if (is.null(threshold)) {
      # No default delta exists: return the original APIS result untouched.
      return(APIS.result)
    }
  } else if (method == "Pmendel") {
    # Column 6 = mendel1 (best pair's average Mendelian probability)
    col_toKeep <- 6
    if (is.null(threshold)) {
      # Default: (1 - error) quantile of the second-best probabilities
      # (column 10 = mendel2).
      threshold <- quantile(x = log[,10], probs = (1 - error), type = 5)
    }
  } else if (method == "exclusion") {
    # Column 5 = mismatch1 (best pair's mismatch count)
    col_toKeep <- 5
    if (is.null(threshold)) {
      # Default: allow 5% of the maximum number of genotyped markers.
      threshold <- ceiling(0.05 * max(log$mrk_genotype))
    }
  } else {
    stop("Invalid method")
  }
  # Re-assign the offspring against the chosen threshold:
  # mismatches must stay BELOW it, probabilities/deltas must stay ABOVE it.
  tmp <- log[,c(1,3:4)]
  if (method == 'exclusion') {
    tmp$assign <- ifelse(test = log[,col_toKeep] <= threshold, yes = 'assign', no = 'no.assign')
  } else {
    tmp$assign <- ifelse(test = log[,col_toKeep] >= threshold, yes = 'assign', no = 'no.assign')
  }
  tmp[which(tmp$assign == 'no.assign'), 2:3] <- c(NA, NA)
  pedigree <- tmp[,1:3]
  # Plot results: interleave best (odd positions) and second-best (even
  # positions) values so both distributions share one data frame.
  mendel <- rep(NA, times = 2*nrow(log))
  mendel[seq(1, length(mendel), 2)] <- log$mendel1
  mendel[seq(2, length(mendel), 2)] <- log$mendel2
  data.mendel <- data.frame(mendel = mendel,
                            P = rep(c("P1", "P2"), times = nrow(log)))
  delta <- rep(NA, times = 2*nrow(log))
  delta[seq(1, length(delta), 2)] <- log$delta_Pmendel12
  delta[seq(2, length(delta), 2)] <- log$delta_Pmendel23
  data.delta <- data.frame(delta = delta,
                           P = rep(c("delta1", "delta2"), times = nrow(log)))
  miss <- rep(NA, times = 2*nrow(log))
  miss[seq(1, length(miss), 2)] <- log$mismatch1
  miss[seq(2, length(miss), 2)] <- log$mismatch2
  data.miss <- data.frame(miss = miss,
                          P = rep(c("mismatch1", "mismatch2"), times = nrow(log)))
  plot_mendel <- ggplot2::ggplot(data = data.mendel, aes(x = mendel, fill = P)) +
    geom_histogram(data = subset(data.mendel, P == 'P2'), bins = 30) +
    geom_histogram(data = subset(data.mendel, P == 'P1'), alpha = 0.8, bins = 30) +
    xlab(label = "average Mendelian tranmission probability") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Mendelian probability"))
  plot_delta <- ggplot2::ggplot(data = data.delta, aes(x = delta, fill = P)) +
    geom_histogram(data = subset(data.delta, P == 'delta2'), bins = 30) +
    geom_histogram(data = subset(data.delta, P == 'delta1'), alpha = 0.8, bins = 30) +
    xlab(label = "delta") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "Delta"))
  plot_miss <- ggplot2::ggplot(data = data.miss, aes(x = miss, fill = P)) +
    geom_histogram(data = subset(data.miss, P == 'mismatch2'), bins = 30) +
    geom_histogram(data = subset(data.miss, P == 'mismatch1'), alpha = 0.8, bins = 30) +
    xlab(label = "number of mismatches") +
    ylab(label = "number of individuals") +
    theme(axis.title.x = element_text(margin = margin(20, 0, 0, 0))) +
    theme(axis.title.y = element_text(margin = margin(0, 20, 0, 0))) +
    guides(fill = guide_legend(title = "missmatches"))
  # Draw the user-chosen threshold on the relevant panel only.
  if (method == 'delta') {
    plot_delta <- plot_delta +
      geom_vline(xintercept = threshold)
  } else if (method == 'Pmendel') {
    plot_mendel <- plot_mendel +
      geom_vline(xintercept = threshold)
  } else {
    plot_miss <- plot_miss +
      geom_vline(xintercept = threshold)
  }
  gridExtra::grid.arrange(plot_delta, plot_mendel, plot_miss, nrow = 3, ncol = 1)
  # Summary for the user
  cat('--------------------------------------', sep = '\n')
  cat('             APIS SUMMARY', sep = '\n')
  cat('--------------------------------------', sep = '\n')
  cat("Method for personal threshold : ", method, sep = "")
  cat('\n')
  cat('Threshold : ', threshold, sep = "")
  cat('\n')
  assignmentRate <- length(pedigree$sire[which(is.na(pedigree$sire) == F)]) / nrow(pedigree)
  AR <- substr(as.character(100 * assignmentRate), 1, 6)
  cat('Assignment rate : ', AR, '%', sep = "")
  cat('\n')
  return(list(pedigree = pedigree, log = log, error = error, threshold = threshold))
}
# ------------------------------------------------------------------------------------------------------------------
#' Select most likely parents for potent parent pairs tests
#'
#' This function allows the selection of the most likely parents for assignment,
#' reducing computation time. Each parent is scored by its number of Mendelian
#' mismatches with the offspring; all parents within 2 mismatches of the best
#' parent are kept, completed with the next-best parents up to the requested
#' number if needed.
#' @param off.genotype genotype of one offspring: character vector of
#'   "allele1/allele2" strings, one per marker (missing genotype coded "NA/NA")
#' @param parent.genotype genotype matrix of parent genotypes (one row per
#'   parent, rownames = parent ids, same markers/columns as \code{off.genotype})
#' @param parent.sex vector of parents sex ('M' = sire, 'F' = dam)
#' @param n.Parent number of sires and dams to select: either one value used
#'   for both sexes, or a vector c(n.sires, n.dams)
#' @return list of potential sires and dams: character vectors
#'   \code{sire_toKeep} and \code{dam_toKeep} of parent ids
#' @keywords assignment APIS threshold
#' @export
selectParents <- function(off.genotype, parent.genotype, parent.sex, n.Parent) {
  # Number of sires / dams to retain (a single value applies to both sexes)
  if (length(n.Parent) == 1) {
    sire.keep <- n.Parent
    dam.keep <- n.Parent
  } else {
    sire.keep <- n.Parent[1]
    dam.keep <- n.Parent[2]
  }
  output <- data.frame(parent = rownames(parent.genotype),
                       sex = parent.sex,
                       mismatch = NA)
  output$parent <- as.character(output$parent)
  # Per-marker mismatch lookup tables, indexed by the parent genotype relative
  # to the offspring genotype:
  #   offspring homozygous A/A : [1] parent A/A, [2] parent heterozygous carrying A,
  #                              [3] parent with no copy of A (mismatch), [4] parent NA/NA
  #   offspring heterozygous A/B: [1] A/A, [2] A/B, [3] B/B, [4] A/C, [5] B/C,
  #                               [6] no shared allele (mismatch), [7] parent NA/NA
  off.geno <- strsplit(off.genotype, split = '/')
  probability_table <- vector('list', length(off.genotype))
  for (m in seq_along(off.genotype)) {
    if (off.geno[[m]][1] == off.geno[[m]][2]) {
      # offspring homozygous at this marker
      probability_table[[m]] <- c(0, 0, 1, NA)
    } else {
      # offspring heterozygous at this marker
      probability_table[[m]] <- c(0, 0, 0, 0, 0, 1, NA)
    }
  }
  # Score every parent: count the markers incompatible with the offspring
  i <- 1
  for (p in output$parent) {
    p.geno <- parent.genotype[which(rownames(parent.genotype) %in% p), ]
    p.geno <- strsplit(p.geno, split = '/')
    parent_probability <- rep(NA, length(p.geno))
    # Loop over all the markers
    for (m in seq_along(off.genotype)) {
      off.mrk <- off.geno[[m]]
      p.mrk <- p.geno[[m]]
      if (off.mrk[1] == off.mrk[2] && off.mrk[1] == 'NA') {
        # Offspring genotype missing: the same constant is added for every
        # parent, so the ranking between parents is unaffected
        # (NOTE(review): 0 would be more intuitive than 1 -- confirm intent)
        parent_probability[m] <- 1
      } else if (off.mrk[1] == off.mrk[2] && off.mrk[1] != 'NA') {
        # Offspring homozygous: check the parent genotype
        if (p.mrk[1] == p.mrk[2] && p.mrk[1] == 'NA') {
          # parent NA/NA
          parent_probability[m] <- probability_table[[m]][4]
        } else if (p.mrk[1] == p.mrk[2] && p.mrk[1] != 'NA') {
          if (p.mrk[1] == off.mrk[1]) {
            # parent A/A (same homozygote)
            parent_probability[m] <- probability_table[[m]][1]
          } else {
            # parent C/C (different homozygote: incompatible)
            parent_probability[m] <- probability_table[[m]][3]
          }
        } else {
          # parent heterozygous
          if ((p.mrk[1] != off.mrk[1] && p.mrk[2] != off.mrk[1]) && (p.mrk[1] != off.mrk[2] && p.mrk[2] != off.mrk[2])) {
            # neither parental allele matches the offspring allele: incompatible
            parent_probability[m] <- probability_table[[m]][3]
          } else {
            parent_probability[m] <- probability_table[[m]][2]
          }
        }
      } else {
        # Offspring heterozygous (A/B)
        if (p.mrk[1] == p.mrk[2]) {
          # parent homozygous
          if (p.mrk[1] == 'NA') {
            parent_probability[m] <- probability_table[[m]][7]
          } else if (p.mrk[1] == off.mrk[1]) {
            # parent A/A
            parent_probability[m] <- probability_table[[m]][1]
          } else if (p.mrk[1] == off.mrk[2]) {
            # parent B/B
            parent_probability[m] <- probability_table[[m]][3]
          } else {
            # parent C/C: no shared allele, incompatible
            parent_probability[m] <- probability_table[[m]][6]
          }
        } else {
          # parent heterozygous
          if ((p.mrk[1] == off.mrk[1] || p.mrk[2] == off.mrk[1]) && (p.mrk[1] == off.mrk[2] || p.mrk[2] == off.mrk[2])) {
            # parent A/B
            parent_probability[m] <- probability_table[[m]][2]
          } else if ((p.mrk[1] == off.mrk[1] || p.mrk[2] == off.mrk[1]) && (p.mrk[1] != off.mrk[2] || p.mrk[2] != off.mrk[2])) {
            # parent A/C
            parent_probability[m] <- probability_table[[m]][4]
          } else if ((p.mrk[1] != off.mrk[1] || p.mrk[2] != off.mrk[1]) && (p.mrk[1] == off.mrk[2] || p.mrk[2] == off.mrk[2])) {
            # parent B/C
            parent_probability[m] <- probability_table[[m]][5]
          } else {
            # no shared allele: incompatible
            parent_probability[m] <- probability_table[[m]][6]
          }
        }
      }
    }
    # Total mismatch count for this parent (markers scored NA are ignored)
    output[i, 3] <- sum(parent_probability, na.rm = TRUE)
    i <- i + 1
  }
  # Sort parents from the most to the least compatible
  output <- output[order(output$mismatch), ]
  best <- min(output$mismatch)
  # Sires: keep every sire within 2 mismatches of the overall best parent;
  # if fewer than requested, complete with the next-best sires. Because the
  # kept rows form a prefix of the sorted sire table, completion can be done
  # by row index.
  sire.all <- output[which(output$sex == 'M'), ]
  s.keep <- sire.all[which(sire.all$mismatch >= best & sire.all$mismatch <= (best + 2)), ]
  select.sire <- min(sire.keep, nrow(sire.all))
  if (nrow(s.keep) < select.sire) {
    # Fix: guard on select.sire (not sire.keep) so the completion index
    # (nrow(s.keep) + 1):select.sire can never run backwards and append
    # an all-NA row plus a duplicated row
    s.keep <- rbind(s.keep, sire.all[(nrow(s.keep) + 1):select.sire, ])
  }
  sire_toKeep <- s.keep$parent
  # Dams: same selection rule
  dam.all <- output[which(output$sex == 'F'), ]
  d.keep <- dam.all[which(dam.all$mismatch >= best & dam.all$mismatch <= (best + 2)), ]
  select.dam <- min(dam.keep, nrow(dam.all))
  if (nrow(d.keep) < select.dam) {
    d.keep <- rbind(d.keep, dam.all[(nrow(d.keep) + 1):select.dam, ])
  }
  dam_toKeep <- d.keep$parent
  # Return the most likely parents
  return(list(sire_toKeep = sire_toKeep, dam_toKeep = dam_toKeep))
}
|
#-----------------------------------------------
# Load packages and prepare data
#-----------------------------------------------
# Load packages
# Don't forget to install required packages if needed...
library(resemble); library(prospectr)
library(ggplot2)
library(foreach); library(reshape2)
# Load dataset from the "Chimiométrie 2006" challenge
# see ?NIRsoil for an explanation of the variables
data(NIRsoil)
# Filter the data using the Savitzky and Golay smoothing filter with a window size of
# 11 spectral variables and a polynomial order of 3 (no differentiation).
NIRsoil$spc_sg <- savitzkyGolay(X = NIRsoil$spc, p = 3, w = 11, m = 0)
# Bin the *smoothed* spectra every 10 nm (5 adjacent 2-nm variables per bin).
# Fix: the original binned the raw spectra (X = NIRsoil$spc), which silently
# discarded the Savitzky-Golay smoothing computed just above.
NIRsoil$spc_sg <- binning(X = NIRsoil$spc_sg, bin.size = 5)
wavelength <- as.numeric(colnames(NIRsoil$spc)) # store the original wavelength positions
# Select the dependent variable
dep <- "Nt" # Total Nitrogen
# Remove observations with a missing dependent variable
anyna <- is.na(NIRsoil[, dep])
NIRsoil <- NIRsoil[!anyna, ]
# Split into calibration and validation sets
Yr <- NIRsoil[as.logical(NIRsoil$train), dep]      # dependent variable (calibration set)
Xr <- NIRsoil$spc_sg[as.logical(NIRsoil$train), ]  # spectral matrix (calibration set)
Yu <- NIRsoil[!as.logical(NIRsoil$train), dep]     # dependent variable (validation set)
Xu <- NIRsoil$spc_sg[!as.logical(NIRsoil$train), ] # spectral matrix (validation set)
#-----------------------------------------------
# Computing spectral dissimilarities
#-----------------------------------------------
# Compute distances
euDis <- fDiss(Xr = Xr, method = "euclid")     # euclidean distance
cosineDis <- fDiss(Xr = Xr, method = "cosine") # cosine distance
corDis <- corDiss(Xr = Xr)           # correlation distance
mcorDis <- corDiss(Xr = Xr, ws = 41) # moving window correlation distance
pcaDis <- orthoDiss(Xr, pcSelection = list("cumvar", .99), method = "pca", local = FALSE)$dissimilarity # default parameters in orthoDiss
# Computation of a principal component dissimilarity matrix using the
# "opc" method for the selection of the principal components
opcDis <- orthoDiss(Xr, Yr = Yr, pcSelection = list("opc", 40), method = "pca")$dissimilarity
# Computation of a partial least squares (PLS) dissimilarity matrix
plsDis <- orthoDiss(Xr, Yr = Yr, pcSelection = list("manual", 10), method = "pls")$dissimilarity
# Select the sample with the smallest Mahalanobis distance to the centre of the data
# First project into the PC space
# Fix: the original called prcomp(Xr, .center = T); '.center' is not a prcomp
# argument (it was silently swallowed by '...'), the intended argument is 'center'
pca <- prcomp(Xr, center = TRUE)
sc <- scale(pca$x, center = TRUE, scale = TRUE) # standardized scores
pvar <- cumsum(pca$sdev^2 / sum(pca$sdev^2))    # cumulative explained variance
pc <- max(which(pvar < .99)) + 1 # number of PC's accounting for at least 99% of the variance
sc <- sc[, 1:pc] # truncated score matrix
mahal <- mahalanobis(sc, center = colMeans(sc), cov = cov(sc)) # Mahalanobis distances
id <- which.min(mahal) # index of the sample closest to the centre of the data
# Plot the sample closest to the mean
p <- ggplot(data = melt(Xr), aes(x = as.numeric(as.character(Var2)), y = value, group = Var1)) + geom_line(col = "grey80", alpha = .5) + theme_bw() + labs(x = "wavelength /nm", y = "Absorbance") + scale_x_continuous(breaks = seq(1000, 2500, 250))
p + geom_line(data = melt(Xr[id, , drop = FALSE]), col = "red")
# Get distances to the selected sample and stack data
d <- as.data.frame(cbind(euDis[, id], cosineDis[, id], corDis[, id], mcorDis[, id], pcaDis[, id], opcDis[, id], plsDis[, id]))
colnames(d) <- paste0(c("euclidean", "cosine", "correlation", "moving correlation", "mahalanobis", "opc", "opc with pls"), " distance")
# For each dissimilarity measure, find the 10 closest neighbours to the selected
# sample (11 indices are kept: the sample itself, at distance 0, is included)
knn <- sapply(d, function(x) order(x))[1:11, ]
knn
# Plot
# merge spectra into one data.frame and format for ggplot2
spc <- foreach(i = 1:ncol(knn), .combine = rbind) %do% {
  data.frame(melt(Xr[knn[, i], ]), d = colnames(knn)[i])
}
p <- ggplot(data = spc, aes(x = as.numeric(as.character(Var2)), y = value, group = Var1)) + geom_line(col = "grey80") + facet_wrap(~d) + theme_bw() + labs(x = "wavelength /nm", y = "Absorbance") + scale_x_continuous(breaks = seq(1000, 2500, 250))
p + geom_line(data = melt(Xr[id, , drop = FALSE]), col = "red")
# Now we evaluate whether the dissimilarities are representative of the sample
# compositional variation using simEval
euSE <- simEval(d = euDis, sideInf = Yr)
cosineSE <- simEval(d = cosineDis, sideInf = Yr)
corSE <- simEval(d = corDis, sideInf = Yr)
mcorSE <- simEval(d = mcorDis, sideInf = Yr)
pcaSE <- simEval(d = pcaDis, sideInf = Yr)
opcSE <- simEval(d = opcDis, sideInf = Yr)
plsSE <- simEval(d = plsDis, sideInf = Yr)
sim <- list(euSE, cosineSE, corSE, mcorSE, pcaSE, opcSE, plsSE)
names(sim) <- paste0(c("euclidean", "cosine", "correlation", "moving correlation", "mahalanobis", "opc", "opc with pls"), " distance")
# Stack rmsd and r (correlation) and plot
simplot <- do.call(rbind, lapply(sim, function(x) x$eval))
simplot$d <- factor(rownames(simplot), levels = rownames(simplot))
ggplot(data = melt(simplot), aes(y = value, x = d, fill = d)) + geom_bar(stat = "identity") + facet_wrap(~variable, scale = "free_y") + theme_bw() + labs(x = "dissimilarities", y = "") + scale_fill_brewer("Dissimilarities", palette = "Set1") + scale_x_discrete(labels = abbreviate) + theme(legend.position = "top")
# The 'opc' distance is the most representative of the soil compositional variation.
# This is expected since 'opc' minimizes the distance between the closest neighbours
# and the training samples in the compositional space.
#-----------------------------------------------
# Predict sample composition with 'mbl'
# see ?mbl for other examples
#-----------------------------------------------
# Ex 1 : A simple Memory-based learning approach using partial least square regression and correlation distance
# First, set the parameters controlling the mbl function
ctrl <- mblControl(sm = "cor", # dissimilarity measure
                   center = TRUE, # Do the predictors need to be centered ?
                   scaled = TRUE, # Do the predictors need to be scaled ?
                   valMethod = "none", # No internal validation method
                   range.pred.lim = TRUE, # Are the predictions constrained by the range of the response variable in each local model ?
                   progress = TRUE # progress bar
)
# Run mbl
mbl_cor <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none", # How the distances are used: 'none' = only in the selection of neighbours, 'weights' = distances used to compute sample weights through the tricubic function
               k = seq(25, 150, by = 25), # a vector of numbers of neighbours to be tested
               method = "wapls1", # regression method: the prediction is a weighted average of values predicted with increasing pls components
               pls.c = c(4,15) # minimum and maximum number of pls components
)
mbl_cor
plot(mbl_cor) # plot of the error vs k
# Get predictions and plot predicted vs observed
preds <- getPredictions(mbl_cor)
predobs <- data.frame(pred=melt(preds),obs=Yu)
predobs$k <- sub(".+_([0-9]+)","\\1",predobs$pred.variable) # extract k from the prediction column names
p <- ggplot(data=predobs,aes(x=pred.value,y=obs)) + geom_point() + facet_wrap(~k) + theme_bw() + geom_abline(col="red") + labs(x="Predicted Nt",y="Observed Nt")
p
p + geom_text(data=mbl_cor$YuPredictionStats,aes(x=-Inf,y=Inf,label=paste0("rmse = ",round(rmse,2))),vjust=2,hjust=-0.1) # this adds rmse info
# Ex 2 : same as Ex. 1 but with the pc distance
ctrl$sm <- "pc"
ctrl$pcSelection <- list(method="cumvar",value=.999) # select as many PC's as needed to explain 99.9% of the spectral variation
mbl_pc <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
              dissUsage = "none",
              k = seq(25, 150, by = 25),
              method = "wapls1",
              pls.c = c(4,15)
)
mbl_pc
# Ex 3 : same as Ex. 2 but with the pls distance
ctrl$sm <- "pls"
ctrl$pcSelection <- list(method="manual",value=10)
mbl_pls <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none",
               k = seq(25, 150, by = 25),
               method = "wapls1",
               pls.c = c(4,15)
)
mbl_pls # this shows some improvement compared to pc distance...
# Ex 4 : same as Ex. 2 but with a user-defined distance matrix
# distances could be for instance computed on the 1st derivative spectra
Xr_der1 <- savitzkyGolay(NIRsoil$spc[as.logical(NIRsoil$train),], p = 5, w = 21, m = 1)
Xu_der1 <- savitzkyGolay(NIRsoil$spc[!as.logical(NIRsoil$train),], p = 5, w = 21, m = 1)
pls_dis <- orthoDiss(Xr=Xr_der1,X2 = Xu_der1, Yr = Yr,method = "pls", pcSelection = list("manual",10)) # pls distance in the 1st derivative space
ctrl$sm <- "none" # we use pls_dis instead...
mbl_user_diss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,mblCtrl = ctrl,
                     dissimilarityM = pls_dis$dissimilarity, # user-specified dissimilarity matrix
                     dissUsage = "none",
                     k = seq(50, 150, by = 25),
                     method = "wapls1", # weighted-average pls regression
                     pls.c = c(4,15) # minimum and maximum number of pls components
)
mbl_user_diss
# Ex 5 : Let's try with another multivariate regression technique: pls
# the number of pls components for each local model is computed by cross-validation
ctrl$sm <- "pls"
mbl_pls_cv <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
                  dissUsage = "none",
                  k = seq(25, 150, by = 25),
                  method = "pls", # we use the 'standard' pls algorithm...
                  pls.c = 15 # with a max of 15 components
)
mbl_pls_cv # this shows some improvement compared to wapls1 regression
# Ex 6 : Let's try with another multivariate regression technique: gpr
mbl_gpr <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none",
               k = seq(25, 150, by = 25),
               method = "gpr" # predictions are done through gaussian process regression
)
mbl_gpr # this is comparable with pls, without the need for tuning the number of components...
# Ex 7: A Memory-based learning approach (the spectrum-based learner), as implemented in Ramirez-Lopez et al. (2013)
sbl <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,
           mblCtrl = mblControl(valMethod = "none"), # most default values in mblControl correspond to the spectrum-based learner...
           dissUsage = "predictors", # the distance matrix is used as additional predictor
           k = seq(25, 150, by = 25),
           method = "gpr" # predictions are done through gaussian process regression
)
sbl
# Plot all the results together
mymodels <- list(cor = mbl_cor,pc = mbl_pc, pls = mbl_pls, user_def = mbl_user_diss , pls_cv = mbl_pls_cv, gpr = mbl_gpr,sbl = sbl)
tmp <- do.call(rbind,lapply(names(mymodels), function(x) data.frame(name = x, mymodels[[x]]$YuPredictionStats))) # stack model results
ggplot(data=tmp,aes(x=k,y=rmse,colour=name,linetype=name)) + geom_point() + geom_line() + theme_bw() + ylim(c(0.4,0.8)) + scale_x_continuous(breaks=seq(25, 150, by = 25))
# Ex 8 : (numbering fixed: 'Ex 7' was used twice above)
# In the previous examples, the samples selected for the local models (ie neighbours) are selected with a fixed number (k)
# but the neighbourhood can also be defined by dissimilarity thresholds
# this is implemented in mbl with the k.diss argument
mbl_kdiss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,
                 mblCtrl = mblControl(sm = "cor",valMethod = "none"), # correlation distance
                 dissUsage = "none",
                 k.diss = c(0.25,0.1,0.05), # samples with a correlation less than 0.75, 0.9, 0.95 with the sample to predict will not be included in the models
                 k.range = c(20,nrow(Xr)), # the minimum and maximum number of samples to be included in the local calibrations
                 method = "gpr"
)
# let's see what is the number of samples used in the calibrating models
res <-mbl_kdiss$results[[2]] # prediction table for correlation distance = 0.1
hist(as.numeric(as.factor(res$k)),xlab="k",main="")
# setting the thresholds might be less intuitive for other distances
# we suggest to pre-compute the dissimilarity matrix
# and look at the histogram of the dissimilarity values
# here is an example with the opc distance
opc_dis <- orthoDiss(Xr=Xr,X2 = Xu, Yr = Yr,method = "pca", pcSelection = list("opc",40)) # opc (pca) distance between calibration and validation spectra
ctrl$sm <- "none"
mbl_kdiss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
                 dissimilarityM = opc_dis$dissimilarity,
                 dissUsage = "none",
                 k.diss = c(0.05,0.1,0.2,0.5), # samples with a pc distance > 0.05, 0.1, 0.2 or 0.5 with the sample to predict will not be included in the models
                 k.range = c(20,nrow(Xr)), # the minimum and maximum number of samples to be included in the local calibrations
                 method = "gpr"
)
# Ex 9: Internal validation
# resemble provides a way to make an internal validation of the results:
# 'NNv': the nearest neighbour to the sample to predict is left out of the list of k nearest neighbours and predicted and compared with the actual value to compute a rmse
# 'loc_crossval' : each local model is cross-validated and the mean of the cross-validation results is computed
ctrl <- mblControl(sm="pls",pcSelection = list("manual",10),
                   valMethod = c("loc_crossval","NNv"),
                   resampling = 10, # controls the local cross-validation: this is the number of partitions
                   p = 0.25 # percentage of samples included in each partition
)
mbl_valid <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
                 dissUsage = "none",
                 k = seq(25, 150, by = 25),
                 method = "gpr"
) # note that the algorithm is slower when using internal validation
plot(mbl_valid) # shows the external (against Yu) and internal rmse; the internal one seems a bit optimistic compared to external validation
# Note that to be correct, external validation should be done with 3 sets:
# a reference (Xr), tuning and test sets
# the tuning set is used to find the best combination of MBL parameters (k, sm, etc..)
# which is then applied to predict the test set
#-----------------------------------------------
# Example of parallel execution
#-----------------------------------------------
# To speed-up mbl, the local models can be run in parallel
# using the 'parallel' package and the parallel back-end for the foreach %dopar% function
# provided by 'doParallel'
library(parallel)
library(doParallel)
# Time spent with one core
ctrl <- mblControl(sm = "pls", pcSelection = list("manual", 10), progress = TRUE)
system.time(mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl, dissUsage = "none", k = seq(25, 150, by = 25), method = "gpr"))
# Now with 4 cores
cl <- makeCluster(4)   # create a set of 4 R worker processes
registerDoParallel(cl) # register them to work with foreach
system.time(mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl, dissUsage = "none", k = seq(25, 150, by = 25), method = "gpr"))
# I get a ~3 x speed-up ...
registerDoSEQ() # revert foreach to sequential execution
stopCluster(cl) # shut down the worker processes
# Another example of parallel execution available in resemble is with the opc method:
# opc can be time-intensive because it requires computing a distance matrix and
# nearest neighbours for each pc component.
# In orthoProjection, the 'cores' argument allows to do that in parallel
# (this will work only on Windows and Linux)
# Let's test this on a large matrix
X <- matrix(rnorm(2 * 10^6), nrow = 20000, ncol = 100) # input (spectral) matrix of 20000 rows and 100 columns
y <- rnorm(2 * 10^4) # 'side' information (one value per row of X) used by the 'opc' selection
system.time(orthoProjection(Xr = X, Yr = y, method = "pca", pcSelection = list("opc", 40)))            # this takes 285 sec on my machine
system.time(orthoProjection(Xr = X, Yr = y, method = "pca", pcSelection = list("opc", 40), cores = 4)) # and 136 sec with 4 cores...
|
/code_ex.R
|
no_license
|
antoinestevens/resemble
|
R
| false
| false
| 16,016
|
r
|
#-----------------------------------------------
# Load packages and prepare data
#-----------------------------------------------
# Load packages
# Don't forget to install required packages if needed...
library(resemble); library(prospectr)
library(ggplot2)
library(foreach); library(reshape2)
# Load dataset from the "Chimiométrie 2006" challenge
# see ?NIRsoil for an explanation of the variables
data(NIRsoil)
# Filter the data using the Savitzky and Golay smoothing filter with a window size of
# 11 spectral variables and a polynomial order of 3 (no differentiation).
NIRsoil$spc_sg <- savitzkyGolay(X = NIRsoil$spc, p = 3, w = 11, m = 0)
# Bin the *smoothed* spectra every 10 nm (5 adjacent 2-nm variables per bin).
# Fix: the original binned the raw spectra (X = NIRsoil$spc), which silently
# discarded the Savitzky-Golay smoothing computed just above.
NIRsoil$spc_sg <- binning(X = NIRsoil$spc_sg, bin.size = 5)
wavelength <- as.numeric(colnames(NIRsoil$spc)) # store the original wavelength positions
# Select the dependent variable
dep <- "Nt" # Total Nitrogen
# Remove observations with a missing dependent variable
anyna <- is.na(NIRsoil[, dep])
NIRsoil <- NIRsoil[!anyna, ]
# Split into calibration and validation sets
Yr <- NIRsoil[as.logical(NIRsoil$train), dep]      # dependent variable (calibration set)
Xr <- NIRsoil$spc_sg[as.logical(NIRsoil$train), ]  # spectral matrix (calibration set)
Yu <- NIRsoil[!as.logical(NIRsoil$train), dep]     # dependent variable (validation set)
Xu <- NIRsoil$spc_sg[!as.logical(NIRsoil$train), ] # spectral matrix (validation set)
#-----------------------------------------------
# Computing spectral dissimilarities
#-----------------------------------------------
# Compute distances
euDis <- fDiss(Xr = Xr, method = "euclid")     # euclidean distance
cosineDis <- fDiss(Xr = Xr, method = "cosine") # cosine distance
corDis <- corDiss(Xr = Xr)           # correlation distance
mcorDis <- corDiss(Xr = Xr, ws = 41) # moving window correlation distance
pcaDis <- orthoDiss(Xr, pcSelection = list("cumvar", .99), method = "pca", local = FALSE)$dissimilarity # default parameters in orthoDiss
# Computation of a principal component dissimilarity matrix using the
# "opc" method for the selection of the principal components
opcDis <- orthoDiss(Xr, Yr = Yr, pcSelection = list("opc", 40), method = "pca")$dissimilarity
# Computation of a partial least squares (PLS) dissimilarity matrix
plsDis <- orthoDiss(Xr, Yr = Yr, pcSelection = list("manual", 10), method = "pls")$dissimilarity
# Select the sample with the smallest Mahalanobis distance to the centre of the data
# First project into the PC space
# Fix: the original called prcomp(Xr, .center = T); '.center' is not a prcomp
# argument (it was silently swallowed by '...'), the intended argument is 'center'
pca <- prcomp(Xr, center = TRUE)
sc <- scale(pca$x, center = TRUE, scale = TRUE) # standardized scores
pvar <- cumsum(pca$sdev^2 / sum(pca$sdev^2))    # cumulative explained variance
pc <- max(which(pvar < .99)) + 1 # number of PC's accounting for at least 99% of the variance
sc <- sc[, 1:pc] # truncated score matrix
mahal <- mahalanobis(sc, center = colMeans(sc), cov = cov(sc)) # Mahalanobis distances
id <- which.min(mahal) # index of the sample closest to the centre of the data
# Plot the sample closest to the mean
p <- ggplot(data = melt(Xr), aes(x = as.numeric(as.character(Var2)), y = value, group = Var1)) + geom_line(col = "grey80", alpha = .5) + theme_bw() + labs(x = "wavelength /nm", y = "Absorbance") + scale_x_continuous(breaks = seq(1000, 2500, 250))
p + geom_line(data = melt(Xr[id, , drop = FALSE]), col = "red")
# Get distances to the selected sample and stack data
d <- as.data.frame(cbind(euDis[, id], cosineDis[, id], corDis[, id], mcorDis[, id], pcaDis[, id], opcDis[, id], plsDis[, id]))
colnames(d) <- paste0(c("euclidean", "cosine", "correlation", "moving correlation", "mahalanobis", "opc", "opc with pls"), " distance")
# For each dissimilarity measure, find the 10 closest neighbours to the selected
# sample (11 indices are kept: the sample itself, at distance 0, is included)
knn <- sapply(d, function(x) order(x))[1:11, ]
knn
# Plot
# merge spectra into one data.frame and format for ggplot2
spc <- foreach(i = 1:ncol(knn), .combine = rbind) %do% {
  data.frame(melt(Xr[knn[, i], ]), d = colnames(knn)[i])
}
p <- ggplot(data = spc, aes(x = as.numeric(as.character(Var2)), y = value, group = Var1)) + geom_line(col = "grey80") + facet_wrap(~d) + theme_bw() + labs(x = "wavelength /nm", y = "Absorbance") + scale_x_continuous(breaks = seq(1000, 2500, 250))
p + geom_line(data = melt(Xr[id, , drop = FALSE]), col = "red")
# Now we evaluate whether the dissimilarities are representative of the sample
# compositional variation using simEval
euSE <- simEval(d = euDis, sideInf = Yr)
cosineSE <- simEval(d = cosineDis, sideInf = Yr)
corSE <- simEval(d = corDis, sideInf = Yr)
mcorSE <- simEval(d = mcorDis, sideInf = Yr)
pcaSE <- simEval(d = pcaDis, sideInf = Yr)
opcSE <- simEval(d = opcDis, sideInf = Yr)
plsSE <- simEval(d = plsDis, sideInf = Yr)
sim <- list(euSE, cosineSE, corSE, mcorSE, pcaSE, opcSE, plsSE)
names(sim) <- paste0(c("euclidean", "cosine", "correlation", "moving correlation", "mahalanobis", "opc", "opc with pls"), " distance")
# Stack rmsd and r (correlation) and plot
simplot <- do.call(rbind, lapply(sim, function(x) x$eval))
simplot$d <- factor(rownames(simplot), levels = rownames(simplot))
ggplot(data = melt(simplot), aes(y = value, x = d, fill = d)) + geom_bar(stat = "identity") + facet_wrap(~variable, scale = "free_y") + theme_bw() + labs(x = "dissimilarities", y = "") + scale_fill_brewer("Dissimilarities", palette = "Set1") + scale_x_discrete(labels = abbreviate) + theme(legend.position = "top")
# The 'opc' distance is the most representative of the soil compositional variation.
# This is expected since 'opc' minimizes the distance between the closest neighbours
# and the training samples in the compositional space.
#-----------------------------------------------
# Predict sample composition with 'mbl'
# see ?mbl for other examples
#-----------------------------------------------
# Ex 1 : A simple Memory-based learning approach using partial least square regression and correlation distance
# First, set the parameters controlling the mbl function
ctrl <- mblControl(sm = "cor", # dissimilarity measure
                   center = TRUE, # Do the predictors need to be centered ?
                   scaled = TRUE, # Do the predictors need to be scaled ?
                   valMethod = "none", # No internal validation method
                   range.pred.lim = TRUE, # Are the predictions constrained by the range of the response variable in each local model ?
                   progress = TRUE # progress bar
)
# Run mbl
mbl_cor <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none", # How the distances are used: 'none' = only in the selection of neighbours, 'weights' = distances used to compute sample weights through the tricubic function
               k = seq(25, 150, by = 25), # a vector of numbers of neighbours to be tested
               method = "wapls1", # regression method: the prediction is a weighted average of values predicted with increasing pls components
               pls.c = c(4,15) # minimum and maximum number of pls components
)
mbl_cor
plot(mbl_cor) # plot of the error vs k
# Get predictions and plot predicted vs observed
preds <- getPredictions(mbl_cor)
predobs <- data.frame(pred=melt(preds),obs=Yu)
predobs$k <- sub(".+_([0-9]+)","\\1",predobs$pred.variable) # extract k from the prediction column names
p <- ggplot(data=predobs,aes(x=pred.value,y=obs)) + geom_point() + facet_wrap(~k) + theme_bw() + geom_abline(col="red") + labs(x="Predicted Nt",y="Observed Nt")
p
p + geom_text(data=mbl_cor$YuPredictionStats,aes(x=-Inf,y=Inf,label=paste0("rmse = ",round(rmse,2))),vjust=2,hjust=-0.1) # this adds rmse info
# Ex 2 : same as Ex. 1 but with the pc distance
ctrl$sm <- "pc"
ctrl$pcSelection <- list(method="cumvar",value=.999) # select as many PC's as needed to explain 99.9% of the spectral variation
mbl_pc <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
              dissUsage = "none",
              k = seq(25, 150, by = 25),
              method = "wapls1",
              pls.c = c(4,15)
)
mbl_pc
# Ex 3 : same as Ex. 2 but with the pls distance
ctrl$sm <- "pls"
ctrl$pcSelection <- list(method="manual",value=10)
mbl_pls <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none",
               k = seq(25, 150, by = 25),
               method = "wapls1",
               pls.c = c(4,15)
)
mbl_pls # this shows some improvement compared to pc distance...
# Ex 4 : same as Ex. 2 but with a user-defined distance matrix
# distances could be for instance computed on the 1st derivative spectra
Xr_der1 <- savitzkyGolay(NIRsoil$spc[as.logical(NIRsoil$train),], p = 5, w = 21, m = 1)
Xu_der1 <- savitzkyGolay(NIRsoil$spc[!as.logical(NIRsoil$train),], p = 5, w = 21, m = 1)
pls_dis <- orthoDiss(Xr=Xr_der1,X2 = Xu_der1, Yr = Yr,method = "pls", pcSelection = list("manual",10)) # pls distance in the 1st derivative space
ctrl$sm <- "none" # we use pls_dis instead...
mbl_user_diss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,mblCtrl = ctrl,
                     dissimilarityM = pls_dis$dissimilarity, # user-specified dissimilarity matrix
                     dissUsage = "none",
                     k = seq(50, 150, by = 25),
                     method = "wapls1", # weighted-average pls regression
                     pls.c = c(4,15) # minimum and maximum number of pls components
)
mbl_user_diss
# Ex 5 : Let's try with another multivariate regression technique: pls
# the number of pls components for each local model is computed by cross-validation
ctrl$sm <- "pls"
mbl_pls_cv <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
                  dissUsage = "none",
                  k = seq(25, 150, by = 25),
                  method = "pls", # we use the 'standard' pls algorithm...
                  pls.c = 15 # with a max of 15 components
)
mbl_pls_cv # this shows some improvement compared to wapls1 regression
# Ex 6 : Let's try with another multivariate regression technique: gpr
mbl_gpr <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
               dissUsage = "none",
               k = seq(25, 150, by = 25),
               method = "gpr" # predictions are done through gaussian process regression
)
mbl_gpr # this is comparable with pls, without the need for tuning the number of components...
# Ex 7: A Memory-based learning approach (the spectrum-based learner), as implemented in Ramirez-Lopez et al. (2013)
sbl <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,
           mblCtrl = mblControl(valMethod = "none"), # most default values in mblControl correspond to the spectrum-based learner...
           dissUsage = "predictors", # the distance matrix is used as additional predictor
           k = seq(25, 150, by = 25),
           method = "gpr" # predictions are done through gaussian process regression
)
sbl
# Plot all the results together
mymodels <- list(cor = mbl_cor,pc = mbl_pc, pls = mbl_pls, user_def = mbl_user_diss , pls_cv = mbl_pls_cv, gpr = mbl_gpr,sbl = sbl)
tmp <- do.call(rbind,lapply(names(mymodels), function(x) data.frame(name = x, mymodels[[x]]$YuPredictionStats))) # stack model results
ggplot(data=tmp,aes(x=k,y=rmse,colour=name,linetype=name)) + geom_point() + geom_line() + theme_bw() + ylim(c(0.4,0.8)) + scale_x_continuous(breaks=seq(25, 150, by = 25))
# Ex 7 (bis) :
# In the previous examples, the samples for the local models (i.e. the neighbours) are selected with a fixed number (k)
# but the neighbourhood can also be defined by dissimilarity thresholds
# this is implemented in mbl with the k.diss argument
mbl_kdiss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu,
mblCtrl = mblControl(sm = "cor",valMethod = "none"), # correlation dissimilarity
dissUsage = "none",
k.diss = c(0.25,0.1,0.05), # samples with a correlation less than 0.75, 0.9, 0.95 with the sample to predict will not be included in the models
k.range = c(20,nrow(Xr)), # the minimum and maximum number of samples to be included in the local calibrations
method = "gpr"
)
# let's see what is the number of samples used in the calibrating models
res <-mbl_kdiss$results[[2]] # prediction table for correlation distance threshold = 0.1
hist(as.numeric(as.factor(res$k)),xlab="k",main="")
# setting the thresholds might be less intuitive for other distances
# we suggest to pre-compute the dissimilarity matrix
# and look at the histogram of the dissimilarity values
# here is an example with the opc distance
opc_dis <- orthoDiss(Xr=Xr,X2 = Xu, Yr = Yr,method = "pca", pcSelection = list("opc",40)) # opc (pca-based) dissimilarity computed on the raw spectra
ctrl$sm <- "none" # the pre-computed matrix is passed below via dissimilarityM
mbl_kdiss <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
dissimilarityM = opc_dis$dissimilarity,
dissUsage = "none",
k.diss = c(0.05,0.1,0.2,0.5), # samples with a pc distance > 0.05, 0.1, 0.2, 0.5 with the sample to predict will not be included in the models
k.range = c(20,nrow(Xr)), # the minimum and maximum number of samples to be included in the local calibrations
method = "gpr"
)
# Ex 8: Internal validation
# resemble provides two ways to perform an internal validation of the results:
# 'NNv': nearest-neighbour validation -- the nearest neighbour of each sample to predict is left out of the list of k nearest neighbours, predicted, and compared with its actual value to compute an rmse
# 'loc_crossval' : each local model is cross-validated and the mean of the cross-validation results is computed
ctrl <- mblControl(sm="pls",pcSelection = list("manual",10),
valMethod = c("loc_crossval","NNv"),
resampling = 10, # controls the local cross-validation: this is the number of partitions,
p = 0.25 # percentage of samples included in each partition
)
mbl_valid <- mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl,
dissUsage = "none",
k = seq(25, 150, by = 25),
method = "gpr"
) # note that the algorithm is slower when using internal validation
plot(mbl_valid) # shows the external (against Yu -- TODO confirm; the original comment said Yr) and internal rmse; the internal one seems a bit optimistic compared to external validation
# Note that to be correct, external validation should be done with 3 sets:
# a reference (Xr), tuning and test sets
# the tuning set is used to find the best combination of MBL parameters (k, sm, etc..)
# which is then applied to predict the test set
#-----------------------------------------------
# Example of parallel execution
#-----------------------------------------------
# To speed up mbl, the local models can be run in parallel
# using the 'parallel' package and the parallel back-end for the
# foreach %dopar% function provided by 'doParallel'
library(parallel)
library(doParallel)
# here is the time spent with one core
# (progress = TRUE, not T: T/F are reassignable variables, not reserved words)
ctrl <- mblControl(sm = "pls", pcSelection = list("manual", 10), progress = TRUE)
system.time(mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl, dissUsage = "none", k = seq(25, 150, by = 25), method = "gpr"))
# now with 4 cores
cl <- makeCluster(4)   # create a set of 4 running R copies (worker processes)
registerDoParallel(cl) # register them as the foreach parallel back-end
system.time(mbl(Yr = Yr, Xr = Xr, Yu = Yu, Xu = Xu, mblCtrl = ctrl, dissUsage = "none", k = seq(25, 150, by = 25), method = "gpr"))
# I get a ~3 x speed-up ...
registerDoSEQ() # un-register the parallel back-end (back to sequential execution)
stopCluster(cl) # shut down the worker R processes
# another example of parallel execution available in resemble is with the opc method
# opc can be time-intensive because it requires computing a dissimilarity matrix
# and nearest neighbours for each pc component
# In orthoProjection, the 'cores' argument allows to do that in parallel
# (this will work only on Windows and Linux)
# Let's test this on a large matrix
X <- matrix(rnorm(2 * 10^6), nrow = 20000, ncol = 100) # input (spectral) matrix of 20000 rows and 100 columns
y <- rnorm(2 * 10^4) # 'side' information used in the simEval function
system.time(orthoProjection(Xr = X, Yr = y, method = "pca", pcSelection = list("opc", 40)))            # this takes 285 sec on my machine
system.time(orthoProjection(Xr = X, Yr = y, method = "pca", pcSelection = list("opc", 40), cores = 4)) # and 136 sec with 4 cores...
|
# Download and extract the UCI HAR Dataset into the working directory.
#
# Side effects:
#   - creates ./downloads/ if it does not already exist
#   - downloads the zip archive into ./downloads/ (requires the curl binary,
#     so this works on Mac/Linux; on Windows curl must be installed)
#   - unzips the archive into the current working directory
#
# Returns (invisibly) the character vector of extracted file paths from unzip().
download <- function() {
  # create a directory for downloads if it doesn't exist;
  # dir.exists() is the correct test: file.exists() would also be TRUE for a
  # regular file named "downloads", which would then make dir.create() fail
  if (!dir.exists("./downloads")) {
    dir.create("./downloads")
  }
  ## download the zip archive
  fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileUrl, destfile = "./downloads/UCI_HAR_Dataset.zip", method = "curl")
  ## unzip the archive; invisible() so calling the function at top level
  ## does not print the long vector of extracted paths
  invisible(unzip("./downloads/UCI_HAR_Dataset.zip", exdir = "./"))
}
|
/download.R
|
no_license
|
honto-ming/getdata-011-proj
|
R
| false
| false
| 460
|
r
|
# Download and extract the UCI HAR Dataset into the working directory.
# Side effects: creates ./downloads/ if missing, downloads the zip archive
# into it, and unzips the archive into the current directory. Takes no
# arguments; returns the result of unzip() (a vector of extracted paths).
download <- function() {
  # create a directory for downloads if it doesn't exist
  if(!file.exists("./downloads")) { dir.create("./downloads/") }
  ## download the zip archive into ./downloads/
  # NOTE(review): method = "curl" requires the curl binary -- works on
  # Mac/Linux; confirm whether Windows support is intended
  fileUrl <-"https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileUrl, destfile="./downloads/UCI_HAR_Dataset.zip", method ="curl")
  ## unzip the archive into the current working directory
  unzip("./downloads/UCI_HAR_Dataset.zip", exdir="./")
}
|
\name{twoSamplePermutationTestProportion}
\alias{twoSamplePermutationTestProportion}
\title{
Randomization (Permutation) Test to Compare Two Proportions (Fisher's Exact Test)
}
\description{
Perform a two-sample randomization (permutation) test to compare two proportions.
This is also called Fisher's exact test.
\bold{Note:} You can perform Fisher's exact test in \R using the function
\code{\link{fisher.test}}.
}
\usage{
twoSamplePermutationTestProportion(x, y, x.and.y = "Binomial Outcomes",
alternative = "two.sided", tol = sqrt(.Machine$double.eps))
}
\arguments{
\item{x, y}{
When \code{x.and.y="Binomial Outcomes"} (the default), \code{x} and \code{y} are
vectors of observations from groups 1 and 2, respectively. The vectors \code{x}
and \code{y} must contain no more than 2 unique values (e.g., \code{0} and \code{1},
\code{FALSE} and \code{TRUE}, \code{"No"} and \code{"Yes"}, etc.). In this case,
the result of \code{sort(unique(x))[2]} is taken to be the value that indicates a
\dQuote{success} for \code{x} and the result of \code{sort(unique(y))[2]} is taken
to be the value that indicates a \dQuote{success} for \code{y}. For example, \cr
\code{x = c(FALSE, TRUE, FALSE, TRUE, TRUE)} indicates 3 successes in 5 trials, and
\code{y = c(1, 0, 0, 0)} indicates 1 success in 4 trials. When \cr
\code{x.and.y="Binomial Outcomes"}, missing (\code{NA}), undefined (\code{NaN}), and
infinite (\code{Inf}, \code{-Inf}) values are allowed but will be removed.
When \code{x.and.y="Number Successes and Trials"}, \code{x} must be a vector of
length 2 containing the number of successes for groups 1 and 2, respectively, and
\code{y} must be a vector of length 2 that contains the number of trials for groups
1 and 2, respectively. For example, \code{x = c(3, 1)} and \code{y = c(5, 4)}
indicates 3 successes in 5 trials for group 1 and 1 success in 4 trials for
group 2.
}
\item{x.and.y}{
character string indicating the kind of data stored in the vectors \code{x} and
\code{y}. The possible values are \code{x.and.y="Binomial Outcomes"} (the default),
and \cr
\code{x.and.y="Number Successes and Trials"}.
}
\item{alternative}{
character string indicating the kind of alternative hypothesis. The possible values
are \code{"two.sided"} (the default), \code{"less"}, and \code{"greater"}.
}
\item{tol}{
numeric scalar indicating the tolerance to use for computing the p-value for the
two-sample permutation test. The default value is \cr
\code{tol=sqrt(.Machine$double.eps)}. See the DETAILS section below for more
information.
}
}
\details{
\emph{Randomization Tests} \cr
In 1935, R.A. Fisher introduced the idea of a \bold{\emph{randomization test}}
(Manly, 2007, p. 107; Efron and Tibshirani, 1993, Chapter 15), which is based on
trying to answer the question: \dQuote{Did the observed pattern happen by chance,
or does the pattern indicate the null hypothesis is not true?} A randomization
test works by simply enumerating all of the possible outcomes under the null
hypothesis, then seeing where the observed outcome fits in. A randomization test
is also called a \bold{\emph{permutation test}}, because it involves permuting the
observations during the enumeration procedure (Manly, 2007, p. 3).
In the past, randomization tests have not been used as extensively as they are now
because of the \dQuote{large} computing resources needed to enumerate all of the
possible outcomes, especially for large sample sizes. The advent of more powerful
personal computers and software has allowed randomization tests to become much
easier to perform. Depending on the sample size, however, it may still be too
time consuming to enumerate all possible outcomes. In this case, the randomization
test can still be performed by sampling from the randomization distribution, and
comparing the observed outcome to this sampled permutation distribution.
\cr
\emph{Two-Sample Randomization Test for Proportions} \cr
Let \eqn{\underline{x} = x_1, x_2, \ldots, x_{n_1}} be a vector of \eqn{n_1}
independent and identically distributed (i.i.d.) observations
from a \link[stats:Binomial]{binomial distribution} with parameter \code{size=1} and
probability of success \code{prob=}\eqn{p_1}, and let
\eqn{\underline{y} = y_1, y_2, \ldots, y_{n_2}} be a vector of \eqn{n_2}
i.i.d. observations from a \link[stats:Binomial]{binomial distribution} with
parameter \code{size=1} and probability of success \code{prob=}\eqn{p_2}.
Consider the test of the null hypothesis:
\deqn{H_0: p_1 = p_2 \;\;\;\;\;\; (1)}
The three possible alternative hypotheses are the upper one-sided alternative
(\code{alternative="greater"})
\deqn{H_a: p_1 > p_2 \;\;\;\;\;\; (2)}
the lower one-sided alternative (\code{alternative="less"})
\deqn{H_a: p_1 < p_2 \;\;\;\;\;\; (3)}
and the two-sided alternative
\deqn{H_a: p_1 \ne p_2 \;\;\;\;\;\; (4)}
To perform the test of the null hypothesis (1) versus any of the three
alternatives (2)-(4), you can use the two-sample permutation test, which is also
called \link[stats:fisher.test]{Fisher's exact test}. When the observations are
from a B(1, \eqn{p}) distribution, the sample mean is an estimate of \eqn{p}.
Fisher's exact test is simply a permutation test for the difference between two
means from two different groups (see \code{\link{twoSamplePermutationTestLocation}}),
where the underlying populations are binomial with size parameter \code{size=1},
but possibly different values of the \code{prob} parameter \eqn{p}.
Fisher's exact test is usually described in terms of testing hypotheses concerning
a 2 x 2 contingency table (van Belle et al., 2004, p. 157;
Hollander and Wolfe, 1999, p. 473; Sheskin, 2011; Zar, 2010, p. 561).
The probabilities associated with the permutation distribution can be computed by
using the \link[stats:Hypergeometric]{hypergeometric distribution}.
}
\value{
A list of class \code{"permutationTest"} containing the results of the hypothesis
test. See the help file for \code{\link{permutationTest.object}} for details.
}
\references{
Efron, B., and R.J. Tibshirani. (1993). \emph{An Introduction to the Bootstrap}.
Chapman and Hall, New York, Chapter 15.
Hollander, M., and D.A. Wolfe. (1999). \emph{Nonparametric Statistical Methods}.
Second Edition. John Wiley and Sons, New York, p.473.
Manly, B.F.J. (2007). \emph{Randomization, Bootstrap and Monte Carlo Methods in
Biology}. Third Edition. Chapman & Hall, New York, Chapter 6.
Millard, S.P., and N.K. Neerchal. (2001).
\emph{Environmental Statistics with S-PLUS}. CRC Press, Boca Raton, FL,
pp.441--446.
Graham, S.L., K.J. Davis, W.H. Hansen, and C.H. Graham. (1975).
Effects of Prolonged Ethylene Thiourea Ingestion on the Thyroid of the Rat.
\emph{Food and Cosmetics Toxicology}, \bold{13}(5), 493--499.
Rodricks, J.V. (1992). \emph{Calculated Risks: The Toxicity and Human Health
Risks of Chemicals in Our Environment}. Cambridge University
Press, New York.
Rodricks, J.V. (2007). \emph{Calculated Risks: The Toxicity and Human Health
Risks of Chemicals in Our Environment}. Second Edition. Cambridge University
Press, New York.
Sheskin, D.J. (2011). \emph{Handbook of Parametric and Nonparametric
Statistical Procedures} Fifth Edition. CRC Press, Boca Raton, FL.
van Belle, G., L.D. Fisher, Heagerty, P.J., and Lumley, T. (2004).
\emph{Biostatistics: A Methodology for the Health Sciences, 2nd Edition}.
John Wiley & Sons, New York, p. 157.
Zar, J.H. (2010). \emph{Biostatistical Analysis}.
Fifth Edition. Prentice-Hall, Upper Saddle River, NJ,
p. 561.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\note{
Sometimes in environmental data analysis we are interested in determining whether
two probabilities or rates or proportions differ from each other. For example,
we may ask the question: \dQuote{Does exposure to pesticide X increase the risk of
developing cancer Y?}, where cancer Y may be liver cancer, stomach cancer, or some
other kind of cancer. One way environmental scientists attempt to answer this kind
of question is by conducting experiments on rodents in which one group (the
\dQuote{treatment} or \dQuote{exposed} group) is exposed to the pesticide and the
other group (the control group) is not. The incidence of cancer Y in the exposed
group is compared with the incidence of cancer Y in the control group. (See
Rodricks (2007) for a discussion of extrapolating results from experiments involving
rodents to consequences in humans and the associated difficulties).
Hypothesis tests you can use to compare proportions or probability of
\dQuote{success} between two groups include Fisher's exact test and the test
based on the normal approximation (see the \R help file for
\code{\link{prop.test}}).
}
\seealso{
\code{\link{permutationTest.object}}, \code{\link{plot.permutationTest}}, \cr
\code{\link{twoSamplePermutationTestLocation}},
\code{\link{oneSamplePermutationTest}},
\link{Hypothesis Tests}, \code{\link[boot]{boot}}.
}
\examples{
# Generate 10 observations from a binomial distribution with parameters
# size=1 and prob=0.3, and 20 observations from a binomial distribution
# with parameters size=1 and prob=0.5. Test the null hypothesis that the
# probability of "success" for the two distributions is the same against the
# alternative that the probability of "success" for group 1 is less than
# the probability of "success" for group 2.
# (Note: the call to set.seed allows you to reproduce this example).
set.seed(23)
dat1 <- rbinom(10, size = 1, prob = 0.3)
dat2 <- rbinom(20, size = 1, prob = 0.5)
test.list <- twoSamplePermutationTestProportion(
dat1, dat2, alternative = "less")
#----------
# Print the results of the test
#------------------------------
test.list
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: p.x - p.y = 0
#
#Alternative Hypothesis: True p.x - p.y is less than 0
#
#Test Name: Two-Sample Permutation Test
# Based on Differences in Proportions
# (Fisher's Exact Test)
#
#Estimated Parameter(s): p.hat.x = 0.60
# p.hat.y = 0.65
#
#Data: x = dat1
# y = dat2
#
#Sample Sizes: nx = 10
# ny = 20
#
#Test Statistic: p.hat.x - p.hat.y = -0.05
#
#P-value: 0.548026
#----------
# Plot the results of the test
#------------------------------
dev.new()
plot(test.list)
#----------
# Compare to the results of fisher.test
#--------------------------------------
x11 <- sum(dat1)
x21 <- length(dat1) - sum(dat1)
x12 <- sum(dat2)
x22 <- length(dat2) - sum(dat2)
mat <- matrix(c(x11, x12, x21, x22), ncol = 2)
fisher.test(mat, alternative = "less")
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: odds ratio = 1
#
#Alternative Hypothesis: True odds ratio is less than 1
#
#Test Name: Fisher's Exact Test for Count Data
#
#Estimated Parameter(s): odds ratio = 0.8135355
#
#Data: mat
#
#P-value: 0.548026
#
#95% Confidence Interval: LCL = 0.000000
# UCL = 4.076077
#==========
# Rodricks (1992, p. 133) presents data from an experiment by
# Graham et al. (1975) in which different groups of rats were exposed to
# various concentration levels of ethylene thiourea (ETU), a decomposition
# product of a certain class of fungicides that can be found in treated foods.
# In the group exposed to a dietary level of 250 ppm of ETU, 16 out of 69 rats
# (23%) developed thyroid tumors, whereas in the control group
# (no exposure to ETU) only 2 out of 72 (3%) rats developed thyroid tumors.
# If we use Fisher's exact test to test the null hypothesis that the proportion
# of rats exposed to 250 ppm of ETU who will develop thyroid tumors over their
# lifetime is no greater than the proportion of rats not exposed to ETU who will
# develop tumors, we get a one-sided upper p-value of 0.0002. Therefore, we
# conclude that the true underlying rate of tumor incidence in the exposed group
# is greater than in the control group.
#
# The data for this example are stored in Graham.et.al.75.etu.df.
# Look at the data
#-----------------
Graham.et.al.75.etu.df
# dose tumors n proportion
#1 0 2 72 0.02777778
#2 5 2 75 0.02666667
#3 25 1 73 0.01369863
#4 125 2 73 0.02739726
#5 250 16 69 0.23188406
#6 500 62 70 0.88571429
# Perform the test for a difference in tumor rates
#-------------------------------------------------
Num.Tumors <- with(Graham.et.al.75.etu.df, tumors[c(5, 1)])
Sample.Sizes <- with(Graham.et.al.75.etu.df, n[c(5, 1)])
test.list <- twoSamplePermutationTestProportion(
x = Num.Tumors, y = Sample.Sizes,
x.and.y="Number Successes and Trials", alternative = "greater")
#----------
# Print the results of the test
#------------------------------
test.list
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: p.x - p.y = 0
#
#Alternative Hypothesis: True p.x - p.y is greater than 0
#
#Test Name: Two-Sample Permutation Test
# Based on Differences in Proportions
# (Fisher's Exact Test)
#
#Estimated Parameter(s): p.hat.x = 0.23188406
# p.hat.y = 0.02777778
#
#Data: x = Num.Tumors
# n = Sample.Sizes
#
#Sample Sizes: nx = 69
# ny = 72
#
#Test Statistic: p.hat.x - p.hat.y = 0.2041063
#
#P-value: 0.0002186462
#----------
# Plot the results of the test
#------------------------------
dev.new()
plot(test.list)
#==========
# Clean up
#---------
rm(test.list, x11, x12, x21, x22, mat, Num.Tumors, Sample.Sizes)
#graphics.off()
}
\keyword{htest}
\keyword{models}
|
/man/twoSamplePermutationTestProportion.Rd
|
no_license
|
alexkowa/EnvStats
|
R
| false
| false
| 15,120
|
rd
|
\name{twoSamplePermutationTestProportion}
\alias{twoSamplePermutationTestProportion}
\title{
Randomization (Permutation) Test to Compare Two Proportions (Fisher's Exact Test)
}
\description{
Perform a two-sample randomization (permutation) test to compare two proportions.
This is also called Fisher's exact test.
\bold{Note:} You can perform Fisher's exact test in \R using the function
\code{\link{fisher.test}}.
}
\usage{
twoSamplePermutationTestProportion(x, y, x.and.y = "Binomial Outcomes",
alternative = "two.sided", tol = sqrt(.Machine$double.eps))
}
\arguments{
\item{x, y}{
When \code{x.and.y="Binomial Outcomes"} (the default), \code{x} and \code{y} are
vectors of observations from groups 1 and 2, respectively. The vectors \code{x}
and \code{y} must contain no more than 2 unique values (e.g., \code{0} and \code{1},
\code{FALSE} and \code{TRUE}, \code{"No"} and \code{"Yes"}, etc.). In this case,
the result of \code{sort(unique(x))[2]} is taken to be the value that indicates a
\dQuote{success} for \code{x} and the result of \code{sort(unique(y))[2]} is taken
to be the value that indicates a \dQuote{success} for \code{y}. For example, \cr
\code{x = c(FALSE, TRUE, FALSE, TRUE, TRUE)} indicates 3 successes in 5 trials, and
\code{y = c(1, 0, 0, 0)} indicates 1 success in 4 trials. When \cr
\code{x.and.y="Binomial Outcomes"}, missing (\code{NA}), undefined (\code{NaN}), and
infinite (\code{Inf}, \code{-Inf}) values are allowed but will be removed.
When \code{x.and.y="Number Successes and Trials"}, \code{x} must be a vector of
length 2 containing the number of successes for groups 1 and 2, respectively, and
\code{y} must be a vector of length 2 that contains the number of trials for groups
1 and 2, respectively. For example, \code{x = c(3, 1)} and \code{y = c(5, 4)}
indicates 3 successes in 5 trials for group 1 and 1 success in 4 trials for
group 2.
}
\item{x.and.y}{
character string indicating the kind of data stored in the vectors \code{x} and
\code{y}. The possible values are \code{x.and.y="Binomial Outcomes"} (the default),
and \cr
\code{x.and.y="Number Successes and Trials"}.
}
\item{alternative}{
character string indicating the kind of alternative hypothesis. The possible values
are \code{"two.sided"} (the default), \code{"less"}, and \code{"greater"}.
}
\item{tol}{
numeric scalar indicating the tolerance to use for computing the p-value for the
two-sample permutation test. The default value is \cr
\code{tol=sqrt(.Machine$double.eps)}. See the DETAILS section below for more
information.
}
}
\details{
\emph{Randomization Tests} \cr
In 1935, R.A. Fisher introduced the idea of a \bold{\emph{randomization test}}
(Manly, 2007, p. 107; Efron and Tibshirani, 1993, Chapter 15), which is based on
trying to answer the question: \dQuote{Did the observed pattern happen by chance,
or does the pattern indicate the null hypothesis is not true?} A randomization
test works by simply enumerating all of the possible outcomes under the null
hypothesis, then seeing where the observed outcome fits in. A randomization test
is also called a \bold{\emph{permutation test}}, because it involves permuting the
observations during the enumeration procedure (Manly, 2007, p. 3).
In the past, randomization tests have not been used as extensively as they are now
because of the \dQuote{large} computing resources needed to enumerate all of the
possible outcomes, especially for large sample sizes. The advent of more powerful
personal computers and software has allowed randomization tests to become much
easier to perform. Depending on the sample size, however, it may still be too
time consuming to enumerate all possible outcomes. In this case, the randomization
test can still be performed by sampling from the randomization distribution, and
comparing the observed outcome to this sampled permutation distribution.
\cr
\emph{Two-Sample Randomization Test for Proportions} \cr
Let \eqn{\underline{x} = x_1, x_2, \ldots, x_{n_1}} be a vector of \eqn{n_1}
independent and identically distributed (i.i.d.) observations
from a \link[stats:Binomial]{binomial distribution} with parameter \code{size=1} and
probability of success \code{prob=}\eqn{p_1}, and let
\eqn{\underline{y} = y_1, y_2, \ldots, y_{n_2}} be a vector of \eqn{n_2}
i.i.d. observations from a \link[stats:Binomial]{binomial distribution} with
parameter \code{size=1} and probability of success \code{prob=}\eqn{p_2}.
Consider the test of the null hypothesis:
\deqn{H_0: p_1 = p_2 \;\;\;\;\;\; (1)}
The three possible alternative hypotheses are the upper one-sided alternative
(\code{alternative="greater"})
\deqn{H_a: p_1 > p_2 \;\;\;\;\;\; (2)}
the lower one-sided alternative (\code{alternative="less"})
\deqn{H_a: p_1 < p_2 \;\;\;\;\;\; (3)}
and the two-sided alternative
\deqn{H_a: p_1 \ne p_2 \;\;\;\;\;\; (4)}
To perform the test of the null hypothesis (1) versus any of the three
alternatives (2)-(4), you can use the two-sample permutation test, which is also
called \link[stats:fisher.test]{Fisher's exact test}. When the observations are
from a B(1, \eqn{p}) distribution, the sample mean is an estimate of \eqn{p}.
Fisher's exact test is simply a permutation test for the difference between two
means from two different groups (see \code{\link{twoSamplePermutationTestLocation}}),
where the underlying populations are binomial with size parameter \code{size=1},
but possibly different values of the \code{prob} parameter \eqn{p}.
Fisher's exact test is usually described in terms of testing hypotheses concerning
a 2 x 2 contingency table (van Belle et al., 2004, p. 157;
Hollander and Wolfe, 1999, p. 473; Sheskin, 2011; Zar, 2010, p. 561).
The probabilities associated with the permutation distribution can be computed by
using the \link[stats:Hypergeometric]{hypergeometric distribution}.
}
\value{
A list of class \code{"permutationTest"} containing the results of the hypothesis
test. See the help file for \code{\link{permutationTest.object}} for details.
}
\references{
Efron, B., and R.J. Tibshirani. (1993). \emph{An Introduction to the Bootstrap}.
Chapman and Hall, New York, Chapter 15.
Hollander, M., and D.A. Wolfe. (1999). \emph{Nonparametric Statistical Methods}.
Second Edition. John Wiley and Sons, New York, p.473.
Manly, B.F.J. (2007). \emph{Randomization, Bootstrap and Monte Carlo Methods in
Biology}. Third Edition. Chapman & Hall, New York, Chapter 6.
Millard, S.P., and N.K. Neerchal. (2001).
\emph{Environmental Statistics with S-PLUS}. CRC Press, Boca Raton, FL,
pp.441--446.
Graham, S.L., K.J. Davis, W.H. Hansen, and C.H. Graham. (1975).
Effects of Prolonged Ethylene Thiourea Ingestion on the Thyroid of the Rat.
\emph{Food and Cosmetics Toxicology}, \bold{13}(5), 493--499.
Rodricks, J.V. (1992). \emph{Calculated Risks: The Toxicity and Human Health
Risks of Chemicals in Our Environment}. Cambridge University
Press, New York.
Rodricks, J.V. (2007). \emph{Calculated Risks: The Toxicity and Human Health
Risks of Chemicals in Our Environment}. Second Edition. Cambridge University
Press, New York.
Sheskin, D.J. (2011). \emph{Handbook of Parametric and Nonparametric
Statistical Procedures} Fifth Edition. CRC Press, Boca Raton, FL.
van Belle, G., L.D. Fisher, Heagerty, P.J., and Lumley, T. (2004).
\emph{Biostatistics: A Methodology for the Health Sciences, 2nd Edition}.
John Wiley & Sons, New York, p. 157.
Zar, J.H. (2010). \emph{Biostatistical Analysis}.
Fifth Edition. Prentice-Hall, Upper Saddle River, NJ,
p. 561.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\note{
Sometimes in environmental data analysis we are interested in determining whether
two probabilities or rates or proportions differ from each other. For example,
we may ask the question: \dQuote{Does exposure to pesticide X increase the risk of
developing cancer Y?}, where cancer Y may be liver cancer, stomach cancer, or some
other kind of cancer. One way environmental scientists attempt to answer this kind
of question is by conducting experiments on rodents in which one group (the
\dQuote{treatment} or \dQuote{exposed} group) is exposed to the pesticide and the
other group (the control group) is not. The incidence of cancer Y in the exposed
group is compared with the incidence of cancer Y in the control group. (See
Rodricks (2007) for a discussion of extrapolating results from experiments involving
rodents to consequences in humans and the associated difficulties).
Hypothesis tests you can use to compare proportions or probability of
\dQuote{success} between two groups include Fisher's exact test and the test
based on the normal approximation (see the \R help file for
\code{\link{prop.test}}).
}
\seealso{
\code{\link{permutationTest.object}}, \code{\link{plot.permutationTest}}, \cr
\code{\link{twoSamplePermutationTestLocation}},
\code{\link{oneSamplePermutationTest}},
\link{Hypothesis Tests}, \code{\link[boot]{boot}}.
}
\examples{
# Generate 10 observations from a binomial distribution with parameters
# size=1 and prob=0.3, and 20 observations from a binomial distribution
# with parameters size=1 and prob=0.5. Test the null hypothesis that the
# probability of "success" for the two distributions is the same against the
# alternative that the probability of "success" for group 1 is less than
# the probability of "success" for group 2.
# (Note: the call to set.seed allows you to reproduce this example).
set.seed(23)
dat1 <- rbinom(10, size = 1, prob = 0.3)
dat2 <- rbinom(20, size = 1, prob = 0.5)
test.list <- twoSamplePermutationTestProportion(
dat1, dat2, alternative = "less")
#----------
# Print the results of the test
#------------------------------
test.list
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: p.x - p.y = 0
#
#Alternative Hypothesis: True p.x - p.y is less than 0
#
#Test Name: Two-Sample Permutation Test
# Based on Differences in Proportions
# (Fisher's Exact Test)
#
#Estimated Parameter(s): p.hat.x = 0.60
# p.hat.y = 0.65
#
#Data: x = dat1
# y = dat2
#
#Sample Sizes: nx = 10
# ny = 20
#
#Test Statistic: p.hat.x - p.hat.y = -0.05
#
#P-value: 0.548026
#----------
# Plot the results of the test
#------------------------------
dev.new()
plot(test.list)
#----------
# Compare to the results of fisher.test
#--------------------------------------
x11 <- sum(dat1)
x21 <- length(dat1) - sum(dat1)
x12 <- sum(dat2)
x22 <- length(dat2) - sum(dat2)
mat <- matrix(c(x11, x12, x21, x22), ncol = 2)
fisher.test(mat, alternative = "less")
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: odds ratio = 1
#
#Alternative Hypothesis: True odds ratio is less than 1
#
#Test Name: Fisher's Exact Test for Count Data
#
#Estimated Parameter(s): odds ratio = 0.8135355
#
#Data: mat
#
#P-value: 0.548026
#
#95% Confidence Interval: LCL = 0.000000
# UCL = 4.076077
#==========
# Rodricks (1992, p. 133) presents data from an experiment by
# Graham et al. (1975) in which different groups of rats were exposed to
# various concentration levels of ethylene thiourea (ETU), a decomposition
# product of a certain class of fungicides that can be found in treated foods.
# In the group exposed to a dietary level of 250 ppm of ETU, 16 out of 69 rats
# (23%) developed thyroid tumors, whereas in the control group
# (no exposure to ETU) only 2 out of 72 (3%) rats developed thyroid tumors.
# If we use Fisher's exact test to test the null hypothesis that the proportion
# of rats exposed to 250 ppm of ETU who will develop thyroid tumors over their
# lifetime is no greater than the proportion of rats not exposed to ETU who will
# develop tumors, we get a one-sided upper p-value of 0.0002. Therefore, we
# conclude that the true underlying rate of tumor incidence in the exposed group
# is greater than in the control group.
#
# The data for this example are stored in Graham.et.al.75.etu.df.
# Look at the data
#-----------------
Graham.et.al.75.etu.df
# dose tumors n proportion
#1 0 2 72 0.02777778
#2 5 2 75 0.02666667
#3 25 1 73 0.01369863
#4 125 2 73 0.02739726
#5 250 16 69 0.23188406
#6 500 62 70 0.88571429
# Perform the test for a difference in tumor rates
#-------------------------------------------------
Num.Tumors <- with(Graham.et.al.75.etu.df, tumors[c(5, 1)])
Sample.Sizes <- with(Graham.et.al.75.etu.df, n[c(5, 1)])
test.list <- twoSamplePermutationTestProportion(
x = Num.Tumors, y = Sample.Sizes,
x.and.y="Number Successes and Trials", alternative = "greater")
#----------
# Print the results of the test
#------------------------------
test.list
#Results of Hypothesis Test
#--------------------------
#
#Null Hypothesis: p.x - p.y = 0
#
#Alternative Hypothesis: True p.x - p.y is greater than 0
#
#Test Name: Two-Sample Permutation Test
# Based on Differences in Proportions
# (Fisher's Exact Test)
#
#Estimated Parameter(s): p.hat.x = 0.23188406
# p.hat.y = 0.02777778
#
#Data: x = Num.Tumors
# n = Sample.Sizes
#
#Sample Sizes: nx = 69
# ny = 72
#
#Test Statistic: p.hat.x - p.hat.y = 0.2041063
#
#P-value: 0.0002186462
#----------
# Plot the results of the test
#------------------------------
dev.new()
plot(test.list)
#==========
# Clean up
#---------
rm(test.list, x11, x12, x21, x22, mat, Num.Tumors, Sample.Sizes)
#graphics.off()
}
\keyword{htest}
\keyword{models}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetSettledSpecialFixtures.R
\name{GetSettledSpecialFixtures}
\alias{GetSettledSpecialFixtures}
\title{Get Settled Special Fixtures}
\usage{
GetSettledSpecialFixtures(sportid, leagueids = NULL, since = NULL)
}
\arguments{
\item{sportid}{(optional) an integer giving the sport; if missing, a menu of options is presented}
\item{leagueids}{(optional) integer vector with league IDs.}
\item{since}{(optional) numeric This is used to receive incremental updates.
Use the value of last from previous fixtures response.}
}
\value{
a data.frame of settled special fixtures
}
\description{
Get Settled Special Fixtures
}
\examples{
\donttest{
SetCredentials("TESTAPI", "APITEST")
AcceptTermsAndConditions(accepted=TRUE)
# Can be run without arguments
GetSettledSpecialFixtures()}
}
|
/man/GetSettledSpecialFixtures.Rd
|
no_license
|
marcoblume/pinnacle.API
|
R
| false
| true
| 853
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetSettledSpecialFixtures.R
\name{GetSettledSpecialFixtures}
\alias{GetSettledSpecialFixtures}
\title{Get Settled Special Fixtures}
\usage{
GetSettledSpecialFixtures(sportid, leagueids = NULL, since = NULL)
}
\arguments{
\item{sportid}{(optional) an integer giving the sport; if missing, a menu of options is presented}
\item{leagueids}{(optional) integer vector with league IDs.}
\item{since}{(optional) numeric This is used to receive incremental updates.
Use the value of last from previous fixtures response.}
}
\value{
a data.frame of settled special fixtures
}
\description{
Get Settled Special Fixtures
}
\examples{
\donttest{
SetCredentials("TESTAPI", "APITEST")
AcceptTermsAndConditions(accepted=TRUE)
# Can be run without arguments
GetSettledSpecialFixtures()}
}
|
#############################################################
#DADA2 Pipeline Tutorial (version 1.12)
#notes: - tutorial and data found at: https://benjjneb.github.io/dada2/tutorial.html
# - instructions to install DADA2 provided at: https://benjjneb.github.io/dada2/dada-installation.html
# - running DADA2 requires R version 3.6.0 and Bioconductor version 3.9
# - pipeline requires DADA2, phyloseq and Biostring packages to be installed (and all dependencies)
# - example fastq files and reference databases provided in MiSeq_SOP and tax folders
#Tutorial and data accessed on: 06/05/19
##############################################################
# Pipeline outline: quality-filter paired-end fastq files, learn error rates,
# denoise with DADA2 sample inference, merge read pairs, remove chimeras,
# assign taxonomy, and bundle the results into a phyloseq object saved as
# ps.rds. Steps are strictly order-dependent: each stage consumes the output
# of the previous one.
#Loading required packages
library(dada2); packageVersion("dada2") #version: 1.12.1
library(phyloseq); packageVersion("phyloseq") #version: 1.28.0
#Set working directory
# NOTE(review): setwd() makes the script location-dependent; acceptable for a
# tutorial, but prefer a project-relative path when adapting this script.
setwd("dada2 tutorial") #change this to point to the folder where you have saved the seq data and tax databases
#Listing path to sequence files
path <- "MiSeq_SOP" #folder with the fastq files
list.files(path)
#Getting matched lists of the forward and reverse fastq files and extracting sample names
# Forward/reverse files are paired via the Illumina _R1_/_R2_ naming
# convention; sort() keeps the two lists in matching order.
fnFs <- sort(list.files(path, pattern="_R1_001.fastq", full.names = TRUE))
fnRs <- sort(list.files(path, pattern="_R2_001.fastq", full.names = TRUE))
sample.names <- sapply(strsplit(basename(fnFs), "_"), `[`, 1) #extracting string prior to the first underscore
head(fnFs) #sanity check that the files are in order and the names are as expected
head(fnRs)
sample.names
#Inspect read quality profiles
plotQualityProfile(fnFs[1:2])
plotQualityProfile(fnRs[1:2])
#Filter and trim forward and reverse reads
filtFs <- file.path(path, "filtered", paste0(sample.names, "_F_filt.fastq.gz")) #Place filtered files in new filtered subdirectory
filtRs <- file.path(path, "filtered", paste0(sample.names, "_R_filt.fastq.gz"))
names(filtFs) <- sample.names #add names
names(filtRs) <- sample.names
head(filtFs) #sanity check
# truncLen=c(240,160) was chosen from the quality profiles above for this
# tutorial dataset; revisit these values for other sequencing runs.
out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs, truncLen=c(240,160),
                     maxN=0, maxEE=c(2,2), truncQ=2, rm.phix=TRUE,
                     compress=TRUE, multithread=TRUE) #On Windows set multithread=FALSE (may need to throughout)
head(out)
#Learn error rates
errF <- learnErrors(filtFs, multithread=TRUE)
errR <- learnErrors(filtRs, multithread=TRUE)
plotErrors(errF, nominalQ=TRUE)
# Peek at the fitted error-rate estimates stored in the learnErrors() output.
errF$err_out
#Sample inference
dadaFs <- dada(filtFs, err=errF, multithread=TRUE)
dadaRs <- dada(filtRs, err=errR, multithread=TRUE)
dadaFs[[1]]
#Merge F and R reads
mergers <- mergePairs(dadaFs, filtFs, dadaRs, filtRs, verbose=TRUE)
head(mergers[[1]])
#Construct sequence table
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
#Inspect distribution of sequence lengths
table(nchar(getSequences(seqtab)))
#Filter chimeric reads
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtab.nochim)
sum(seqtab.nochim)/sum(seqtab) #ensuring retained majority of reads
#Tracking reads through the pipeline
# Per-sample read counts at every stage; a large drop in any column flags a
# problematic step.
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), sapply(dadaRs, getN), sapply(mergers, getN), rowSums(seqtab.nochim))
colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged", "nonchim")
rownames(track) <- sample.names
head(track)
#Assigning taxonomy
#For more information and examples see: https://benjjneb.github.io/dada2/assign.html
taxa <- assignTaxonomy(seqtab.nochim, "tax/silva_nr_v132_train_set.fa.gz", multithread=TRUE) #assign taxonomy using RDP classifier and Silva ref database
taxa <- addSpecies(taxa, "tax/silva_species_assignment_v132.fa.gz") #updating with genus/species binomials
taxa.print <- taxa #Removing sequence rownames for display only
rownames(taxa.print) <- NULL
head(taxa.print)
#Evaluate accuracy on mock sample
unqs.mock <- seqtab.nochim["Mock",]
unqs.mock <- sort(unqs.mock[unqs.mock>0], decreasing=TRUE) # Drop ASVs absent in the Mock
cat("DADA2 inferred", length(unqs.mock), "sample sequences present in the Mock community.\n")
mock.ref <- getSequences(file.path(path, "HMP_MOCK.v35.fasta"))
match.ref <- sum(sapply(names(unqs.mock), function(x) any(grepl(x, mock.ref))))
# NOTE(review): match.ref is already a scalar count, so sum(match.ref) below
# is redundant (harmless); kept as in the original tutorial.
cat("Of those,", sum(match.ref), "were exact matches to the expected reference sequences.\n")
#Create phyloseq object
samples.out <- rownames(seqtab.nochim) #cleaning metadata for import to phyloseq
# Sample names are parsed as <gender char><subject number>D<day>
# (per the tutorial's naming scheme -- confirm for other datasets).
subject <- sapply(strsplit(samples.out, "D"), `[`, 1)
gender <- substr(subject,1,1)
# Drop the leading gender character, keeping the subject number.
subject <- substr(subject,2,999)
day <- as.integer(sapply(strsplit(samples.out, "D"), `[`, 2))
samdf <- data.frame(Subject=subject, Gender=gender, Day=day)
samdf$When <- "Early"
samdf$When[samdf$Day>100] <- "Late"
rownames(samdf) <- samples.out
samdf
(ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
                sample_data(samdf),
                tax_table(taxa)))
(ps <- prune_samples(sample_names(ps) != "Mock", ps)) # Remove mock sample
dna <- Biostrings::DNAStringSet(taxa_names(ps)) #convert ASV names to ASV1, ASV2, ASV...
names(dna) <- taxa_names(ps)
(ps <- merge_phyloseq(ps, dna))
taxa_names(ps) <- paste0("ASV", seq(ntaxa(ps)))
ps
saveRDS(ps, "ps.rds")
|
/DADA2 pipeline tutorial script.R
|
no_license
|
3mila/Introduction-to-Metagenomics-Summer-Workshop-2019
|
R
| false
| false
| 5,414
|
r
|
#############################################################
#DADA2 Pipeline Tutorial (version 1.12)
#notes: - tutorial and data found at: https://benjjneb.github.io/dada2/tutorial.html
# - instructions to install DADA2 provided at: https://benjjneb.github.io/dada2/dada-installation.html
# - running DADA2 requires R version 3.6.0 and Bioconductor version 3.9
# - pipeline requires DADA2, phyloseq and Biostring packages to be installed (and all dependencies)
# - example fastq files and reference databases provided in MiSeq_SOP and tax folders
#Tutorial and data accessed on: 06/05/19
##############################################################
# Pipeline outline: quality-filter paired-end fastq files, learn error rates,
# denoise with DADA2 sample inference, merge read pairs, remove chimeras,
# assign taxonomy, and bundle the results into a phyloseq object saved as
# ps.rds. Steps are strictly order-dependent: each stage consumes the output
# of the previous one.
#Loading required packages
library(dada2); packageVersion("dada2") #version: 1.12.1
library(phyloseq); packageVersion("phyloseq") #version: 1.28.0
#Set working directory
# NOTE(review): setwd() makes the script location-dependent; acceptable for a
# tutorial, but prefer a project-relative path when adapting this script.
setwd("dada2 tutorial") #change this to point to the folder where you have saved the seq data and tax databases
#Listing path to sequence files
path <- "MiSeq_SOP" #folder with the fastq files
list.files(path)
#Getting matched lists of the forward and reverse fastq files and extracting sample names
# Forward/reverse files are paired via the Illumina _R1_/_R2_ naming
# convention; sort() keeps the two lists in matching order.
fnFs <- sort(list.files(path, pattern="_R1_001.fastq", full.names = TRUE))
fnRs <- sort(list.files(path, pattern="_R2_001.fastq", full.names = TRUE))
sample.names <- sapply(strsplit(basename(fnFs), "_"), `[`, 1) #extracting string prior to the first underscore
head(fnFs) #sanity check that the files are in order and the names are as expected
head(fnRs)
sample.names
#Inspect read quality profiles
plotQualityProfile(fnFs[1:2])
plotQualityProfile(fnRs[1:2])
#Filter and trim forward and reverse reads
filtFs <- file.path(path, "filtered", paste0(sample.names, "_F_filt.fastq.gz")) #Place filtered files in new filtered subdirectory
filtRs <- file.path(path, "filtered", paste0(sample.names, "_R_filt.fastq.gz"))
names(filtFs) <- sample.names #add names
names(filtRs) <- sample.names
head(filtFs) #sanity check
# truncLen=c(240,160) was chosen from the quality profiles above for this
# tutorial dataset; revisit these values for other sequencing runs.
out <- filterAndTrim(fnFs, filtFs, fnRs, filtRs, truncLen=c(240,160),
                     maxN=0, maxEE=c(2,2), truncQ=2, rm.phix=TRUE,
                     compress=TRUE, multithread=TRUE) #On Windows set multithread=FALSE (may need to throughout)
head(out)
#Learn error rates
errF <- learnErrors(filtFs, multithread=TRUE)
errR <- learnErrors(filtRs, multithread=TRUE)
plotErrors(errF, nominalQ=TRUE)
# Peek at the fitted error-rate estimates stored in the learnErrors() output.
errF$err_out
#Sample inference
dadaFs <- dada(filtFs, err=errF, multithread=TRUE)
dadaRs <- dada(filtRs, err=errR, multithread=TRUE)
dadaFs[[1]]
#Merge F and R reads
mergers <- mergePairs(dadaFs, filtFs, dadaRs, filtRs, verbose=TRUE)
head(mergers[[1]])
#Construct sequence table
seqtab <- makeSequenceTable(mergers)
dim(seqtab)
#Inspect distribution of sequence lengths
table(nchar(getSequences(seqtab)))
#Filter chimeric reads
seqtab.nochim <- removeBimeraDenovo(seqtab, method="consensus", multithread=TRUE, verbose=TRUE)
dim(seqtab.nochim)
sum(seqtab.nochim)/sum(seqtab) #ensuring retained majority of reads
#Tracking reads through the pipeline
# Per-sample read counts at every stage; a large drop in any column flags a
# problematic step.
getN <- function(x) sum(getUniques(x))
track <- cbind(out, sapply(dadaFs, getN), sapply(dadaRs, getN), sapply(mergers, getN), rowSums(seqtab.nochim))
colnames(track) <- c("input", "filtered", "denoisedF", "denoisedR", "merged", "nonchim")
rownames(track) <- sample.names
head(track)
#Assigning taxonomy
#For more information and examples see: https://benjjneb.github.io/dada2/assign.html
taxa <- assignTaxonomy(seqtab.nochim, "tax/silva_nr_v132_train_set.fa.gz", multithread=TRUE) #assign taxonomy using RDP classifier and Silva ref database
taxa <- addSpecies(taxa, "tax/silva_species_assignment_v132.fa.gz") #updating with genus/species binomials
taxa.print <- taxa #Removing sequence rownames for display only
rownames(taxa.print) <- NULL
head(taxa.print)
#Evaluate accuracy on mock sample
unqs.mock <- seqtab.nochim["Mock",]
unqs.mock <- sort(unqs.mock[unqs.mock>0], decreasing=TRUE) # Drop ASVs absent in the Mock
cat("DADA2 inferred", length(unqs.mock), "sample sequences present in the Mock community.\n")
mock.ref <- getSequences(file.path(path, "HMP_MOCK.v35.fasta"))
match.ref <- sum(sapply(names(unqs.mock), function(x) any(grepl(x, mock.ref))))
# NOTE(review): match.ref is already a scalar count, so sum(match.ref) below
# is redundant (harmless); kept as in the original tutorial.
cat("Of those,", sum(match.ref), "were exact matches to the expected reference sequences.\n")
#Create phyloseq object
samples.out <- rownames(seqtab.nochim) #cleaning metadata for import to phyloseq
# Sample names are parsed as <gender char><subject number>D<day>
# (per the tutorial's naming scheme -- confirm for other datasets).
subject <- sapply(strsplit(samples.out, "D"), `[`, 1)
gender <- substr(subject,1,1)
# Drop the leading gender character, keeping the subject number.
subject <- substr(subject,2,999)
day <- as.integer(sapply(strsplit(samples.out, "D"), `[`, 2))
samdf <- data.frame(Subject=subject, Gender=gender, Day=day)
samdf$When <- "Early"
samdf$When[samdf$Day>100] <- "Late"
rownames(samdf) <- samples.out
samdf
(ps <- phyloseq(otu_table(seqtab.nochim, taxa_are_rows=FALSE),
                sample_data(samdf),
                tax_table(taxa)))
(ps <- prune_samples(sample_names(ps) != "Mock", ps)) # Remove mock sample
dna <- Biostrings::DNAStringSet(taxa_names(ps)) #convert ASV names to ASV1, ASV2, ASV...
names(dna) <- taxa_names(ps)
(ps <- merge_phyloseq(ps, dna))
taxa_names(ps) <- paste0("ASV", seq(ntaxa(ps)))
ps
saveRDS(ps, "ps.rds")
|
# Tests for drive_ls(): listing folder contents by path, dribble, and id;
# following a folder shortcut to its target; passing extra query parameters
# through to drive_find(); and recursive listing.
# CLEAN and SETUP are flags defined by the test harness; nm_() (built by
# nm_fun(), defined in the test helpers) decorates base names with a
# run-specific label so remote Drive fixtures from different runs don't
# collide (assumed from usage -- see helper definition).
# ---- nm_fun ----
nm_ <- nm_fun("TEST-drive_ls", user_run = FALSE)
# ---- clean ----
# Trash any fixtures left over from a previous run.
if (CLEAN) {
  drive_trash(c(
    nm_("list-me"),
    nm_("list-a-folder-shortcut"),
    nm_("this-should-not-exist"),
    nm_("topdir")
  ))
}
# ---- setup ----
# Create the remote fixtures: a flat folder ("list-me") with two files, a
# shortcut pointing at it, and a small folder tree under "topdir".
if (SETUP) {
  drive_mkdir(nm_("list-me"))
  drive_upload(
    system.file("DESCRIPTION"),
    path = file.path(nm_("list-me"), nm_("DESCRIPTION"))
  )
  drive_upload(
    file.path(R.home("doc"), "html", "about.html"),
    path = file.path(nm_("list-me"), nm_("about-html"))
  )
  shortcut_create(nm_("list-me"), name = nm_("list-a-folder-shortcut"))
  ## for testing `recursive = TRUE`
  top <- drive_mkdir(nm_("topdir"))
  drive_upload(
    system.file("DESCRIPTION"),
    path = top,
    name = nm_("apple"),
    type = "document",
    starred = TRUE
  )
  folder1_level1 <- drive_mkdir(nm_("folder1-level1"), path = top)
  drive_mkdir(nm_("folder2-level1"), path = top)
  x <- drive_upload(
    system.file("DESCRIPTION"),
    path = folder1_level1,
    name = nm_("banana"),
    type = "document"
  )
  folder1_level2 <- drive_mkdir(nm_("folder1-level2"), path = folder1_level1)
  x <- drive_upload(
    system.file("DESCRIPTION"),
    path = folder1_level2,
    name = nm_("cranberry"),
    type = "document",
    starred = TRUE
  )
}
# ---- tests ----
test_that("drive_ls() errors if `path` does not exist", {
  skip_if_no_token()
  skip_if_offline()
  expect_snapshot(drive_ls(nm_("this-should-not-exist")), error = TRUE)
})
test_that("drive_ls() outputs contents of folder", {
  skip_if_no_token()
  skip_if_offline()
  # The same folder should be listable by path, by dribble, and by id,
  # with identical results.
  ## path
  out <- drive_ls(nm_("list-me"))
  expect_dribble(out)
  expect_true(setequal(out$name, c(nm_("about-html"), nm_("DESCRIPTION"))))
  ## dribble
  d <- drive_get(nm_("list-me"))
  out2 <- drive_ls(d)
  expect_identical(out[c("name", "id")], out2[c("name", "id")])
  ## id
  out3 <- drive_ls(d$id)
  expect_identical(out[c("name", "id")], out3[c("name", "id")])
})
test_that("drive_ls() list contents of the target of a folder shortcut", {
  skip_if_no_token()
  skip_if_offline()
  target_name <- nm_("list-me")
  shortcut_name <- nm_("list-a-folder-shortcut")
  direct_ls <- drive_ls(target_name)
  local_drive_loud_and_wide()
  drive_ls_message <- capture.output(
    indirect_ls <- drive_ls(shortcut_name),
    type = "message"
  )
  # Normalize run-specific paths and file ids in the captured messages so the
  # snapshot stays deterministic (scrub_* helpers from the test setup).
  drive_ls_message <- drive_ls_message %>%
    scrub_filepath(target_name) %>%
    scrub_filepath(shortcut_name) %>%
    scrub_file_id()
  expect_snapshot(
    write_utf8(drive_ls_message)
  )
  # Listing via the shortcut must yield the same files as listing the target.
  expect_equal(direct_ls$id, indirect_ls$id)
})
test_that("drive_ls() passes ... through to drive_find()", {
  skip_if_no_token()
  skip_if_offline()
  d <- drive_get(nm_("list-me"))
  ## does user-specified q get appended to vs clobbered?
  ## if so, only about-html is listed here
  about <- drive_get(nm_("about-html"))
  out <- drive_ls(d, q = "fullText contains 'portable'", orderBy = NULL)
  expect_identical(
    about[c("name", "id")],
    out[c("name", "id")]
  )
  ## does a non-q query parameter get passed through?
  ## if so, files are listed in reverse alphabetical order here
  out <- drive_ls(d, orderBy = "name desc")
  expect_identical(
    out$name,
    c(nm_("DESCRIPTION"), nm_("about-html"))
  )
})
test_that("`recursive` does its job", {
  skip_if_no_token()
  skip_if_offline()
  # Non-recursive: only topdir's direct children.
  out <- drive_ls(nm_("topdir"), recursive = FALSE)
  expect_true(
    all(
      c(nm_("apple"), nm_("folder1-level1"), nm_("folder2-level1"))
      %in% out$name
    )
  )
  # Recursive: children at every level of the tree built in setup.
  out <- drive_ls(nm_("topdir"), recursive = TRUE)
  expect_true(
    all(
      c(
        nm_("apple"), nm_("folder1-level1"), nm_("folder2-level1"),
        nm_("banana"), nm_("folder1-level2"), nm_("cranberry")
      ) %in% out$name
    )
  )
  # Recursive listing combined with a user-supplied query: only the two
  # starred files (created with starred = TRUE above) should match.
  out <- drive_ls(nm_("topdir"), q = "starred = true", recursive = TRUE)
  expect_true(all(c(nm_("apple"), nm_("cranberry")) %in% out$name))
})
|
/tests/testthat/test-drive_ls.R
|
permissive
|
tidyverse/googledrive
|
R
| false
| false
| 3,903
|
r
|
# Tests for drive_ls(): listing folder contents by path, dribble, and id;
# following a folder shortcut to its target; passing extra query parameters
# through to drive_find(); and recursive listing.
# CLEAN and SETUP are flags defined by the test harness; nm_() (built by
# nm_fun(), defined in the test helpers) decorates base names with a
# run-specific label so remote Drive fixtures from different runs don't
# collide (assumed from usage -- see helper definition).
# ---- nm_fun ----
nm_ <- nm_fun("TEST-drive_ls", user_run = FALSE)
# ---- clean ----
# Trash any fixtures left over from a previous run.
if (CLEAN) {
  drive_trash(c(
    nm_("list-me"),
    nm_("list-a-folder-shortcut"),
    nm_("this-should-not-exist"),
    nm_("topdir")
  ))
}
# ---- setup ----
# Create the remote fixtures: a flat folder ("list-me") with two files, a
# shortcut pointing at it, and a small folder tree under "topdir".
if (SETUP) {
  drive_mkdir(nm_("list-me"))
  drive_upload(
    system.file("DESCRIPTION"),
    path = file.path(nm_("list-me"), nm_("DESCRIPTION"))
  )
  drive_upload(
    file.path(R.home("doc"), "html", "about.html"),
    path = file.path(nm_("list-me"), nm_("about-html"))
  )
  shortcut_create(nm_("list-me"), name = nm_("list-a-folder-shortcut"))
  ## for testing `recursive = TRUE`
  top <- drive_mkdir(nm_("topdir"))
  drive_upload(
    system.file("DESCRIPTION"),
    path = top,
    name = nm_("apple"),
    type = "document",
    starred = TRUE
  )
  folder1_level1 <- drive_mkdir(nm_("folder1-level1"), path = top)
  drive_mkdir(nm_("folder2-level1"), path = top)
  x <- drive_upload(
    system.file("DESCRIPTION"),
    path = folder1_level1,
    name = nm_("banana"),
    type = "document"
  )
  folder1_level2 <- drive_mkdir(nm_("folder1-level2"), path = folder1_level1)
  x <- drive_upload(
    system.file("DESCRIPTION"),
    path = folder1_level2,
    name = nm_("cranberry"),
    type = "document",
    starred = TRUE
  )
}
# ---- tests ----
test_that("drive_ls() errors if `path` does not exist", {
  skip_if_no_token()
  skip_if_offline()
  expect_snapshot(drive_ls(nm_("this-should-not-exist")), error = TRUE)
})
test_that("drive_ls() outputs contents of folder", {
  skip_if_no_token()
  skip_if_offline()
  # The same folder should be listable by path, by dribble, and by id,
  # with identical results.
  ## path
  out <- drive_ls(nm_("list-me"))
  expect_dribble(out)
  expect_true(setequal(out$name, c(nm_("about-html"), nm_("DESCRIPTION"))))
  ## dribble
  d <- drive_get(nm_("list-me"))
  out2 <- drive_ls(d)
  expect_identical(out[c("name", "id")], out2[c("name", "id")])
  ## id
  out3 <- drive_ls(d$id)
  expect_identical(out[c("name", "id")], out3[c("name", "id")])
})
test_that("drive_ls() list contents of the target of a folder shortcut", {
  skip_if_no_token()
  skip_if_offline()
  target_name <- nm_("list-me")
  shortcut_name <- nm_("list-a-folder-shortcut")
  direct_ls <- drive_ls(target_name)
  local_drive_loud_and_wide()
  drive_ls_message <- capture.output(
    indirect_ls <- drive_ls(shortcut_name),
    type = "message"
  )
  # Normalize run-specific paths and file ids in the captured messages so the
  # snapshot stays deterministic (scrub_* helpers from the test setup).
  drive_ls_message <- drive_ls_message %>%
    scrub_filepath(target_name) %>%
    scrub_filepath(shortcut_name) %>%
    scrub_file_id()
  expect_snapshot(
    write_utf8(drive_ls_message)
  )
  # Listing via the shortcut must yield the same files as listing the target.
  expect_equal(direct_ls$id, indirect_ls$id)
})
test_that("drive_ls() passes ... through to drive_find()", {
  skip_if_no_token()
  skip_if_offline()
  d <- drive_get(nm_("list-me"))
  ## does user-specified q get appended to vs clobbered?
  ## if so, only about-html is listed here
  about <- drive_get(nm_("about-html"))
  out <- drive_ls(d, q = "fullText contains 'portable'", orderBy = NULL)
  expect_identical(
    about[c("name", "id")],
    out[c("name", "id")]
  )
  ## does a non-q query parameter get passed through?
  ## if so, files are listed in reverse alphabetical order here
  out <- drive_ls(d, orderBy = "name desc")
  expect_identical(
    out$name,
    c(nm_("DESCRIPTION"), nm_("about-html"))
  )
})
test_that("`recursive` does its job", {
  skip_if_no_token()
  skip_if_offline()
  # Non-recursive: only topdir's direct children.
  out <- drive_ls(nm_("topdir"), recursive = FALSE)
  expect_true(
    all(
      c(nm_("apple"), nm_("folder1-level1"), nm_("folder2-level1"))
      %in% out$name
    )
  )
  # Recursive: children at every level of the tree built in setup.
  out <- drive_ls(nm_("topdir"), recursive = TRUE)
  expect_true(
    all(
      c(
        nm_("apple"), nm_("folder1-level1"), nm_("folder2-level1"),
        nm_("banana"), nm_("folder1-level2"), nm_("cranberry")
      ) %in% out$name
    )
  )
  # Recursive listing combined with a user-supplied query: only the two
  # starred files (created with starred = TRUE above) should match.
  out <- drive_ls(nm_("topdir"), q = "starred = true", recursive = TRUE)
  expect_true(all(c(nm_("apple"), nm_("cranberry")) %in% out$name))
})
|
# plot5.R -- total PM2.5 emissions from motor-vehicle sources in Baltimore
# City, MD (fips 24510) by year, written to plot5.png.
# Fixes: (1) ggplot2 was used without being loaded; (2) the ggplot object was
# a bare top-level expression, which is NOT auto-printed when the script is
# source()d, leaving the PNG device empty -- it must be print()ed explicitly.
library(ggplot2)

# read in data files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# find row indices for sources (SCC) associated with motor vehicles in SCC
# data frame, using Short.Name column and the term "veh" as a proxy for
# motor vehicle (case-insensitive via tolower)
mv <- grep("veh", tolower(SCC$Short.Name))

# subset of NEI data frame with motor vehicles in Baltimore (fips == "24510")
NEI_mv_balt <- merge(subset(NEI, fips == "24510"), SCC[mv, ], by = "SCC")

# plot: sum emissions per year (stat_summary with fun = sum)
png("plot5.png")
p <- ggplot(NEI_mv_balt, aes(year, Emissions)) +
  stat_summary(fun = sum, geom = "line", color = "orange") +
  stat_summary(fun = sum, geom = "point", color = "orange") +
  theme_classic() +
  labs(y = "tons", title = "Baltimore City, MD PM2.5 Motor Vehicle Emissions by Year") +
  scale_x_continuous(breaks = seq(1999, 2008, 3)) +
  ylim(0, 400)
print(p)  # explicit print so the plot renders even when source()d
# turn off graphical device
dev.off()
|
/Course4_ExploratoryDataAnalysis/Project2/plot5.R
|
no_license
|
younger-once/JHU_DataScience_Specialization
|
R
| false
| false
| 840
|
r
|
# plot5.R -- total PM2.5 emissions from motor-vehicle sources in Baltimore
# City, MD (fips 24510) by year, written to plot5.png.
# Fixes: (1) ggplot2 was used without being loaded; (2) the ggplot object was
# a bare top-level expression, which is NOT auto-printed when the script is
# source()d, leaving the PNG device empty -- it must be print()ed explicitly.
library(ggplot2)

# read in data files
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# find row indices for sources (SCC) associated with motor vehicles in SCC
# data frame, using Short.Name column and the term "veh" as a proxy for
# motor vehicle (case-insensitive via tolower)
mv <- grep("veh", tolower(SCC$Short.Name))

# subset of NEI data frame with motor vehicles in Baltimore (fips == "24510")
NEI_mv_balt <- merge(subset(NEI, fips == "24510"), SCC[mv, ], by = "SCC")

# plot: sum emissions per year (stat_summary with fun = sum)
png("plot5.png")
p <- ggplot(NEI_mv_balt, aes(year, Emissions)) +
  stat_summary(fun = sum, geom = "line", color = "orange") +
  stat_summary(fun = sum, geom = "point", color = "orange") +
  theme_classic() +
  labs(y = "tons", title = "Baltimore City, MD PM2.5 Motor Vehicle Emissions by Year") +
  scale_x_continuous(breaks = seq(1999, 2008, 3)) +
  ylim(0, 400)
print(p)  # explicit print so the plot renders even when source()d
# turn off graphical device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psiFuns.R
\name{opt}
\alias{opt}
\title{Tuning parameter for a rho function in the (asymptotic bias-) optimal family}
\usage{
opt(e)
}
\arguments{
\item{e}{the desired efficiency of the corresponding regression
estimator for Gaussian errors}
}
\value{
A vector with named elements containing the corresponding tuning
parameters.
}
\description{
This function computes the tuning constant that yields an MM-regression
estimator with a desired asymptotic efficiency when computed with a
rho function in the corresponding family. The output of this
function can be passed to the functions \link{lmrobdet.control},
\link{mscale} and \link{rho}.
}
\examples{
# Tuning parameters for an 85\%-efficient M-estimator at a Gaussian model
opt(.85)
}
\author{
Kjell Konis
}
|
/man/opt.Rd
|
no_license
|
msalibian/RobStatTM
|
R
| false
| true
| 841
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psiFuns.R
\name{opt}
\alias{opt}
\title{Tuning parameter for a rho function in the (asymptotic bias-) optimal family}
\usage{
opt(e)
}
\arguments{
\item{e}{the desired efficiency of the corresponding regression
estimator for Gaussian errors}
}
\value{
A vector with named elements containing the corresponding tuning
parameters.
}
\description{
This function computes the tuning constant that yields an MM-regression
estimator with a desired asymptotic efficiency when computed with a
rho function in the corresponding family. The output of this
function can be passed to the functions \link{lmrobdet.control},
\link{mscale} and \link{rho}.
}
\examples{
# Tuning parameters for an 85\%-efficient M-estimator at a Gaussian model
opt(.85)
}
\author{
Kjell Konis
}
|
\name{rhDNase}
\alias{rhDNase}
\docType{data}
\title{rhDNASE data set}
\description{
Results of a randomized trial of rhDNase for the treatment of cystic
fibrosis.
}
\format{
A data frame with 767 observations on the following 8 variables.
\describe{
\item{\code{id}}{subeject id}
\item{\code{inst}}{enrolling institution}
\item{\code{trt}}{treatment arm: 0=placebo, 1= rhDNase}
\item{\code{entry.dt}}{date of entry into the study}
\item{\code{end.dt}}{date of last follow-up}
\item{\code{fev}}{forced expiratory volume at enrollment, a measure
of lung capacity}
\item{\code{ivstart}}{days from enrollment to the start of IV antibiotics}
\item{\code{ivstop}}{days from enrollment to the cessation of
IV antibiotics}
}
}
\details{
In patients with cystic fibrosis, extracellular DNA is released by
leukocytes that accumulate in the airways in response to chronic bacterial
infection.
This excess DNA thickens the mucus, which then cannot be cleared from the
lung by the cilia. The accumulation leads to exacerbations of
respiratory symptoms and progressive deterioration of lung function.
At the time of this study
more than 90\% of cystic fibrosis patients eventually died of lung
disease.
Deoxyribonuclease I (DNase I) is a
human enzyme normally present in the mucus of human lungs that digests
extracellular DNA.
Genentech, Inc. cloned a highly purified recombinant DNase I (rhDNase or
Pulmozyme) which when delivered to the lungs in an aerosolized form cuts
extracellular DNA, reducing the viscoelasticity of airway
secretions and improving clearance.
In 1992 the company
conducted a randomized double-blind trial comparing rhDNase to placebo.
Patients were then monitored for
pulmonary exacerbations, along with measures of lung volume and flow.
The primary endpoint was the time until
first pulmonary exacerbation; however, data on all exacerbations were
collected for 169 days.
The definition of an exacerbation was an infection that required the use
of intravenous (IV) antibiotics. Subjects had 0--5 such episodes during
the trial, those with more than one have multiple rows in the data
set, those with none have NA for the IV start and end times.
A few subjects were infected at the time of enrollment, subject 173 for
instance has a first infection interval of -21 to 7. We do not count this
first infection as an "event", and the subject first enters the risk set
at day 7.
Subjects who have an event are not considered to be at risk for another
event during the course of antibiotics, nor for an additional 6 days
after they end. (If the symptoms reappear immediately after cessation
then from a medical standpoint this would not be a new infection.)
This data set reproduces the data in Therneau and Grambsch; it does not
exactly reproduce those in Therneau and Hamilton due to data set updates.
}
\references{
T. M. Therneau and P. M. Grambsch, Modeling Survival Data: Extending
the Cox Model, Springer, 2000.
T. M. Therneau and S. A. Hamilton,
rhDNase as an example of recurrent event analysis, Statistics
in Medicine, 16:2029-2047, 1997.
}
\examples{
# Build the start-stop data set for analysis, and
# replicate line 2 of table 8.13
first <- subset(rhDNase, !duplicated(id)) #first row for each subject
dnase <- tmerge(first, first, id=id, tstop=as.numeric(end.dt -entry.dt))
# Subjects whose fu ended during the 6 day window are the reason for
# this next line
temp.end <- with(rhDNase, pmin(ivstop+6, end.dt-entry.dt))
dnase <- tmerge(dnase, rhDNase, id=id,
infect=event(ivstart),
end= event(temp.end))
# toss out the non-at-risk intervals, and extra variables
# 3 subjects had an event on their last day of fu, infect=1 and end=1
dnase <- subset(dnase, (infect==1 | end==0), c(id:trt, fev:infect))
agfit <- coxph(Surv(tstart, tstop, infect) ~ trt + fev + cluster(id),
data=dnase)
}
\keyword{datasets}
|
/man/rhDNase.Rd
|
no_license
|
Infie/survival
|
R
| false
| false
| 3,954
|
rd
|
\name{rhDNase}
\alias{rhDNase}
\docType{data}
\title{rhDNASE data set}
\description{
Results of a randomized trial of rhDNase for the treatment of cystic
fibrosis.
}
\format{
A data frame with 767 observations on the following 8 variables.
\describe{
\item{\code{id}}{subeject id}
\item{\code{inst}}{enrolling institution}
\item{\code{trt}}{treatment arm: 0=placebo, 1= rhDNase}
\item{\code{entry.dt}}{date of entry into the study}
\item{\code{end.dt}}{date of last follow-up}
\item{\code{fev}}{forced expiratory volume at enrollment, a measure
of lung capacity}
\item{\code{ivstart}}{days from enrollment to the start of IV antibiotics}
\item{\code{ivstop}}{days from enrollment to the cessation of
IV antibiotics}
}
}
\details{
In patients with cystic fibrosis, extracellular DNA is released by
leukocytes that accumulate in the airways in response to chronic bacterial
infection.
This excess DNA thickens the mucus, which then cannot be cleared from the
lung by the cilia. The accumulation leads to exacerbations of
respiratory symptoms and progressive deterioration of lung function.
At the time of this study
more than 90\% of cystic fibrosis patients eventually died of lung
disease.
Deoxyribonuclease I (DNase I) is a
human enzyme normally present in the mucus of human lungs that digests
extracellular DNA.
Genentech, Inc. cloned a highly purified recombinant DNase I (rhDNase or
Pulmozyme) which when delivered to the lungs in an aerosolized form cuts
extracellular DNA, reducing the viscoelasticity of airway
secretions and improving clearance.
In 1992 the company
conducted a randomized double-blind trial comparing rhDNase to placebo.
Patients were then monitored for
pulmonary exacerbations, along with measures of lung volume and flow.
The primary endpoint was the time until
first pulmonary exacerbation; however, data on all exacerbations were
collected for 169 days.
The definition of an exacerbation was an infection that required the use
of intravenous (IV) antibiotics. Subjects had 0--5 such episodes during
the trial, those with more than one have multiple rows in the data
set, those with none have NA for the IV start and end times.
A few subjects were infected at the time of enrollment, subject 173 for
instance has a first infection interval of -21 to 7. We do not count this
first infection as an "event", and the subject first enters the risk set
at day 7.
Subjects who have an event are not considered to be at risk for another
event during the course of antibiotics, nor for an additional 6 days
after they end. (If the symptoms reappear immediately after cessation
then from a medical standpoint this would not be a new infection.)
This data set reproduces the data in Therneau and Grambsch; it does not
exactly reproduce those in Therneau and Hamilton due to data set updates.
}
\references{
T. M. Therneau and P. M. Grambsch, Modeling Survival Data: Extending
the Cox Model, Springer, 2000.
T. M. Therneau and S. A. Hamilton,
rhDNase as an example of recurrent event analysis, Statistics
in Medicine, 16:2029-2047, 1997.
}
\examples{
# Build the start-stop data set for analysis, and
# replicate line 2 of table 8.13
first <- subset(rhDNase, !duplicated(id)) #first row for each subject
dnase <- tmerge(first, first, id=id, tstop=as.numeric(end.dt -entry.dt))
# Subjects whose fu ended during the 6 day window are the reason for
# this next line
temp.end <- with(rhDNase, pmin(ivstop+6, end.dt-entry.dt))
dnase <- tmerge(dnase, rhDNase, id=id,
infect=event(ivstart),
end= event(temp.end))
# toss out the non-at-risk intervals, and extra variables
# 3 subjects had an event on their last day of fu, infect=1 and end=1
dnase <- subset(dnase, (infect==1 | end==0), c(id:trt, fev:infect))
agfit <- coxph(Surv(tstart, tstop, infect) ~ trt + fev + cluster(id),
data=dnase)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_objective_epistasis.R
\name{ml_objective_epistasis}
\alias{ml_objective_epistasis}
\title{ml_objective for epistasis calculation}
\usage{
ml_objective_epistasis(gamma, MAF_input1, MAF_input2, all_tumors, gene1,
gene2, variant1, variant2, specific_mut_rates1, specific_mut_rates2)
}
\arguments{
\item{gamma}{A selection intensity at which to calculate the likelihood}
\item{all_tumors}{A list of all the tumors we are calculating the likelihood across}
\item{MAF_input1, MAF_input2}{Data frames (one per gene/variant in the epistatic pair) that include columns "Unique_patient_identifier", "Gene_name", and "unique_variant_ID_AA"}
\item{gene1, gene2}{The two genes in the epistatic pair we want to look at}
\item{variant1, variant2}{The two variants we want to look at}
\item{specific_mut_rates1, specific_mut_rates2}{Matrices of site and tumor specific mutation rates (one per variant) where the rows correspond to tumors and the columns to variants (produced by mutation_rate_calc)}
}
\value{
A log likelihood value
}
\description{
Objective function that we will be optimizing in order to find the site specific selection intensity that maximizes the likelihood of each tumor having a mutation or not, where the mutation rates are site and tumor specific.
}
|
/man/ml_objective_epistasis.Rd
|
no_license
|
BacemDataScience/cancereffectsizeR
|
R
| false
| true
| 1,201
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_objective_epistasis.R
\name{ml_objective_epistasis}
\alias{ml_objective_epistasis}
\title{ml_objective for epistasis calculation}
\usage{
ml_objective_epistasis(gamma, MAF_input1, MAF_input2, all_tumors, gene1,
gene2, variant1, variant2, specific_mut_rates1, specific_mut_rates2)
}
\arguments{
\item{gamma}{A selection intensity at which to calculate the likelihood}
\item{all_tumors}{A list of all the tumors we are calculating the likelihood across}
\item{MAF_input1, MAF_input2}{Data frames (one per variant in the pair) that include columns "Unique_patient_identifier", "Gene_name", and "unique_variant_ID_AA"}
\item{gene1, gene2}{The genes we want to look at}
\item{variant1, variant2}{The variants we want to look at}
\item{specific_mut_rates1, specific_mut_rates2}{Matrices (one per variant in the pair) of site and tumor specific mutation rates where the rows correspond to tumors and the columns to variants (produced by mutation_rate_calc)}
}
\value{
A log likelihood value
}
\description{
Objective function that we will be optimizing in order to find the site specific selection intensity that maximizes the likelihood of each tumor having a mutation or not, where the mutation rates are site and tumor specific.
}
|
#' Create a vector of pi Dirichlet priors with specified values for one or more collections
#'
#' This handles a case in which the user provides a data frame for \code{pi_prior}. The
#' data frame lists desired Dirichlet parameter priors for at least one reference collection,
#' and/or a default value for all unspecified collections.
#'
#' Input checking is currently done in the early stages of \code{infer_mixture} in order to
#' throw errors before long processing times, and avoid re-checking during \code{bootstrap_rho}.
#'
#' @param P A data frame of one or more desired pi prior parameters. One column, "collection",
#' is a character vector, with valid values including the names of any reference collections,
#' or the special value "DEFAULT_PI". The second column, "pi_param" is the prior value to be
#' used for each collection.
#' @param C a tibble with a column "collection" collection names
#' @return A numeric vector of pi prior parameters, one per row of \code{C}, in the row
#' order of \code{C}.
#' @keywords internal
#' @export
custom_pi_prior <- function(P, C) {
  # Prior for any collection not listed in P: the user-supplied "DEFAULT_PI"
  # value if present, otherwise the uninformative 1 / (number of collections).
  if ("DEFAULT_PI" %in% P$collection) {
    default <- P$pi_param[P$collection == "DEFAULT_PI"]
  } else {
    default <- 1 / nrow(C)
  }

  # Join explicitly on "collection": the implicit left_join(C, P) also matched
  # any other column names shared by C and P (and emitted a "Joining, by ="
  # message). Collections without a specified prior get the default.
  ret <- dplyr::left_join(C, P, by = "collection") %>%
    tidyr::replace_na(list(pi_param = default))
  ret$pi_param
}
|
/R/custom_pi_prior.R
|
no_license
|
eriqande/rubias
|
R
| false
| false
| 1,207
|
r
|
#' Create a vector of pi Dirichlet priors with specified values for one or more collections
#'
#' This handles a case in which the user provides a data frame for \code{pi_prior}. The
#' data frame lists desired Dirichlet parameter priors for at least one reference collection,
#' and/or a default value for all unspecified collections.
#'
#' Input checking is currently done in the early stages of \code{infer_mixture} in order to
#' throw errors before long processing times, and avoid re-checking during \code{bootstrap_rho}.
#'
#' @param P A data frame of one or more desired pi prior parameters. One column, "collection",
#' is a character vector, with valid values including the names of any reference collections,
#' or the special value "DEFAULT_PI". The second column, "pi_param" is the prior value to be
#' used for each collection.
#' @param C a tibble with a column "collection" collection names
#' @return A numeric vector of pi prior parameters, one per row of \code{C}, in the row
#' order of \code{C}.
#' @keywords internal
#' @export
custom_pi_prior <- function(P, C) {
  # Prior for any collection not listed in P: the user-supplied "DEFAULT_PI"
  # value if present, otherwise the uninformative 1 / (number of collections).
  if ("DEFAULT_PI" %in% P$collection) {
    default <- P$pi_param[P$collection == "DEFAULT_PI"]
  } else {
    default <- 1 / nrow(C)
  }

  # Join explicitly on "collection": the implicit left_join(C, P) also matched
  # any other column names shared by C and P (and emitted a "Joining, by ="
  # message). Collections without a specified prior get the default.
  ret <- dplyr::left_join(C, P, by = "collection") %>%
    tidyr::replace_na(list(pi_param = default))
  ret$pi_param
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_states}
\alias{data_states}
\title{FWS data on 7yr ESA implementation plan}
\format{A data frame with 1120 rows and 12 variables
\describe{
\item{\code{Package.Name}}{Species grouping}
\item{\code{Species.name}}{Name of species, either common or scientific}
\item{\code{Action.Type}}{Type of action}
\item{\code{Lead.RO}}{Primary FWS regional office responsible for species assessment}
\item{\code{Priority.Bin.Ranking}}{}
\item{\code{Timeframe}}{Fiscal Year for status review}
\item{\code{Current.Candidate}}{Current candidate for listing?}
\item{\code{Range}}{List of states and territories in which species is found}
\item{\code{LPN}}{Listing Priority Number; 1 - 12}
\item{\code{Priority}}{Status review priority bin; 1-5}
\item{\code{State}}{Individual states in which species is found}
\item{\code{OriginRow}}{Row number of species record in \code{data}}
}}
\source{
\url{https://www.fws.gov/endangered/esa-library}
}
\usage{
data_states
}
\description{
Data identical to \code{data_clean}, with unnested lists of states for each species.
Each record represents a single species by state combination.
}
\keyword{datasets}
|
/man/data_states.Rd
|
no_license
|
mjevans26/esapriorities
|
R
| false
| true
| 1,241
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{data_states}
\alias{data_states}
\title{FWS data on 7yr ESA implementation plan}
\format{A data frame with 1120 rows and 12 variables
\describe{
\item{\code{Package.Name}}{Species grouping}
\item{\code{Species.name}}{Name of species, either common or scientific}
\item{\code{Action.Type}}{Type of action}
\item{\code{Lead.RO}}{Primary FWS regional office responsible for species assessment}
\item{\code{Priority.Bin.Ranking}}{}
\item{\code{Timeframe}}{Fiscal Year for status review}
\item{\code{Current.Candidate}}{Current candidate for listing?}
\item{\code{Range}}{List of states and territories in which species is found}
\item{\code{LPN}}{Listing Priority Number; 1 - 12}
\item{\code{Priority}}{Status review priority bin; 1-5}
\item{\code{State}}{Individual states in which species is found}
\item{\code{OriginRow}}{Row number of species record in \code{data}}
}}
\source{
\url{https://www.fws.gov/endangered/esa-library}
}
\usage{
data_states
}
\description{
Data identical to \code{data_clean}, with unnested lists of states for each species.
Each record represents a single species by state combination.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge.R
\name{merge}
\alias{merge}
\alias{merge,SilacProteinExperiment,ANY-method}
\alias{merge,SilacPeptideExperiment,ANY-method}
\alias{merge,SilacProteomicsExperiment,ANY-method}
\title{Merge}
\usage{
\S4method{merge}{SilacProteinExperiment,ANY}(x, y, by, by.x = by, by.y = by, all = TRUE, ...)
\S4method{merge}{SilacPeptideExperiment,ANY}(x, y, by, by.x = by, by.y = by, all = TRUE, ...)
\S4method{merge}{SilacProteomicsExperiment,ANY}(
x,
y,
by.prot,
by.prot.x = by.prot,
by.prot.y = by.prot,
by.pept,
by.pept.x = by.pept,
by.pept.y = by.pept,
all = TRUE,
...
)
}
\arguments{
\item{x}{A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.}
\item{y}{A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.}
\item{by, by.x, by.y}{A \code{character} indicating the columns used for the
merging.}
\item{all}{A \code{logical} indicating if all proteins/peptides should
be returned or only the intersect.}
\item{...}{Further parameters passed into \code{base::merge}.}
\item{by.prot, by.prot.x, by.prot.y}{For \code{SilacProteomicsExperiment}
objects a \code{character} indicating the columns used for the merging of the
protein level.}
\item{by.pept, by.pept.x, by.pept.y}{For \code{SilacProteomicsExperiment}
objects a \code{character} indicating the columns used for the merging of the
peptide level.}
}
\value{
A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.
}
\description{
Merges two objects of the same class:
\code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or
\code{SilacProteomicsExperiment}.
}
\details{
This function is designed to be able to merge different samples
from different experiments since it is probable that not the exact same
proteins are found in both experiments and therefore \code{cbind} cannot be
used. It uses the merge base function to merge the rowData data frames and
merges the assays based on such merge. The colData \code{data.frame} are
joined.
For a \code{SilacProteomicsExperiment} object it gets a bit more complicated
since it is possible that some peptides that were assigned to one protein in
one experiment are assigned to another one in another experiment. Therefore
the linkerDf \code{data.frame} is recalculated.
}
\examples{
data('wormsPE')
merge(wormsPE[1:10, 1:3], wormsPE[3:10, 4:5])
}
|
/man/merge.Rd
|
no_license
|
marcpaga/pulsedSilac
|
R
| false
| true
| 2,525
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/merge.R
\name{merge}
\alias{merge}
\alias{merge,SilacProteinExperiment,ANY-method}
\alias{merge,SilacPeptideExperiment,ANY-method}
\alias{merge,SilacProteomicsExperiment,ANY-method}
\title{Merge}
\usage{
\S4method{merge}{SilacProteinExperiment,ANY}(x, y, by, by.x = by, by.y = by, all = TRUE, ...)
\S4method{merge}{SilacPeptideExperiment,ANY}(x, y, by, by.x = by, by.y = by, all = TRUE, ...)
\S4method{merge}{SilacProteomicsExperiment,ANY}(
x,
y,
by.prot,
by.prot.x = by.prot,
by.prot.y = by.prot,
by.pept,
by.pept.x = by.pept,
by.pept.y = by.pept,
all = TRUE,
...
)
}
\arguments{
\item{x}{A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.}
\item{y}{A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.}
\item{by, by.x, by.y}{A \code{character} indicating the columns used for the
merging.}
\item{all}{A \code{logical} indicating if all proteins/peptides should
be returned or only the intersect.}
\item{...}{Further parameters passed into \code{base::merge}.}
\item{by.prot, by.prot.x, by.prot.y}{For \code{SilacProteomicsExperiment}
objects a \code{character} indicating the columns used for the merging of the
protein level.}
\item{by.pept, by.pept.x, by.pept.y}{For \code{SilacProteomicsExperiment}
objects a \code{character} indicating the columns used for the merging of the
peptide level.}
}
\value{
A \code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or a
\code{SilacProteomicsExperiment} object.
}
\description{
Merges two objects of the same class:
\code{SilacProteinExperiment}, \code{SilacPeptideExperiment} or
\code{SilacProteomicsExperiment}.
}
\details{
This function is designed to be able to merge different samples
from different experiments since it is probable that not the exact same
proteins are found in both experiments and therefore \code{cbind} cannot be
used. It uses the merge base function to merge the rowData data frames and
merges the assays based on such merge. The colData \code{data.frame} are
joined.
For a \code{SilacProteomicsExperiment} object it gets a bit more complicated
since it is possible that some peptides that were assigned to one protein in
one experiment are assigned to another one in another experiment. Therefore
the linkerDf \code{data.frame} is recalculated.
}
\examples{
data('wormsPE')
merge(wormsPE[1:10, 1:3], wormsPE[3:10, 4:5])
}
|
# Attach DT for renderDataTable/datatable and the format* helpers.
# library() (rather than require()) fails loudly if DT is not installed.
library(DT)

# Render a DT data table with optional column formatting.
#
# Args:
#   data:       a reactive (zero-argument function) returning the data frame to show.
#   currency:   column names to format as currency (0 digits), or NULL.
#   percentage: column names to format as percentages (1 digit), or NULL.
#   round:      column names to round to 0 digits, or NULL.
#   dom:        DataTables `dom` layout string.
#
# Returns: a Shiny output renderer producing the formatted table.
render_tables <- function(data, currency = NULL, percentage = NULL, round = NULL, dom = 'ftirp'){
  DT::renderDataTable(expr = {
    # Keep only the requested columns that actually exist in the data.
    # These three checks are intentionally independent `if`s: the original
    # `else if` chain filtered at most ONE of the arguments, so e.g.
    # `percentage` was passed through unfiltered whenever `currency` was set.
    if(!is.null(currency)){
      currency <- currency[currency %in% colnames(data())]
    }
    if(!is.null(percentage)){
      percentage <- percentage[percentage %in% colnames(data())]
    }
    if(!is.null(round)){
      round <- round[round %in% colnames(data())]
    }
    DT::datatable(data()
                  , rownames = FALSE
                  , escape = FALSE
                  , options = list(
                      pageLength = 25
                      , dom = dom  # Reference for dom: https://datatables.net/reference/option/dom
                      , scrollX = TRUE
                  )
    ) %>%
      formatCurrency(currency, digits = 0) %>%
      formatPercentage(percentage, digits = 1) %>%
      formatRound(round, digits = 0)
  }, options = list(
    lengthChange = TRUE
  ))
}
|
/6. Viz/shiny/src/functions/render_table.R
|
permissive
|
ds4a82/secop-analysis
|
R
| false
| false
| 960
|
r
|
# Attach DT for renderDataTable/datatable and the format* helpers.
# library() (rather than require()) fails loudly if DT is not installed.
library(DT)

# Render a DT data table with optional column formatting.
#
# Args:
#   data:       a reactive (zero-argument function) returning the data frame to show.
#   currency:   column names to format as currency (0 digits), or NULL.
#   percentage: column names to format as percentages (1 digit), or NULL.
#   round:      column names to round to 0 digits, or NULL.
#   dom:        DataTables `dom` layout string.
#
# Returns: a Shiny output renderer producing the formatted table.
render_tables <- function(data, currency = NULL, percentage = NULL, round = NULL, dom = 'ftirp'){
  DT::renderDataTable(expr = {
    # Keep only the requested columns that actually exist in the data.
    # These three checks are intentionally independent `if`s: the original
    # `else if` chain filtered at most ONE of the arguments, so e.g.
    # `percentage` was passed through unfiltered whenever `currency` was set.
    if(!is.null(currency)){
      currency <- currency[currency %in% colnames(data())]
    }
    if(!is.null(percentage)){
      percentage <- percentage[percentage %in% colnames(data())]
    }
    if(!is.null(round)){
      round <- round[round %in% colnames(data())]
    }
    DT::datatable(data()
                  , rownames = FALSE
                  , escape = FALSE
                  , options = list(
                      pageLength = 25
                      , dom = dom  # Reference for dom: https://datatables.net/reference/option/dom
                      , scrollX = TRUE
                  )
    ) %>%
      formatCurrency(currency, digits = 0) %>%
      formatPercentage(percentage, digits = 1) %>%
      formatRound(round, digits = 0)
  }, options = list(
    lengthChange = TRUE
  ))
}
|
### Chapter 15 (Whitlock & Schluter): one-way ANOVA with planned and unplanned
### comparisons. Data: circadian phase shift (h) by light-treatment group.
timeshift <- read.csv("chap15e1KneesWhoSayNight.csv")
stripchart(timeshift$shift ~ timeshift$treatment, vertical = TRUE, method = "jitter",
           xlab = "Treatment", ylab = "Shift (h)")

### Run an Anova
# Create a linear model first.
timeshift.lm <- lm(timeshift$shift ~ timeshift$treatment)
# Then create the Anova table.
anova(timeshift.lm)

### Planned (a priori) comparisons: In Ex. 15.1, compare the mean of the knee group
### to the mean of the control group using the equation at the top of page 473.
mean.knee <- mean(timeshift$shift[timeshift$treatment == "knee"])
mean.control <- mean(timeshift$shift[timeshift$treatment == "control"])
difference <- mean.knee - mean.control

### Calculate the sample sizes.
n.knee <- sum(timeshift$treatment == "knee")
n.control <- sum(timeshift$treatment == "control")

# Extract the error (residual) mean square directly from the ANOVA table,
# instead of hand-copying a rounded value (the original hard-coded 0.4955445).
MS.error <- anova(timeshift.lm)[[3]][2]
SE.difference <- sqrt(MS.error * (1/n.knee + 1/n.control))
t.difference <- difference/SE.difference
df.difference <- length(timeshift$shift) - 3  # df = N - number of groups
# Two-sided p-value: use -abs(t) so the formula is valid whatever the sign of t.
# (The original 2*pt(t, lower.tail = TRUE) only gives a correct p-value when t < 0.)
p.value <- 2 * pt(q = -abs(t.difference), df = df.difference)
print(p.value)

### Alternatively...
summary(timeshift.lm)  # This extracts different information from the lm. "Intercept" is the mean for the first (alphabetical) group.
# The p-values compare the intercept (control) to 0 and each of the other 2 to the intercept.
# If you want to compare to another group, rearrange the order using the levels() function.
### Also, note the R-squared value in the output of summary().

### Unplanned (post-hoc) comparisons: Tukey's Honestly Significant Difference test
# Use the package multcomp (see Whitlock & Schluter's website),
# OR use aov instead of lm:
timeshift.aov <- aov(timeshift$shift ~ timeshift$treatment)
summary(timeshift.aov)  # This should be identical to the output from anova(timeshift.lm) above.
TukeyHSD(timeshift.aov)  # All (post-hoc) pairwise comparisons.

################################
### Check the assumptions of normality and equal variance.
# Normality test (also try qqnorm):
shapiro.test(timeshift$shift[timeshift$treatment == "control"])
shapiro.test(timeshift$shift[timeshift$treatment == "knee"])
shapiro.test(timeshift$shift[timeshift$treatment == "eyes"])
# Equal variance (if you have the car package installed):
library(car)
leveneTest(timeshift$shift, group = timeshift$treatment, center = mean)

### Non-parametric test: Kruskal-Wallis. (Not necessary in this case, but this is the command.)
kruskal.test(timeshift$shift ~ timeshift$treatment)

################################
### Practice problem chap. 15, #1
caffeine <- read.csv("chap15q01HoneybeeCaffeine.csv")
head(caffeine)
stripchart(caffeine$consumptionDifferenceFromControl ~ caffeine$ppmCaffeine, vertical = TRUE,
           xlab = "Caffeine concentration (ppm)", ylab = "Difference in amount of nectar taken (g)")
caff.lm <- lm(caffeine$consumptionDifferenceFromControl ~ caffeine$ppmCaffeine)
anova(caff.lm)  # ***What's wrong with this? (ppmCaffeine is numeric, so this fits a regression, not a groupwise ANOVA.)
### Try this:
caff.lm <- lm(caffeine$consumptionDifferenceFromControl ~ factor(caffeine$ppmCaffeine))
anova(caff.lm)
### Which groups differ from each other? (Post-hoc comparisons with the Tukey test require aov())
caff.aov <- aov(caffeine$consumptionDifferenceFromControl ~ factor(caffeine$ppmCaffeine))
summary(caff.aov)  # This should be the same as anova(caff.lm).
TukeyHSD(caff.aov)
|
/anova.part2.R
|
no_license
|
AHdeRojas/Experimental-Design-and-Biometry-R-
|
R
| false
| false
| 3,306
|
r
|
### Chapter 15 (Whitlock & Schluter): one-way ANOVA with planned and unplanned
### comparisons. Data: circadian phase shift (h) by light-treatment group.
timeshift <- read.csv("chap15e1KneesWhoSayNight.csv")
stripchart(timeshift$shift ~ timeshift$treatment, vertical = TRUE, method = "jitter",
           xlab = "Treatment", ylab = "Shift (h)")

### Run an Anova
# Create a linear model first.
timeshift.lm <- lm(timeshift$shift ~ timeshift$treatment)
# Then create the Anova table.
anova(timeshift.lm)

### Planned (a priori) comparisons: In Ex. 15.1, compare the mean of the knee group
### to the mean of the control group using the equation at the top of page 473.
mean.knee <- mean(timeshift$shift[timeshift$treatment == "knee"])
mean.control <- mean(timeshift$shift[timeshift$treatment == "control"])
difference <- mean.knee - mean.control

### Calculate the sample sizes.
n.knee <- sum(timeshift$treatment == "knee")
n.control <- sum(timeshift$treatment == "control")

# Extract the error (residual) mean square directly from the ANOVA table,
# instead of hand-copying a rounded value (the original hard-coded 0.4955445).
MS.error <- anova(timeshift.lm)[[3]][2]
SE.difference <- sqrt(MS.error * (1/n.knee + 1/n.control))
t.difference <- difference/SE.difference
df.difference <- length(timeshift$shift) - 3  # df = N - number of groups
# Two-sided p-value: use -abs(t) so the formula is valid whatever the sign of t.
# (The original 2*pt(t, lower.tail = TRUE) only gives a correct p-value when t < 0.)
p.value <- 2 * pt(q = -abs(t.difference), df = df.difference)
print(p.value)

### Alternatively...
summary(timeshift.lm)  # This extracts different information from the lm. "Intercept" is the mean for the first (alphabetical) group.
# The p-values compare the intercept (control) to 0 and each of the other 2 to the intercept.
# If you want to compare to another group, rearrange the order using the levels() function.
### Also, note the R-squared value in the output of summary().

### Unplanned (post-hoc) comparisons: Tukey's Honestly Significant Difference test
# Use the package multcomp (see Whitlock & Schluter's website),
# OR use aov instead of lm:
timeshift.aov <- aov(timeshift$shift ~ timeshift$treatment)
summary(timeshift.aov)  # This should be identical to the output from anova(timeshift.lm) above.
TukeyHSD(timeshift.aov)  # All (post-hoc) pairwise comparisons.

################################
### Check the assumptions of normality and equal variance.
# Normality test (also try qqnorm):
shapiro.test(timeshift$shift[timeshift$treatment == "control"])
shapiro.test(timeshift$shift[timeshift$treatment == "knee"])
shapiro.test(timeshift$shift[timeshift$treatment == "eyes"])
# Equal variance (if you have the car package installed):
library(car)
leveneTest(timeshift$shift, group = timeshift$treatment, center = mean)

### Non-parametric test: Kruskal-Wallis. (Not necessary in this case, but this is the command.)
kruskal.test(timeshift$shift ~ timeshift$treatment)

################################
### Practice problem chap. 15, #1
caffeine <- read.csv("chap15q01HoneybeeCaffeine.csv")
head(caffeine)
stripchart(caffeine$consumptionDifferenceFromControl ~ caffeine$ppmCaffeine, vertical = TRUE,
           xlab = "Caffeine concentration (ppm)", ylab = "Difference in amount of nectar taken (g)")
caff.lm <- lm(caffeine$consumptionDifferenceFromControl ~ caffeine$ppmCaffeine)
anova(caff.lm)  # ***What's wrong with this? (ppmCaffeine is numeric, so this fits a regression, not a groupwise ANOVA.)
### Try this:
caff.lm <- lm(caffeine$consumptionDifferenceFromControl ~ factor(caffeine$ppmCaffeine))
anova(caff.lm)
### Which groups differ from each other? (Post-hoc comparisons with the Tukey test require aov())
caff.aov <- aov(caffeine$consumptionDifferenceFromControl ~ factor(caffeine$ppmCaffeine))
summary(caff.aov)  # This should be the same as anova(caff.lm).
TukeyHSD(caff.aov)
|
#' @param notification_id \code{character}; the Salesforce Id assigned to a created
#' analytics notification. It will start with \code{"0Au"}.
|
/man-roxygen/notification_id.R
|
permissive
|
StevenMMortimer/salesforcer
|
R
| false
| false
| 145
|
r
|
#' @param notification_id \code{character}; the Salesforce Id assigned to a created
#' analytics notification. It will start with \code{"0Au"}.
|
#!/usr/bin/env Rscript
# Written by:
# Gustavo Burin <gustavoburin@usp.br>
# In collaboration with:
# Equipe Observatório COVID-19 BR
#####################################
#####################################
### Builds a personalized COVID-19 situation report (PDF) for Brazil as a
### whole (--u p) or for a single state (--u e --n <SIGLA>), by rendering the
### R Markdown template "projecoes_observatorio_modelo.Rmd" against the
### Observatório COVID-19 BR case-count data.
### NOTE(review): the previous header here described "B-Diversitree", a
### Bayesian diversification module -- stale copy-paste from another script.
#####################################
#####################################
# Set the locale to Brazil so dates render in Portuguese (month names are
# produced below with format(..., "%d de %B")).
Sys.setlocale(category = "LC_TIME", locale = "pt_BR.UTF-8")
# Loading Libraries Required
options(warn=1)  # print each warning as it occurs
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("rmarkdown"))
suppressPackageStartupMessages(library("knitr"))
suppressPackageStartupMessages(library("dplyr"))
suppressPackageStartupMessages(library("ggplot2"))
suppressPackageStartupMessages(library("patchwork"))
knitr::opts_chunk$set(echo = FALSE, warning=FALSE, message=FALSE)
# Project helpers used below (diazero(), forecast.exponential()) come from
# this sourced file; zoo() is presumably attached there too -- TODO confirm.
source("https://raw.githubusercontent.com/covid19br/covid19br.github.io/master/_src/fct/funcoes.R")
#####################################
#####################################
#####################################
# Parsing Command Line Options
# NOTE(review): the --u help text offers "c para cidades", but the code below
# only handles "p", "e" and "m" -- confirm the intended municipality flag.
option_list <- list(
    make_option("--u", default = "p",
                help = ("Unidade (país/estado/cidade) de interesse para gerar relatório. p para Brasil, e para Estado, c para cidades."),
                metavar = "Unidade"),
    make_option("--n", default = "Brasil",
                help = ("Sigla da unidade de interesse."),
                metavar = "Nome_Unidade"),
    make_option("--d", default = "t",
                help = ("Data para confecção do relatório. O padrão é t para hoje (today), com formato AAAA-MM-DD"),
                metavar = "Data")##,
    ## make_option("--p", type="integer", default=100,
    ##             help=("Print frequency [default %default]."),
    ##             metavar="Prnt_freq"),
    ## make_option("--d", type="double", default=0.05,
    ##             help=("Window size update parameters [default %default]."),
    ##             metavar="D-Range"),
    ## make_option("--r", type="integer", default=5,
    ##             help=("Rate to use for the calculation of the prior [default %default]."),
    ##             metavar="Rate"),
    ## make_option("--t", type="integer", default=1,
    ##             help=("Number of trees [default %default]."),
    ##             metavar="Tree"),
    ## make_option("--rho", default= NULL,
    ##             help=("Taxon sampling (in quotations, space separated) [default %default]."),
    ##             metavar="Sampling"),
    ## make_option("--w", default="1.5 1.5 2 2",
    ##             help=("Window size for lambda, mu, q and gamma (shape parameters for the hyperpriors) [default %default]." ),metavar="Window_Size"),
    ## make_option("--c", default="NULL",
    ##             help="Parameters to be constrained. Introduce \"lamdas\", \"mus\" or \"qs\". \n\t\t Alternatively a string representing the desired contraints can be introduced. \n\t\t E.g. For three states under a Musse model: \"1,2,2,4,5,6,7,8,9,10,8,12\" \n\t\t This indicates lamda3 ~ lamda2 and q31 ~ q13 [default %default]",
    ##             metavar="Constraints")
)
parser_object <- OptionParser(usage = "Rscript %prog [Opções] [ARQUIVO]\n",
                              option_list = option_list,
                              description = "Script para compilar reports personalizados. Caso deseje gerar um relatório para o país, usar opção --u 'p'; caso queira algum estado em particular, usar opções --u 'e' --n '[NOME_ESTADO]'. Caso deseje usar uma tabela externa, indicar o caminho para o arquivo .csv após as opções. A tabela deverá conter obrigatoriamente ao menos duas colunas: 'day' e 'total.confirmed.cases'")
opt <- parse_args(parser_object, args = commandArgs(trailingOnly = TRUE), positional_arguments=TRUE)
#####################################
# Handling User Input and/or Option Errors
# A non-country report needs an explicit unit name supplied via --n.
if(opt$options$u != "p" & opt$options$n == "Brasil"){
    cat("Erro: informar sigla do Estado desejado.\n\n"); print_help(parser_object); quit(status=1)
}
#####################################
# Defining Variables and Objects
#set.seed(2)
unid <- opt$options$u        # unit type: "p" (country), "e" (state), "m" (municipality)
nome_unid <- opt$options$n   # unit name / state abbreviation
tempo <- opt$options$d       # report date: "t" (today) or "AAAA-MM-DD"
# Human-readable title for the rendered report.
if(unid=="e")
    nome_titulos <- paste("Estado de/da", nome_unid)
if(unid=="m")
    nome_titulos <- paste("Município de", nome_unid)
if(unid=="p")
    nome_titulos <- "Brasil"
# Load the raw data: the dated file under ./dados/ by default, or a
# user-supplied CSV given as a positional argument.
if(length(opt$args) == 0){
    dados.full <- read.csv(paste0("./dados/BRnCov19_", ifelse(tempo == "t", format(Sys.Date(), "%Y%m%d"), format(as.Date(tempo), "%Y%m%d")), ".csv"), as.is = TRUE, sep = ",")
    #names(dados.full)[grep("dat", names(dados.full))] <- "data"
    #names(dados.full)[2] <- "estado"
    #names(dados.full)[grep("estad", names(dados.full))] <- "estado"
    #dados.full$data <- rep(seq.Date(from = as.Date("2020/01/30", format = "%Y/%m/%d"), to = as.Date("2020/04/06", format = "%Y/%m/%d"), by = 1), times = length(unique(dados.full$estado)))
    #dados.full$data <- as.Date(dados.full$data, format = "%d/%m/%Y")
    #names(dados.full) <- c("regiao", "estado", "data", "novos.casos", "casos.acumulados", "obitos.novos", "obitos.acumulados")
    #write.table(dados.full, file = paste0("./dados/BRnCov19_", format(Sys.Date(), "%Y%m%d"), ".csv"), row.names = FALSE, sep = ",")
    #write.table(dados.full, file = "./dados/EstadosCov19.csv", row.names = FALSE, sep = ",")
} else {
    dados.full <- read.csv(paste0(opt$args[1]), as.is = TRUE)
    #dados.full[rowSums(is.na(dados.full)) != 5,]
}
# Build the cumulative-cases series (nconf.zoo) and a 5-day exponential
# projection for the requested unit. NOTE(review): there is no data branch for
# unid == "m" (municipality) -- confirm whether that path is supported.
if(unid == "p"){
    dados.br <- read.csv("./dados/BrasilCov19.csv", as.is = TRUE)
    #names(dados.br)[1] <- "data"
    #write.table(dados.br, file = "./dados/BrasilCov19.csv", row.names = FALSE, sep = ",")
    dados.clean <- dados.br[, c("data", "casos.acumulados")]
    dados.clean$data <- as.Date(dados.clean$data)
    #as.data.frame(aggregate(dados.full$casos.acumulados, by = list(dados.full$data), FUN = sum, na.rm = TRUE))
    #dados.full[rowSums(is.na(dados.full)) != 5,]
    names(dados.clean) <- c("day", "confirmed.cases")
    nconf <- dados.clean[!duplicated(dados.clean),]  # drop exact duplicate rows
    nconf.zoo <- zoo(nconf[,-1], as.Date(nconf$day)) %>%
        diazero(limite = 1)
    ## Projections (exponential forecast over the last 5 points of the series;
    ## forecast.exponential is defined in the sourced funcoes.R)
    exp.5d <- forecast.exponential(nconf.zoo,
                                   start = length(time(nconf.zoo))-4,
                                   days.forecast = 5)
    data.final <- format(time(exp.5d)[5], format="%d de %B")  # last forecast date, in Portuguese
} else if(unid == "e"){
    dados.full <- read.csv("./dados/EstadosCov19.csv", as.is = TRUE)
    dados.filter <- dados.full[dados.full$estado == nome_unid,]
    dados.clean <- as.data.frame(aggregate(dados.filter$casos.acumulados, by = list(dados.filter$data), FUN = sum, na.rm = TRUE))
    ## Remove the trailing days if they are blank
    ## if(sum(dados.clean[, 1] >= Sys.Date()) != 0 & sum(dados.clean[, 2] == 0) != 0){
    ##     dados.clean <- dados.clean[-which((dados.clean[, 1] >= Sys.Date()) != 0 & sum(dados.clean[, 2] == 0) != 0),]
    ## }
    names(dados.clean) <- c("day", "confirmed.cases")
    nconf <- dados.clean[!duplicated(dados.clean),]
    nconf.zoo <- zoo(nconf[,-1], as.Date(nconf$day)) %>%
        diazero(limite = 1)
    ## Projections (exponential forecast over the last 5 points of the series)
    exp.5d <- forecast.exponential(nconf.zoo,
                                   start = length(time(nconf.zoo))-4,
                                   days.forecast = 5)
    data.final <- format(time(exp.5d)[5], format = "%d de %B")
}
# Render the PDF report; the Rmd template presumably reads the objects created
# above (nconf.zoo, exp.5d, data.final, nome_titulos, ...) -- TODO confirm.
render(input = "./projecoes_observatorio_modelo.Rmd",
       output_file = paste0("./relatorios_gerados/relatorio_", gsub(" ", "_", nome_unid), "_", ifelse(tempo == "t", format(Sys.time(), '%d-%m-%Y_%Hh%Mmin%Ss'), format(as.Date(tempo), "%d-%m-%Y")), ".pdf"),
       encoding = "utf8")
|
/master_report.R
|
no_license
|
covid19br/reports
|
R
| false
| false
| 7,707
|
r
|
#!/usr/bin/env Rscript
# Written by:
# Gustavo Burin <gustavoburin@usp.br>
# In collaboration with:
# Equipe Observatório COVID-19 BR
#####################################
#####################################
### B-Diversitree is a module that runs a bayesian implementation of the Musse, Geosse and Classe models present in the package Diversitree.
###
###
###
###
#####################################
#####################################
# Setting locale to Brasil
Sys.setlocale(category = "LC_TIME", locale = "pt_BR.UTF-8")
# Loading Libraries Required
options(warn=1)
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("rmarkdown"))
suppressPackageStartupMessages(library("knitr"))
suppressPackageStartupMessages(library("dplyr"))
suppressPackageStartupMessages(library("ggplot2"))
suppressPackageStartupMessages(library("patchwork"))
knitr::opts_chunk$set(echo = FALSE, warning=FALSE, message=FALSE)
source("https://raw.githubusercontent.com/covid19br/covid19br.github.io/master/_src/fct/funcoes.R")
#####################################
#####################################
#####################################
# Parsing Command Line Options
option_list <- list(
make_option("--u", default = "p",
help = ("Unidade (país/estado/cidade) de interesse para gerar relatório. p para Brasil, e para Estado, c para cidades."),
metavar = "Unidade"),
make_option("--n", default = "Brasil",
help = ("Sigla da unidade de interesse."),
metavar = "Nome_Unidade"),
make_option("--d", default = "t",
help = ("Data para confecção do relatório. O padrão é t para hoje (today), com formato AAAA-MM-DD"),
metavar = "Data")##,
## make_option("--p", type="integer", default=100,
## help=("Print frequency [default %default]."),
## metavar="Prnt_freq"),
## make_option("--d", type="double", default=0.05,
## help=("Window size update parameters [default %default]."),
## metavar="D-Range"),
## make_option("--r", type="integer", default=5,
## help=("Rate to use for the calculation of the prior [default %default]."),
## metavar="Rate"),
## make_option("--t", type="integer", default=1,
## help=("Number of trees [default %default]."),
## metavar="Tree"),
## make_option("--rho", default= NULL,
## help=("Taxon sampling (in quotations, space separated) [default %default]."),
## metavar="Sampling"),
## make_option("--w", default="1.5 1.5 2 2",
## help=("Window size for lambda, mu, q and gamma (shape parameters for the hyperpriors) [default %default]." ),metavar="Window_Size"),
## make_option("--c", default="NULL",
## help="Parameters to be constrained. Introduce \"lamdas\", \"mus\" or \"qs\". \n\t\t Alternatively a string representing the desired contraints can be introduced. \n\t\t E.g. For three states under a Musse model: \"1,2,2,4,5,6,7,8,9,10,8,12\" \n\t\t This indicates lamda3 ~ lamda2 and q31 ~ q13 [default %default]",
## metavar="Constraints")
)
# ---------------------------------------------------------------------------
# Report driver: parse command-line options, load COVID-19 case data for the
# selected unit (country "p", state "e" or municipality "m"), fit a 5-day
# exponential projection and render the Rmd report to PDF.
# NOTE(review): depends on `option_list` (built above this block) and on the
# helpers `diazero()` / `forecast.exponential()` defined elsewhere in the
# project -- confirm they are sourced before this script runs.
# ---------------------------------------------------------------------------
parser_object <- OptionParser(usage = "Rscript %prog [Opções] [ARQUIVO]\n",
                              option_list = option_list,
                              description = "Script para compilar reports personalizados. Caso deseje gerar um relatório para o país, usar opção --u 'p'; caso queira algum estado em particular, usar opções --u 'e' --n '[NOME_ESTADO]'. Caso deseje usar uma tabela externa, indicar o caminho para o arquivo .csv após as opções. A tabela deverá conter obrigatoriamente ao menos duas colunas: 'day' e 'total.confirmed.cases'")
opt <- parse_args(parser_object, args = commandArgs(trailingOnly = TRUE), positional_arguments=TRUE)
#####################################
# Handling User Input and/or Option Errors
# A non-country run requires an explicit unit name via -n (default "Brasil").
if(opt$options$u != "p" & opt$options$n == "Brasil"){
    cat("Erro: informar sigla do Estado desejado.\n\n"); print_help(parser_object); quit(status=1)
}
#####################################
# Defining Variables and Objects
#set.seed(2)
# unid: unit type ("p"/"e"/"m"); nome_unid: unit name; tempo: reference date
# ("t" = today, otherwise a parseable date string).
unid <- opt$options$u
nome_unid <- opt$options$n
tempo <- opt$options$d
# Title shown in the rendered report, depending on the unit type.
if(unid=="e")
    nome_titulos <- paste("Estado de/da", nome_unid)
if(unid=="m")
    nome_titulos <- paste("Município de", nome_unid)
if(unid=="p")
    nome_titulos <- "Brasil"
# No positional argument: read the dated bundled dataset; otherwise read the
# user-supplied CSV path given on the command line.
if(length(opt$args) == 0){
    dados.full <- read.csv(paste0("./dados/BRnCov19_", ifelse(tempo == "t", format(Sys.Date(), "%Y%m%d"), format(as.Date(tempo), "%Y%m%d")), ".csv"), as.is = TRUE, sep = ",")
    #names(dados.full)[grep("dat", names(dados.full))] <- "data"
    #names(dados.full)[2] <- "estado"
    #names(dados.full)[grep("estad", names(dados.full))] <- "estado"
    #dados.full$data <- rep(seq.Date(from = as.Date("2020/01/30", format = "%Y/%m/%d"), to = as.Date("2020/04/06", format = "%Y/%m/%d"), by = 1), times = length(unique(dados.full$estado)))
    #dados.full$data <- as.Date(dados.full$data, format = "%d/%m/%Y")
    #names(dados.full) <- c("regiao", "estado", "data", "novos.casos", "casos.acumulados", "obitos.novos", "obitos.acumulados")
    #write.table(dados.full, file = paste0("./dados/BRnCov19_", format(Sys.Date(), "%Y%m%d"), ".csv"), row.names = FALSE, sep = ",")
    #write.table(dados.full, file = "./dados/EstadosCov19.csv", row.names = FALSE, sep = ",")
} else {
    dados.full <- read.csv(paste0(opt$args[1]), as.is = TRUE)
    #dados.full[rowSums(is.na(dados.full)) != 5,]
}
# Build the cumulative-cases zoo series and the 5-day exponential projection.
if(unid == "p"){
    dados.br <- read.csv("./dados/BrasilCov19.csv", as.is = TRUE)
    #names(dados.br)[1] <- "data"
    #write.table(dados.br, file = "./dados/BrasilCov19.csv", row.names = FALSE, sep = ",")
    dados.clean <- dados.br[, c("data", "casos.acumulados")]
    dados.clean$data <- as.Date(dados.clean$data)
    #as.data.frame(aggregate(dados.full$casos.acumulados, by = list(dados.full$data), FUN = sum, na.rm = TRUE))
    #dados.full[rowSums(is.na(dados.full)) != 5,]
    names(dados.clean) <- c("day", "confirmed.cases")
    nconf <- dados.clean[!duplicated(dados.clean),]
    nconf.zoo <- zoo(nconf[,-1], as.Date(nconf$day)) %>%
        diazero(limite = 1)
    ## Projections: exponential fit over the last 5 observed days
    exp.5d <- forecast.exponential(nconf.zoo,
                                   start = length(time(nconf.zoo))-4,
                                   days.forecast = 5)
    data.final <- format(time(exp.5d)[5], format="%d de %B")
} else if(unid == "e"){
    dados.full <- read.csv("./dados/EstadosCov19.csv", as.is = TRUE)
    dados.filter <- dados.full[dados.full$estado == nome_unid,]
    dados.clean <- as.data.frame(aggregate(dados.filter$casos.acumulados, by = list(dados.filter$data), FUN = sum, na.rm = TRUE))
    ## Remove the trailing days if they are blank
    ## if(sum(dados.clean[, 1] >= Sys.Date()) != 0 & sum(dados.clean[, 2] == 0) != 0){
    ##     dados.clean <- dados.clean[-which((dados.clean[, 1] >= Sys.Date()) != 0 & sum(dados.clean[, 2] == 0) != 0),]
    ## }
    names(dados.clean) <- c("day", "confirmed.cases")
    nconf <- dados.clean[!duplicated(dados.clean),]
    nconf.zoo <- zoo(nconf[,-1], as.Date(nconf$day)) %>%
        diazero(limite = 1)
    ## Projections: exponential fit over the last 5 observed days
    exp.5d <- forecast.exponential(nconf.zoo,
                                   start = length(time(nconf.zoo))-4,
                                   days.forecast = 5)
    data.final <- format(time(exp.5d)[5], format = "%d de %B")
}
# Render the report; the output filename embeds the unit name and timestamp.
# NOTE(review): no branch above prepares the series for unid == "m" -- the
# Rmd template presumably handles municipalities itself; confirm.
render(input = "./projecoes_observatorio_modelo.Rmd",
       output_file = paste0("./relatorios_gerados/relatorio_", gsub(" ", "_", nome_unid), "_", ifelse(tempo == "t", format(Sys.time(), '%d-%m-%Y_%Hh%Mmin%Ss'), format(as.Date(tempo), "%d-%m-%Y")), ".pdf"),
       encoding = "utf8")
|
# Count how many entries of `index_vector` fall on the corner cells of an
# nrow x ncol grid (1-based linear indexing), or return the corner indexes.
#
# nrow, ncol:    grid dimensions.
# index_vector:  integer vector of linear cell indexes to check (may be NULL,
#                in which case the count is 0).
# give_indexes:  if TRUE, return the four corner indexes instead of a count.
# Returns either the corner indexes (give_indexes = TRUE) or the number of
# entries of index_vector that are corners (duplicates counted separately).
corner_number <- function(nrow, ncol, index_vector = NULL, give_indexes = FALSE){
  # Linear indexes of the four corners.
  # NOTE(review): this formula assumes consecutive indexes run along a row of
  # length `ncol` (row-major numbering) -- confirm against callers.
  corner_indexes <- c(1, ncol, nrow*ncol, (nrow*ncol) - (ncol - 1))
  if (give_indexes) {
    return(corner_indexes)
  }
  # Vectorized replacement for the original element-by-element loop;
  # sum(logical) gives the number of TRUE matches.
  sum(index_vector %in% corner_indexes)
} # corner number function
|
/Simulations/corner_number.R
|
permissive
|
mccabete/SpatialAdjacency
|
R
| false
| false
| 459
|
r
|
# Count how many entries of `index_vector` are corner cells of an
# nrow x ncol grid (1-based linear indexing), or return the corner
# indexes themselves when give_indexes is TRUE.
corner_number <- function(nrow, ncol, index_vector = NULL, give_indexes = FALSE){
  total_cells <- nrow * ncol
  corner_indexes <- c(1, ncol, total_cells, total_cells - (ncol - 1))
  if (give_indexes) {
    return(corner_indexes)
  }
  # Tally the indexes that land on a corner (loop over NULL does nothing,
  # so an absent index_vector yields 0).
  hits <- 0
  for (idx in index_vector) {
    if (idx %in% corner_indexes) {
      hits <- hits + 1
    }
  }
  hits
} # corner number function
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sj.setGeomColors}
\alias{sj.setGeomColors}
\title{Helper function to set geom colors}
\usage{
sj.setGeomColors(plot, geom.colors, pal.len, show.guide = TRUE,
labels = NULL)
}
\arguments{
\item{plot}{a ggplot object where scales (geom colors) should be set}
\item{geom.colors}{the color palette for the scales to be used}
\item{pal.len}{the length of the required colors in \code{geom.colors}}
\item{show.guide}{whether or not legend should be displayed}
\item{labels}{a character vector with legend labels}
}
\description{
Helper function to set geom colors
}
|
/man/sj.setGeomColors.Rd
|
no_license
|
wilpi/devel
|
R
| false
| false
| 626
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sj.setGeomColors}
\alias{sj.setGeomColors}
\title{Helper function to set geom colors}
\usage{
sj.setGeomColors(plot, geom.colors, pal.len, show.guide = TRUE,
labels = NULL)
}
\arguments{
\item{plot}{a ggplot object where scales (geom colors) should be set}
\item{geom.colors}{the color palette for the scales to be used}
\item{pal.len}{the length of the required colors in \code{geom.colors}}
\item{show.guide}{whether or not legend should be displayed}
\item{labels}{a character vector with legend labels}
}
\description{
Helper function to set geom colors
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ggcyto-flowSet.R
\name{as.filterList.data.frame}
\alias{as.filterList.data.frame}
\title{Convert data.frame back to filterList}
\usage{
as.filterList.data.frame(df, pcols = ".rownames")
}
\arguments{
\item{pcols}{the pData columns}
}
\description{
It is used for gating purposes for the geom_stats layer
(no longer needed since the data is now not fortified until print.ggcyto)
}
|
/man/as.filterList.data.frame.Rd
|
no_license
|
kdh4win4/ggcyto
|
R
| false
| false
| 462
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ggcyto-flowSet.R
\name{as.filterList.data.frame}
\alias{as.filterList.data.frame}
\title{Convert data.frame back to filterList}
\usage{
as.filterList.data.frame(df, pcols = ".rownames")
}
\arguments{
\item{pcols}{the pData columns}
}
\description{
It is used for gating purposes for the geom_stats layer
(no longer needed since the data is now not fortified until print.ggcyto)
}
|
#Module with functions for campare gene expression distributions between controls and treatment
source('cp_plots_pert.R')
#takes as input the gene_id of the gene and the cp_name, the name of the treatment to compare with the control
# Welch two-sample t-test comparing a gene's expression (z_score) under one
# treatment against the control distribution for the same gene.
#
# mod_df:  long table of treated samples (pr_gene_id, pert_iname, z_score, ...)
# ctl_df:  long table of control samples (pr_gene_id, z_score, ...)
# gene_id: gene to test; cp_name: treatment (compound) name.
# Returns the htest object produced by t.test().
gene_ttest <- function(mod_df, ctl_df, gene_id, cp_name){
  control_scores <- (ctl_df %>% filter(pr_gene_id == gene_id))$z_score
  treated_scores <- (mod_df %>% filter(pr_gene_id == gene_id, pert_iname == cp_name))$z_score
  t.test(treated_scores, control_scores, alternative = "two.sided", paired = FALSE)
}
#In following sections, we will implement functions to do a batch of t-tests at once.
#In order to better manipulate the results, it is simpler to convert the output to a dataframe
# Flatten a list of t-test results into one data frame, one row per test,
# with the treatment identifiers from `all_cp` prepended as the first column.
df_ttest <- function(list_ttest, all_cp){
  stacked <- rbindlist(list_ttest)
  # NOTE(review): rbindlist() yields two (repeated) rows per test here, per
  # the original author's comment -- keep only the odd rows.
  odd_rows <- seq(1, nrow(stacked), 2)
  cbind(all_cp, stacked[odd_rows, ])
}
#The following function allows the user to run the t-test for a given gene across all drugs that are available in a give list of sig_id and modulation
# Run gene_ttest() for one gene against every treatment present in mod_df
# and return the results as a data frame (one row per treatment), tagged
# with the numeric gene id in column pr_gene_id.
test_per_gene <- function(gene, mod_df, ctl_df){
  treatments <- as.data.frame(unique(mod_df$pert_iname))
  tests <- apply(treatments, 1, gene_ttest,
                 mod_df = mod_df, ctl_df = ctl_df, gene_id = gene)
  result <- df_ttest(tests, treatments)
  result$pr_gene_id <- as.numeric(gene)
  result
}
# Run test_per_gene() for every gene in gene_list and stack the results into
# a single data frame; the first column is renamed to "pert_iname" and
# pr_gene_id carries the original (possibly non-numeric) gene identifier.
test_all <- function(gene_list, mod_df, ctl_df){
  # Robustness: the original crashed on an empty gene list when renaming
  # columns of an empty data frame; return an empty frame instead.
  if (length(gene_list) == 0) {
    return(data.frame())
  }
  # Preallocate and bind once at the end instead of growing with rbind()
  # inside the loop (O(n^2) copying). seq_along() is safe where 1:length()
  # is not; the original's manual `i = i + 1` was a no-op (for() resets i)
  # and has been removed.
  per_gene <- vector("list", length(gene_list))
  for (i in seq_along(gene_list)) {
    new_test <- test_per_gene(gene_list[i], mod_df, ctl_df)
    # Overwrite the numeric id set by test_per_gene with the original value,
    # matching the original behaviour.
    new_test$pr_gene_id <- gene_list[i]
    per_gene[[i]] <- new_test
    print(i)  # progress indicator
  }
  all_tests <- do.call(rbind, per_gene)
  colnames(all_tests)[1] <- "pert_iname"
  all_tests
}
#-------FUNCTIONS TO PLOT HEATMAPS WITH THE RESULT OF TESTS------------------
# Reshape a tidy t-test results table into a wide gene x treatment matrix for
# heatmap plotting: rows are genes (labelled by symbol), columns are
# treatments, values are signed -log10 p-values (sign follows the t statistic).
# NOTE(review): depends on the global lookup table `tcga_genes`
# (pr_gene_id -> pr_gene_symbol) being available in the calling environment.
test_wide <- function(tests_df){
  ph_df <- tests_df %>% select(pert_iname, pr_gene_id, statistic, p.value, conf.int)
  #taking the log of p-value, with different signs for positive or negative t stats
  # (a t statistic of exactly 0 matches neither case and yields NA)
  ph_df <- ph_df %>% dplyr::mutate(logp = case_when( statistic > 0 ~ -log10(p.value),
                                                     statistic < 0 ~ log10(p.value))
  )
  #organizing the data for plotting
  ph_wide <- ph_df %>%
    select(pert_iname, pr_gene_id, logp) %>%
    tidyr::spread(pert_iname, logp)
  # Attach gene symbols, promote them to rownames, then drop the id columns.
  ph_wide <- merge(tcga_genes[,1:2], ph_wide, by = "pr_gene_id")
  rownames(ph_wide) <- ph_wide$pr_gene_symbol
  ph_wide$pr_gene_id <- NULL
  ph_wide$pr_gene_symbol <- NULL
  return(ph_wide)
}
# Draw (and save) an annotated heatmap of the test results.
#
# test_tidy:  tidy results table; its first column supplies the treatment
#             names used for the drug-class row annotation.
# wide_table: wide gene x treatment matrix, e.g. from test_wide().
# filename/height/width: passed to pheatmap for the saved PNG.
# NOTE(review): relies on the globals `tcga_genes` (gene categories) and
# `pcl_custom` (drug classes) being defined in the calling environment.
plot_heatmap_test <- function(test_tidy, wide_table, clustering_distance_rows = "euclidean",
                              clustering_distance_cols = "euclidean", filename = "heatmap.png", height = 25, width = 15){
  #annotation of category of genes
  annot_df <- subset(tcga_genes, pr_gene_symbol %in% rownames(wide_table), c(pr_gene_symbol, Super.Category, Immune.Checkpoint))
  rownames(annot_df) <- annot_df$pr_gene_symbol
  annot_df$pr_gene_symbol <- NULL
  #annotation of classes of drugs
  pcl_df <- merge(pcl_custom[,2:3], test_tidy[1], by= "pert_iname") %>% distinct()
  pcl_df <- subset(pcl_df, !duplicated(pcl_df [,1]) )
  rownames(pcl_df) <- pcl_df$pert_iname
  pcl_df$pert_iname <- NULL
  # Making sure that 0 is in the center of the color scheme
  breaks_list <- seq(-10, 10, by = 0.5)
  my_colors <- colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name = "RdYlBu")))(length(breaks_list))
  #generating the heatmap
  # First call renders to the active graphics device (row-annotation legend
  # suppressed); second call writes the same heatmap to `filename`.
  wide_table %>% t() %>%
    pheatmap::pheatmap(color = my_colors, breaks = breaks_list, clustering_distance_rows = clustering_distance_rows, clustering_distance_cols = clustering_distance_cols,
                       annotation_row = pcl_df, annotation_col = annot_df, annotation_legend = FALSE)
  wide_table %>% t() %>%
    pheatmap::pheatmap(color = my_colors, breaks = breaks_list, clustering_distance_rows = clustering_distance_rows, clustering_distance_cols = clustering_distance_cols,
                       annotation_row = pcl_df, annotation_col = annot_df, filename = filename, height = height, width = width)
}
|
/cp_hypothesis_tests.R
|
no_license
|
heimannch/immunocmap
|
R
| false
| false
| 4,286
|
r
|
#Module with functions for campare gene expression distributions between controls and treatment
source('cp_plots_pert.R')
#takes as input the gene_id of the gene and the cp_name, the name of the treatment to compare with the control
# Welch two-sample t-test comparing a gene's expression (z_score) under one
# treatment against the control distribution for the same gene.
#
# mod_df:  long table of treated samples (pr_gene_id, pert_iname, z_score, ...)
# ctl_df:  long table of control samples (pr_gene_id, z_score, ...)
# gene_id: gene to test; cp_name: treatment (compound) name.
# Returns the htest object produced by t.test().
gene_ttest <- function(mod_df, ctl_df, gene_id, cp_name){
  control_scores <- (ctl_df %>% filter(pr_gene_id == gene_id))$z_score
  treated_scores <- (mod_df %>% filter(pr_gene_id == gene_id, pert_iname == cp_name))$z_score
  t.test(treated_scores, control_scores, alternative = "two.sided", paired = FALSE)
}
#In following sections, we will implement functions to do a batch of t-tests at once.
#In order to better manipulate the results, it is simpler to convert the output to a dataframe
# Flatten a list of t-test results into one data frame, one row per test,
# with the treatment identifiers from `all_cp` prepended as the first column.
df_ttest <- function(list_ttest, all_cp){
  stacked <- rbindlist(list_ttest)
  # NOTE(review): rbindlist() yields two (repeated) rows per test here, per
  # the original author's comment -- keep only the odd rows.
  odd_rows <- seq(1, nrow(stacked), 2)
  cbind(all_cp, stacked[odd_rows, ])
}
#The following function allows the user to run the t-test for a given gene across all drugs that are available in a give list of sig_id and modulation
# Run gene_ttest() for one gene against every treatment present in mod_df
# and return the results as a data frame (one row per treatment), tagged
# with the numeric gene id in column pr_gene_id.
test_per_gene <- function(gene, mod_df, ctl_df){
  treatments <- as.data.frame(unique(mod_df$pert_iname))
  tests <- apply(treatments, 1, gene_ttest,
                 mod_df = mod_df, ctl_df = ctl_df, gene_id = gene)
  result <- df_ttest(tests, treatments)
  result$pr_gene_id <- as.numeric(gene)
  result
}
# Run test_per_gene() for every gene in gene_list and stack the results into
# a single data frame; the first column is renamed to "pert_iname" and
# pr_gene_id carries the original (possibly non-numeric) gene identifier.
test_all <- function(gene_list, mod_df, ctl_df){
  # Robustness: the original crashed on an empty gene list when renaming
  # columns of an empty data frame; return an empty frame instead.
  if (length(gene_list) == 0) {
    return(data.frame())
  }
  # Preallocate and bind once at the end instead of growing with rbind()
  # inside the loop (O(n^2) copying). seq_along() is safe where 1:length()
  # is not; the original's manual `i = i + 1` was a no-op (for() resets i)
  # and has been removed.
  per_gene <- vector("list", length(gene_list))
  for (i in seq_along(gene_list)) {
    new_test <- test_per_gene(gene_list[i], mod_df, ctl_df)
    # Overwrite the numeric id set by test_per_gene with the original value,
    # matching the original behaviour.
    new_test$pr_gene_id <- gene_list[i]
    per_gene[[i]] <- new_test
    print(i)  # progress indicator
  }
  all_tests <- do.call(rbind, per_gene)
  colnames(all_tests)[1] <- "pert_iname"
  all_tests
}
#-------FUNCTIONS TO PLOT HEATMAPS WITH THE RESULT OF TESTS------------------
# Reshape a tidy t-test results table into a wide gene x treatment matrix for
# heatmap plotting: rows are genes (labelled by symbol), columns are
# treatments, values are signed -log10 p-values (sign follows the t statistic).
# NOTE(review): depends on the global lookup table `tcga_genes`
# (pr_gene_id -> pr_gene_symbol) being available in the calling environment.
test_wide <- function(tests_df){
  ph_df <- tests_df %>% select(pert_iname, pr_gene_id, statistic, p.value, conf.int)
  #taking the log of p-value, with different signs for positive or negative t stats
  # (a t statistic of exactly 0 matches neither case and yields NA)
  ph_df <- ph_df %>% dplyr::mutate(logp = case_when( statistic > 0 ~ -log10(p.value),
                                                     statistic < 0 ~ log10(p.value))
  )
  #organizing the data for plotting
  ph_wide <- ph_df %>%
    select(pert_iname, pr_gene_id, logp) %>%
    tidyr::spread(pert_iname, logp)
  # Attach gene symbols, promote them to rownames, then drop the id columns.
  ph_wide <- merge(tcga_genes[,1:2], ph_wide, by = "pr_gene_id")
  rownames(ph_wide) <- ph_wide$pr_gene_symbol
  ph_wide$pr_gene_id <- NULL
  ph_wide$pr_gene_symbol <- NULL
  return(ph_wide)
}
# Draw (and save) an annotated heatmap of the test results.
#
# test_tidy:  tidy results table; its first column supplies the treatment
#             names used for the drug-class row annotation.
# wide_table: wide gene x treatment matrix, e.g. from test_wide().
# filename/height/width: passed to pheatmap for the saved PNG.
# NOTE(review): relies on the globals `tcga_genes` (gene categories) and
# `pcl_custom` (drug classes) being defined in the calling environment.
plot_heatmap_test <- function(test_tidy, wide_table, clustering_distance_rows = "euclidean",
                              clustering_distance_cols = "euclidean", filename = "heatmap.png", height = 25, width = 15){
  #annotation of category of genes
  annot_df <- subset(tcga_genes, pr_gene_symbol %in% rownames(wide_table), c(pr_gene_symbol, Super.Category, Immune.Checkpoint))
  rownames(annot_df) <- annot_df$pr_gene_symbol
  annot_df$pr_gene_symbol <- NULL
  #annotation of classes of drugs
  pcl_df <- merge(pcl_custom[,2:3], test_tidy[1], by= "pert_iname") %>% distinct()
  pcl_df <- subset(pcl_df, !duplicated(pcl_df [,1]) )
  rownames(pcl_df) <- pcl_df$pert_iname
  pcl_df$pert_iname <- NULL
  # Making sure that 0 is in the center of the color scheme
  breaks_list <- seq(-10, 10, by = 0.5)
  my_colors <- colorRampPalette(rev(RColorBrewer::brewer.pal(n = 7, name = "RdYlBu")))(length(breaks_list))
  #generating the heatmap
  # First call renders to the active graphics device (row-annotation legend
  # suppressed); second call writes the same heatmap to `filename`.
  wide_table %>% t() %>%
    pheatmap::pheatmap(color = my_colors, breaks = breaks_list, clustering_distance_rows = clustering_distance_rows, clustering_distance_cols = clustering_distance_cols,
                       annotation_row = pcl_df, annotation_col = annot_df, annotation_legend = FALSE)
  wide_table %>% t() %>%
    pheatmap::pheatmap(color = my_colors, breaks = breaks_list, clustering_distance_rows = clustering_distance_rows, clustering_distance_cols = clustering_distance_cols,
                       annotation_row = pcl_df, annotation_col = annot_df, filename = filename, height = height, width = width)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listOMLFlows.R
\name{listOMLFlows}
\alias{listOMLFlows}
\title{List all registered OpenML flows.}
\usage{
listOMLFlows(tag = NULL, limit = NULL, offset = NULL,
verbosity = NULL)
}
\arguments{
\item{tag}{[\code{character}]\cr
If not \code{NULL} only entries with the corresponding \code{tag}s
are listed.}
\item{limit}{[\code{numeric(1)}]\cr
Optional. The maximum number of entries to return.
Without specifying \code{offset}, it returns the first '\code{limit}' entries.
Setting \code{limit = NULL} returns all available entries.}
\item{offset}{[\code{numeric(1)}]\cr
Optional. The offset to start from.
Should be indices starting from 0, which do not refer to IDs.
Is ignored when no \code{limit} is given.}
\item{verbosity}{[\code{integer(1)}]\cr
Print verbose output on console? Possible values are:\cr
\code{0}: normal output,\cr
\code{1}: info output,\cr
\code{2}: debug output.\cr
Default is set via \code{\link{setOMLConfig}}.}
}
\value{
[\code{data.frame}].
}
\description{
The returned \code{data.frame} contains the flow id \dQuote{fid},
the flow name (\dQuote{full.name} and \dQuote{name}), version information
(\dQuote{version} and \dQuote{external.version}) and the uploader (\dQuote{uploader})
of all registered OpenML flows.
}
\note{
This function is memoised. I.e., if you call this function twice in a running R session,
the first call will query the server and store the results in memory while the second and all subsequent calls will return
the cached results from the first call.
You can reset the cache by calling \code{\link[memoise]{forget}} on the function manually.
}
\examples{
# \dontrun{
# flows = listOMLFlows()
# tail(flows)
# }
}
\seealso{
Other listing functions: \code{\link{chunkOMLlist}},
\code{\link{listOMLDataSetQualities}},
\code{\link{listOMLDataSets}},
\code{\link{listOMLEstimationProcedures}},
\code{\link{listOMLEvaluationMeasures}},
\code{\link{listOMLRuns}}, \code{\link{listOMLSetup}},
\code{\link{listOMLStudies}},
\code{\link{listOMLTaskTypes}},
\code{\link{listOMLTasks}}
Other flow-related functions: \code{\link{convertOMLFlowToMlr}},
\code{\link{deleteOMLObject}}, \code{\link{getOMLFlow}},
\code{\link{makeOMLFlowParameter}},
\code{\link{makeOMLFlow}}, \code{\link{tagOMLObject}}
}
\concept{flow-related functions}
\concept{listing functions}
|
/man/listOMLFlows.Rd
|
permissive
|
annakrystalli/openml-r
|
R
| false
| true
| 2,409
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/listOMLFlows.R
\name{listOMLFlows}
\alias{listOMLFlows}
\title{List all registered OpenML flows.}
\usage{
listOMLFlows(tag = NULL, limit = NULL, offset = NULL,
verbosity = NULL)
}
\arguments{
\item{tag}{[\code{character}]\cr
If not \code{NULL} only entries with the corresponding \code{tag}s
are listed.}
\item{limit}{[\code{numeric(1)}]\cr
Optional. The maximum number of entries to return.
Without specifying \code{offset}, it returns the first '\code{limit}' entries.
Setting \code{limit = NULL} returns all available entries.}
\item{offset}{[\code{numeric(1)}]\cr
Optional. The offset to start from.
Should be indices starting from 0, which do not refer to IDs.
Is ignored when no \code{limit} is given.}
\item{verbosity}{[\code{integer(1)}]\cr
Print verbose output on console? Possible values are:\cr
\code{0}: normal output,\cr
\code{1}: info output,\cr
\code{2}: debug output.\cr
Default is set via \code{\link{setOMLConfig}}.}
}
\value{
[\code{data.frame}].
}
\description{
The returned \code{data.frame} contains the flow id \dQuote{fid},
the flow name (\dQuote{full.name} and \dQuote{name}), version information
(\dQuote{version} and \dQuote{external.version}) and the uploader (\dQuote{uploader})
of all registered OpenML flows.
}
\note{
This function is memoised. I.e., if you call this function twice in a running R session,
the first call will query the server and store the results in memory while the second and all subsequent calls will return
the cached results from the first call.
You can reset the cache by calling \code{\link[memoise]{forget}} on the function manually.
}
\examples{
# \dontrun{
# flows = listOMLFlows()
# tail(flows)
# }
}
\seealso{
Other listing functions: \code{\link{chunkOMLlist}},
\code{\link{listOMLDataSetQualities}},
\code{\link{listOMLDataSets}},
\code{\link{listOMLEstimationProcedures}},
\code{\link{listOMLEvaluationMeasures}},
\code{\link{listOMLRuns}}, \code{\link{listOMLSetup}},
\code{\link{listOMLStudies}},
\code{\link{listOMLTaskTypes}},
\code{\link{listOMLTasks}}
Other flow-related functions: \code{\link{convertOMLFlowToMlr}},
\code{\link{deleteOMLObject}}, \code{\link{getOMLFlow}},
\code{\link{makeOMLFlowParameter}},
\code{\link{makeOMLFlow}}, \code{\link{tagOMLObject}}
}
\concept{flow-related functions}
\concept{listing functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample-covariates.R
\name{summary.eav_covariates}
\alias{summary.eav_covariates}
\title{Provides a summary table of sample covariates.}
\usage{
\method{summary}{eav_covariates}(object, expanded = FALSE, droplevels = TRUE, ...)
}
\arguments{
\item{object}{A sample covariate table, the likes returned from
\code{\link[=fetch_sample_covariates]{fetch_sample_covariates()}}.}
\item{expanded}{includes details (rows) for each covariate per level
(or quantile), depending on the covariates \code{"class"} attribute.}
}
\value{
a tibble of summary sample covariate information with the following
columns:
\itemize{
\item \code{variable}: name of the variable
\item \code{class}: class of variable (real, categorical)
\item \code{nsamples}: the number of samples that have this variable defined
\item \code{level}: the level (or quantile) of the covariate
(included only when \code{expanded == TRUE})
\item \code{ninlevel}: the number of samples with this covariate value
(included only when \code{expanded == TRUE})
}
}
\description{
Summarizes a set of sample covariates (returned from
\code{\link[=fetch_sample_covariates]{fetch_sample_covariates()}}) at different granularities.
}
\examples{
fds <- exampleFacileDataSet()
covs <- fetch_sample_covariates(fds)
smry <- summary(covs)
details <- summary(covs, expanded = TRUE)
catdeetz <- covs \%>\%
filter(class == "categorical") \%>\%
summary(expanded = TRUE)
}
|
/man/summary.eav_covariates.Rd
|
permissive
|
jonocarroll/FacileData
|
R
| false
| true
| 1,488
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample-covariates.R
\name{summary.eav_covariates}
\alias{summary.eav_covariates}
\title{Provides a summary table of sample covariates.}
\usage{
\method{summary}{eav_covariates}(object, expanded = FALSE, droplevels = TRUE, ...)
}
\arguments{
\item{object}{A sample covariate table, the likes returned from
\code{\link[=fetch_sample_covariates]{fetch_sample_covariates()}}.}
\item{expanded}{includes details (rows) for each covariate per level
(or quantile), depending on the covariates \code{"class"} attribute.}
}
\value{
a tibble of summary sample covariate information with the following
columns:
\itemize{
\item \code{variable}: name of the variable
\item \code{class}: class of variable (real, categorical)
\item \code{nsamples}: the number of samples that have this variable defined
\item \code{level}: the level (or quantile) of the covariate
(included only when \code{expanded == TRUE})
\item \code{ninlevel}: the number of samples with this covariate value
(included only when \code{expanded == TRUE})
}
}
\description{
Summarizes a set of sample covariates (returned from
\code{\link[=fetch_sample_covariates]{fetch_sample_covariates()}}) at different granularities.
}
\examples{
fds <- exampleFacileDataSet()
covs <- fetch_sample_covariates(fds)
smry <- summary(covs)
details <- summary(covs, expanded = TRUE)
catdeetz <- covs \%>\%
filter(class == "categorical") \%>\%
summary(expanded = TRUE)
}
|
# Load data
# Read the puzzle input and split each line into single characters,
# producing a character matrix (one row per input line).
tree_lines <- do.call(rbind, strsplit(readLines("./3/input.txt"), ""))
# Part 1
## Clunky solution but it works
# Count the trees ("#") hit while descending a forest grid with slope
# (right `x`, down `y`), treating the grid as repeating horizontally.
# trees: character matrix of "." / "#"; returns the number of hits.
get_path <- function(trees, x, y){
  n_steps <- (nrow(trees) - 1) %/% y
  # Tile the grid horizontally often enough to cover the whole descent.
  n_tiles <- ((n_steps * x) %/% ncol(trees)) + 1
  trees <- matrix(trees, ncol = ncol(trees) * n_tiles, nrow = nrow(trees))
  cols <- seq(x, x * n_steps, by = x) + 1
  rows <- seq(y, y * n_steps, by = y) + 1
  cells <- vapply(seq_len(n_steps), function(i) trees[rows[i], cols[i]], character(1))
  sum(cells == "#")
}
# Part 1 answer: trees hit on the slope right 3 / down 1.
get_path(tree_lines, 3, 1)
# Part 2
## At least it applies nicely
# The five slopes required by part 2; the answer is the product of hits.
patterns <- data.frame(x = c(1,3,5,7,1),
                       y = c(1,1,1,1,2))
prod(apply(patterns, 1, function(x){ get_path(tree_lines, x["x"], x["y"]) }))
|
/3/day_3.R
|
no_license
|
clanfear/advent_2020
|
R
| false
| false
| 711
|
r
|
# Load data
# Read the puzzle input and split each line into single characters,
# producing a character matrix (one row per input line).
tree_lines <- do.call(rbind, strsplit(readLines("./3/input.txt"), ""))
# Part 1
## Clunky solution but it works
# Count the trees ("#") hit while descending a forest grid with slope
# (right `x`, down `y`), treating the grid as repeating horizontally.
# trees: character matrix of "." / "#"; returns the number of hits.
get_path <- function(trees, x, y){
  n_steps <- (nrow(trees) - 1) %/% y
  # Tile the grid horizontally often enough to cover the whole descent.
  n_tiles <- ((n_steps * x) %/% ncol(trees)) + 1
  trees <- matrix(trees, ncol = ncol(trees) * n_tiles, nrow = nrow(trees))
  cols <- seq(x, x * n_steps, by = x) + 1
  rows <- seq(y, y * n_steps, by = y) + 1
  cells <- vapply(seq_len(n_steps), function(i) trees[rows[i], cols[i]], character(1))
  sum(cells == "#")
}
# Part 1 answer: trees hit on the slope right 3 / down 1.
get_path(tree_lines, 3, 1)
# Part 2
## At least it applies nicely
# The five slopes required by part 2; the answer is the product of hits.
patterns <- data.frame(x = c(1,3,5,7,1),
                       y = c(1,1,1,1,2))
prod(apply(patterns, 1, function(x){ get_path(tree_lines, x["x"], x["y"]) }))
|
## This dataset is publicly available. I extracted it from the 'hake'
## dataset from the 'rcsurplus' package
# Remember the current working directory so it can be restored at the end
# (the script temporarily moves into data-raw/ to read the raw CSV).
oldwd <- getwd()
setwd("data-raw")
hake <- read.csv("hake.csv")
str(hake)
# CPUE (catch per unit effort) is stored as log_cpue; back-transform it.
hake$cpue <- exp(hake$log_cpue)
# Sanity check: cpue should equal catch/effort (result printed, not asserted).
all.equal(hake$catch/hake$effort, hake$cpue)
## Keep only CPUE
# Drop the 4th column.
# NOTE(review): assumes log_cpue is column 4 of hake.csv -- confirm.
hake <- hake[, -4]
str(hake)
# Save the cleaned dataset into the package's data/ directory.
save(hake, file = "../data/hake.rda")
setwd(oldwd)
|
/data-raw/hake.R
|
no_license
|
fernandomayer/dmfs
|
R
| false
| false
| 359
|
r
|
## This dataset is publicly available. I extracted it from the 'hake'
## dataset from the 'rcsurplus' package
# Remember the current working directory so it can be restored at the end
# (the script temporarily moves into data-raw/ to read the raw CSV).
oldwd <- getwd()
setwd("data-raw")
hake <- read.csv("hake.csv")
str(hake)
# CPUE (catch per unit effort) is stored as log_cpue; back-transform it.
hake$cpue <- exp(hake$log_cpue)
# Sanity check: cpue should equal catch/effort (result printed, not asserted).
all.equal(hake$catch/hake$effort, hake$cpue)
## Keep only CPUE
# Drop the 4th column.
# NOTE(review): assumes log_cpue is column 4 of hake.csv -- confirm.
hake <- hake[, -4]
str(hake)
# Save the cleaned dataset into the package's data/ directory.
save(hake, file = "../data/hake.rda")
setwd(oldwd)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix takes in a matrix and gives out a list of four functions.
## set function sets the variables of parent environment
## get function returns the matrix passed into makeCacheMatrix
## setinv sets the variable m in parent env
## getinv gets the variable m in parent env
## makeCacheMatrix uses the variable in parent environment to cache
## the inverse of matrix.
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of four accessor functions (set/get/setinv/getinv) that
# close over the matrix `x` and the cached inverse in this environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a freshly computed inverse in the cache.
    setinv = function(inv) cached_inverse <<- inv,
    # Return the cached inverse (NULL when nothing has been cached yet).
    getinv = function() cached_inverse
  )
}
## Write a short comment describing this function
## cacheSolve calculates the inverse of a matrix and stores the value, if there
## is no cached value. It returns the cached value when there is cached value
# Return the (pseudo-)inverse of the special "matrix" created by
# makeCacheMatrix, computing it only once and caching the result.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the pseudo-inverse and store it for next time.
    # ginv() handles non-square matrices, unlike solve().
    # NOTE(review): ginv() comes from MASS; the caller must load it.
    inverse <- ginv(x$get())
    x$setinv(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
/cachematrix.R
|
no_license
|
manasac/ProgrammingAssignment2
|
R
| false
| false
| 1,838
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix takes in a matrix and gives out a list of four functions.
## set function sets the variables of parent environment
## get function returns the matrix passed into makeCacheMatrix
## setinv sets the variable m in parent env
## getinv gets the variable m in parent env
## makeCacheMatrix uses the variable in parent environment to cache
## the inverse of matrix.
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of four accessor functions (set/get/setinv/getinv) that
# close over the matrix `x` and the cached inverse in this environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a freshly computed inverse in the cache.
    setinv = function(inv) cached_inverse <<- inv,
    # Return the cached inverse (NULL when nothing has been cached yet).
    getinv = function() cached_inverse
  )
}
## Write a short comment describing this function
## cacheSolve calculates the inverse of a matrix and stores the value, if there
## is no cached value. It returns the cached value when there is cached value
# Return the (pseudo-)inverse of the special "matrix" created by
# makeCacheMatrix, computing it only once and caching the result.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the pseudo-inverse and store it for next time.
    # ginv() handles non-square matrices, unlike solve().
    # NOTE(review): ginv() comes from MASS; the caller must load it.
    inverse <- ginv(x$get())
    x$setinv(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
|
# Backward-compatible alias: delegates to the v2 implementation.
# NOTE(review): colkd.temp.abs.v2 is defined elsewhere in the project.
`colkd.temp.abs` <-
function () { colkd.temp.abs.v2() }
|
/src/colkd.temp.abs.R
|
no_license
|
jonasbhend/re-calibration
|
R
| false
| false
| 56
|
r
|
# Backward-compatible alias: delegates to the v2 implementation.
# NOTE(review): colkd.temp.abs.v2 is defined elsewhere in the project.
`colkd.temp.abs` <-
function () { colkd.temp.abs.v2() }
|
# Build a tidy summary of the UCI HAR dataset: read the raw train/test files,
# keep only mean()/std() features, attach subjects and activities, and write
# the per-subject/per-activity means to meanBySubjectAndActivity.txt.
# load the train and test files into memory
testd <- read.table("UCI/test/X_test.txt", header = FALSE, sep = "")
train <- read.table("UCI/train/X_train.txt", header = FALSE, sep = "")
# now load the header (feature names, one per measurement column)
features <- read.table("UCI/features.txt", header = FALSE, sep = "")
# we only want the mean and std columns
f_mean_and_std_only <- features[grep("mean\\(\\)|std\\(\\)", features[, "V2"]), ]
# set column names of meta data
colnames(f_mean_and_std_only) <- c("idx", "colName")
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
testd_m_and_s_only <- testd[, f_mean_and_std_only[, "idx"]]
train_m_and_s_only <- train[, f_mean_and_std_only[, "idx"]]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(testd_m_and_s_only) <- f_mean_and_std_only[, "colName"]
colnames(train_m_and_s_only) <- f_mean_and_std_only[, "colName"]
# read subject and merge subject into dataframe
testSubj <- read.table("UCI/test/subject_test.txt", header = FALSE, sep = "")
testd_m_and_s_only$SubjectID <- testSubj[, "V1"]
trainSubj <- read.table("UCI/train/subject_train.txt", header = FALSE, sep = "")
train_m_and_s_only$SubjectID <- trainSubj[, "V1"]
# read activities and merge activities into dataframe
testActv <- read.table("UCI/test/y_test.txt", header = FALSE, sep = "")
testd_m_and_s_only$ActivityKey <- testActv[, "V1"]
trainActv <- read.table("UCI/train/y_train.txt", header = FALSE, sep = "")
train_m_and_s_only$ActivityKey <- trainActv[, "V1"]
# 1. Merges the training and the test sets to create one data set.
mergedData <- rbind(testd_m_and_s_only, train_m_and_s_only)
# 4. Appropriately labels the data set with descriptive variable names.
actMap <- read.table("UCI/activity_labels.txt", header = FALSE, sep = "")
colnames(actMap) <- c("ActivityKey", "ActivityDescription")
mergedData <- merge(mergedData, actMap, by.x = "ActivityKey", by.y = "ActivityKey", sort = FALSE)
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Columns 2:67 are the 66 mean()/std() measurement columns (column 1 is ActivityKey).
meanBySubjectAndActivity <- aggregate(mergedData[, 2:67], list(mergedData$SubjectID, mergedData$ActivityDescription), mean)
colnames(meanBySubjectAndActivity)[colnames(meanBySubjectAndActivity) == "Group.1"] <- "SubjectID"
colnames(meanBySubjectAndActivity)[colnames(meanBySubjectAndActivity) == "Group.2"] <- "ActivityDescription"
# write the data down to a file for submission
# Fix: the original relied on partial argument matching ("row.name");
# the actual write.table argument is `row.names`.
write.table(meanBySubjectAndActivity, file = "meanBySubjectAndActivity.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
davidtam/getData
|
R
| false
| false
| 2,478
|
r
|
# Build a tidy summary of the UCI HAR dataset: read the raw train/test files,
# keep only mean()/std() features, attach subjects and activities, and write
# the per-subject/per-activity means to meanBySubjectAndActivity.txt.
# load the train and test files into memory
testd <- read.table("UCI/test/X_test.txt", header = FALSE, sep = "")
train <- read.table("UCI/train/X_train.txt", header = FALSE, sep = "")
# now load the header (feature names, one per measurement column)
features <- read.table("UCI/features.txt", header = FALSE, sep = "")
# we only want the mean and std columns
f_mean_and_std_only <- features[grep("mean\\(\\)|std\\(\\)", features[, "V2"]), ]
# set column names of meta data
colnames(f_mean_and_std_only) <- c("idx", "colName")
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
testd_m_and_s_only <- testd[, f_mean_and_std_only[, "idx"]]
train_m_and_s_only <- train[, f_mean_and_std_only[, "idx"]]
# 4. Appropriately labels the data set with descriptive variable names.
colnames(testd_m_and_s_only) <- f_mean_and_std_only[, "colName"]
colnames(train_m_and_s_only) <- f_mean_and_std_only[, "colName"]
# read subject and merge subject into dataframe
testSubj <- read.table("UCI/test/subject_test.txt", header = FALSE, sep = "")
testd_m_and_s_only$SubjectID <- testSubj[, "V1"]
trainSubj <- read.table("UCI/train/subject_train.txt", header = FALSE, sep = "")
train_m_and_s_only$SubjectID <- trainSubj[, "V1"]
# read activities and merge activities into dataframe
testActv <- read.table("UCI/test/y_test.txt", header = FALSE, sep = "")
testd_m_and_s_only$ActivityKey <- testActv[, "V1"]
trainActv <- read.table("UCI/train/y_train.txt", header = FALSE, sep = "")
train_m_and_s_only$ActivityKey <- trainActv[, "V1"]
# 1. Merges the training and the test sets to create one data set.
mergedData <- rbind(testd_m_and_s_only, train_m_and_s_only)
# 4. Appropriately labels the data set with descriptive variable names.
actMap <- read.table("UCI/activity_labels.txt", header = FALSE, sep = "")
colnames(actMap) <- c("ActivityKey", "ActivityDescription")
mergedData <- merge(mergedData, actMap, by.x = "ActivityKey", by.y = "ActivityKey", sort = FALSE)
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Columns 2:67 are the 66 mean()/std() measurement columns (column 1 is ActivityKey).
meanBySubjectAndActivity <- aggregate(mergedData[, 2:67], list(mergedData$SubjectID, mergedData$ActivityDescription), mean)
colnames(meanBySubjectAndActivity)[colnames(meanBySubjectAndActivity) == "Group.1"] <- "SubjectID"
colnames(meanBySubjectAndActivity)[colnames(meanBySubjectAndActivity) == "Group.2"] <- "ActivityDescription"
# write the data down to a file for submission
# Fix: the original relied on partial argument matching ("row.name");
# the actual write.table argument is `row.names`.
write.table(meanBySubjectAndActivity, file = "meanBySubjectAndActivity.txt", row.names = FALSE)
|
\name{utility.aggregate.mix}
\alias{utility.aggregate.mix}
\title{Mixed aggregation of values and utilities}
\description{
Function to perform a mixed aggregation of values and utilities.
The mixture consists of a weighted mean of the additive, minimum
and geometric aggregation techniques.
}
\usage{
utility.aggregate.mix(u, par)
}
\arguments{
\item{u}{
numeric vector of values or utilities to be aggregated.
}
\item{par}{
numeric vector of weights for calculating the weighted mean of the
values provided in the argument \code{u} followed by the three
weights of the additive, minimum and geometric aggregation
techniques.
The weights need not be normalized, they will be normalized before use.
In case of missing values in the vector \code{u}, the weights of the
non-missing components will be rescaled to sum to unity.
}
}
\value{
The function returns the aggregated value or utility.
}
\details{
The aggregation function is a mixture of the functions
\code{\link{utility.aggregate.add}},
\code{\link{utility.aggregate.min}}, and
\code{\link{utility.aggregate.geo}}.
The following figure shows examples of the behaviour of this aggregation function for the two-dimensional case:\cr
\if{html}{\figure{aggregationmix.png}{options: width=80\%}}
\if{latex}{\figure{aggregationmix.pdf}{options: width=5in}}
}
\references{
Short description of the package: \cr\cr
Reichert, P., Schuwirth, N. and Langhans, S.,
Constructing, evaluating and visualizing value and utility functions for decision support, Environmental Modelling & Software 46, 283-291, 2013. \cr\cr
Textbooks on the use of utility and value functions in decision analysis: \cr\cr
Keeney, R. L. and Raiffa, H. Decisions with Multiple Objectives - Preferences and Value Tradeoffs. John Wiley & Sons, 1976. \cr\cr
Eisenfuehr, F., Weber, M. and Langer, T., Rational Decision Making, Springer, Berlin, 2010.
}
\author{
Peter Reichert <peter.reichert@eawag.ch>
}
\seealso{
Constructor of aggregation node: \cr\cr
\code{\link{utility.aggregation.create}} \cr\cr
Aggregation techniques provided by this package: \cr\cr
\code{\link{utility.aggregate.add}} for additive aggregation (weighted arithmetic mean), \cr
\code{\link{utility.aggregate.min}} for minimum aggregation, \cr
\code{\link{utility.aggregate.max}} for maximum aggregation, \cr
\code{\link{utility.aggregate.geo}} or \code{\link{utility.aggregate.cobbdouglas}} for geometric or Cobb-Douglas aggregation (weighted geometric mean), \cr
\code{\link{utility.aggregate.geooff}} for geometric aggregation with offset, \cr
\code{\link{utility.aggregate.revgeo}} for reverse geometric aggregation, \cr
\code{\link{utility.aggregate.revgeooff}} for reverse geometric aggregation with offset, \cr
\code{\link{utility.aggregate.harmo}} for harmonic aggregation (weighted harmonic mean), \cr
\code{\link{utility.aggregate.harmooff}} for harmonic aggregation with offset, \cr
\code{\link{utility.aggregate.revharmo}} for reverse harmonic aggregation, \cr
\code{\link{utility.aggregate.revharmooff}} for reverse harmonic aggregation with offset, \cr
\code{\link{utility.aggregate.mult}} for multiplicative aggregation, \cr
\code{\link{utility.aggregate.mix}} for a mixture of additive, minimum, and geometric aggregation, \cr
\code{\link{utility.aggregate.addmin}} for a mixture of additive and minimum aggregation. \cr
\code{\link{utility.aggregate.addpower}} for additive power aggregation (weighted power mean), \cr
\code{\link{utility.aggregate.revaddpower}} for reverse additive power aggregation, \cr
\code{\link{utility.aggregate.addsplitpower}} for split additive power aggregation, \cr
\code{\link{utility.aggregate.revaddsplitpower}} for reverse split additive power aggregation, \cr
\code{\link{utility.aggregate.bonusmalus}} for an aggregation technique that considers some of the values or utilities of sub-objectives only as bonus or malus. \cr
}
\examples{
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 1,0,0))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 0,1,0))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 0,0,1))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 1,1,1))
}
|
/man/utility.aggregate.mix.Rd
|
no_license
|
cran/utility
|
R
| false
| false
| 4,200
|
rd
|
\name{utility.aggregate.mix}
\alias{utility.aggregate.mix}
\title{Mixed aggregation of values and utilities}
\description{
Function to perform a mixed aggregation of values and utilities.
The mixture consists of a weighted mean of the additive, minimum
and geometric aggregation techniques.
}
\usage{
utility.aggregate.mix(u, par)
}
\arguments{
\item{u}{
numeric vector of values or utilities to be aggregated.
}
\item{par}{
numeric vector of weights for calculating the weighted mean of the
values provided in the argument \code{u} followed by the three
weights of the additive, minimum and geometric aggregation
techniques.
The weights need not be normalized, they will be normalized before use.
In case of missing values in the vector \code{u}, the weights of the
non-missing components will be rescaled to sum to unity.
}
}
\value{
The function returns the aggregated value or utility.
}
\details{
The aggregation function is a mixture of the functions
\code{\link{utility.aggregate.add}},
\code{\link{utility.aggregate.min}}, and
\code{\link{utility.aggregate.geo}}.
The following figure shows examples of the behaviour of this aggregation function for the two-dimensional case:\cr
\if{html}{\figure{aggregationmix.png}{options: width=80\%}}
\if{latex}{\figure{aggregationmix.pdf}{options: width=5in}}
}
\references{
Short description of the package: \cr\cr
Reichert, P., Schuwirth, N. and Langhans, S.,
Constructing, evaluating and visualizing value and utility functions for decision support, Environmental Modelling & Software 46, 283-291, 2013. \cr\cr
Textbooks on the use of utility and value functions in decision analysis: \cr\cr
Keeney, R. L. and Raiffa, H. Decisions with Multiple Objectives - Preferences and Value Tradeoffs. John Wiley & Sons, 1976. \cr\cr
Eisenfuehr, F., Weber, M. and Langer, T., Rational Decision Making, Springer, Berlin, 2010.
}
\author{
Peter Reichert <peter.reichert@eawag.ch>
}
\seealso{
Constructor of aggregation node: \cr\cr
\code{\link{utility.aggregation.create}} \cr\cr
Aggregation techniques provided by this package: \cr\cr
\code{\link{utility.aggregate.add}} for additive aggregation (weighted arithmetic mean), \cr
\code{\link{utility.aggregate.min}} for minimum aggregation, \cr
\code{\link{utility.aggregate.max}} for maximum aggregation, \cr
\code{\link{utility.aggregate.geo}} or \code{\link{utility.aggregate.cobbdouglas}} for geometric or Cobb-Douglas aggregation (weighted geometric mean), \cr
\code{\link{utility.aggregate.geooff}} for geometric aggregation with offset, \cr
\code{\link{utility.aggregate.revgeo}} for reverse geometric aggregation, \cr
\code{\link{utility.aggregate.revgeooff}} for reverse geometric aggregation with offset, \cr
\code{\link{utility.aggregate.harmo}} for harmonic aggregation (weighted harmonic mean), \cr
\code{\link{utility.aggregate.harmooff}} for harmonic aggregation with offset, \cr
\code{\link{utility.aggregate.revharmo}} for reverse harmonic aggregation, \cr
\code{\link{utility.aggregate.revharmooff}} for reverse harmonic aggregation with offset, \cr
\code{\link{utility.aggregate.mult}} for multiplicative aggregation, \cr
\code{\link{utility.aggregate.mix}} for a mixture of additive, minimum, and geometric aggregation, \cr
\code{\link{utility.aggregate.addmin}} for a mixture of additive and minimum aggregation. \cr
\code{\link{utility.aggregate.addpower}} for additive power aggregation (weighted power mean), \cr
\code{\link{utility.aggregate.revaddpower}} for reverse additive power aggregation, \cr
\code{\link{utility.aggregate.addsplitpower}} for split additive power aggregation, \cr
\code{\link{utility.aggregate.revaddsplitpower}} for reverse split additive power aggregation, \cr
\code{\link{utility.aggregate.bonusmalus}} for an aggregation technique that considers some of the values or utilities of sub-objectives only as bonus or malus. \cr
}
\examples{
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 1,0,0))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 0,1,0))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 0,0,1))
utility.aggregate.mix(c(0.2,0.8),par=c(1,1 , 1,1,1))
}
|
# Tests for the "bioc" remote type: parsing refs, resolving them against
# Bioconductor, downloading the resolved package, and the "satisfies" logic
# that decides whether a candidate resolution fulfils a bioc ref.
context("type bioc")
# A plain "bioc::Biobase" ref parses into a remote_ref_bioc object with the
# package name set and empty version fields (no version constraint given).
test_that("parse_remote", {
pr <- parse_remotes("bioc::Biobase")[[1]]
expect_equal(pr$package, "Biobase")
expect_equal(pr$atleast, "")
expect_equal(pr$version, "")
expect_equal(pr$ref, "bioc::Biobase")
expect_equal(pr$type, "bioc")
expect_true("remote_ref_bioc" %in% class(pr))
expect_true("remote_ref" %in% class(pr))
})
# Resolution of an existing BioC package.  Needs network access, so it is
# skipped offline and on CRAN.  The result is a tibble whose metadata
# entries echo the ref, type, sha (= version here) and mirror.
test_that("resolve_remote", {
skip_if_offline()
skip_on_cran()
conf <- remotes_default_config()
# No package cache is needed for resolution; the metadata cache is shared.
cache <- list(package = NULL, metadata = global_metadata_cache)
res <- synchronise(
resolve_remote_bioc(parse_remotes("bioc::Biobase")[[1]], TRUE, conf,
cache, dependencies = FALSE)
)
expect_true(is_tibble(res))
expect_true(all(res$ref == "bioc::Biobase"))
expect_true(all(res$type == "bioc"))
expect_true(all(res$direct))
expect_true(all(res$status == "OK"))
expect_true(all(res$package == "Biobase"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteType")== "bioc"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteRef") == "bioc::Biobase"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteSha") == res$version))
expect_true(all(vcapply(res$metadata, "[[", "RemoteRepos") == res$mirror))
})
# Resolving a ref whose package name cannot exist (random temp-file name)
# must fail gracefully with status "FAILED" instead of raising an error.
test_that("failed resolution", {
skip_if_offline()
skip_on_cran()
conf <- remotes_default_config()
cache <- list(package = NULL, metadata = global_metadata_cache)
# basename(tempfile()) yields a name essentially guaranteed not to be a
# real Bioconductor package.
ref <- paste0("bioc::", basename(tempfile()))
res <- synchronise(
resolve_remote_bioc(parse_remotes(ref)[[1]], TRUE, conf,
cache, dependencies = FALSE)
)
expect_true(all(res$status == "FAILED"))
## Existing package, non-existing version
# NOTE(review): everything below the skip() is dormant pending support for
# version-constrained bioc refs; `tmp` is not defined at this point, so the
# code would not run as-is — confirm before re-enabling.
skip("TODO")
r <- remotes$new(
"bioc::Biobase@0.0", config = list(cache_dir = tmp))
withr::with_options(
c(pkg.show_progress = FALSE),
expect_error(r$resolve(), NA))
res <- r$get_resolution()
expect_true(all(res$data$status == "FAILED"))
})
# Download of a resolved bioc package: the first download reports "Got";
# after deleting the file, a second request is served from the package
# cache ("Had"/"Current") and recreates the target file.
test_that("download_remote", {
skip_if_offline()
skip_on_cran()
dir.create(tmp <- tempfile())
dir.create(tmp2 <- tempfile())
on.exit(unlink(c(tmp, tmp2), recursive = TRUE), add = TRUE)
conf <- remotes_default_config()
conf$platforms <- "macos"
conf$cache_dir <- tmp
conf$package_cache_dir <- tmp2
cache <- list(
package = package_cache$new(conf$package_cache_dir),
metadata = global_metadata_cache)
res <- synchronise(
resolve_remote_bioc(parse_remotes("bioc::Biobase")[[1]], TRUE, conf, cache,
dependencies = FALSE))
target <- file.path(conf$cache_dir, res$target[1])
dl <- synchronise(
download_remote_bioc(res[1,], target, conf, cache, on_progress = NULL))
expect_equal(dl, "Got")
expect_true(file.exists(target))
# Remove the downloaded file; the repeat download must come from cache.
unlink(target)
dl2 <- synchronise(
download_remote_bioc(res[1,], target, conf, cache, on_progress = NULL))
expect_true(dl2 %in% c("Had", "Current"))
expect_true(file.exists(target))
})
# satisfy_remote_bioc(): which candidate resolutions satisfy the reference
# resolution bioc::eisa@>=1.0.0.  Rejections return FALSE with a
# human-readable "reason" attribute that is asserted on below.
test_that("satisfies_remote", {
res <- make_fake_resolution(`bioc::eisa@>=1.0.0` = list())
## GitHub type is never good
bad1 <- make_fake_resolution(`github::r-lib/eisa` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad1))
expect_match(attr(ans, "reason"), "Type must be")
## Missing DESCRIPTION for installed type
bad2 <- make_fake_resolution(`installed::foobar` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad2))
expect_match(attr(ans, "reason"), "not from BioC")
## installed, but not from BioC
# A fresh DESCRIPTION has no biocViews field, so it does not count as a
# BioC install.
fake_desc <- desc::desc("!new")
bad3 <- make_fake_resolution(`installed::foobar` = list(
extra = list(list(description = fake_desc))))
expect_false(ans <- satisfy_remote_bioc(res, bad3))
expect_match(attr(ans, "reason"), "not from BioC")
## BioC type, but package name does not match
bad4 <- make_fake_resolution(`bioc::eisa2` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad4))
expect_match(attr(ans, "reason"), "names differ")
## installed type, but package name does not match
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews = "foobar")
bad5 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa2",
extra = list(list(description = fake_desc))))
expect_false(ans <- satisfy_remote_bioc(res, bad5))
expect_match(attr(ans, "reason"), "names differ")
## BioC type, but version is not good enough
bad6 <- make_fake_resolution(`bioc::eisa` = list(version = "0.0.1"))
expect_false(ans <- satisfy_remote_bioc(res, bad6))
expect_match(attr(ans, "reason"), "Insufficient version")
## Same version, BioC
ok1 <- make_fake_resolution(`bioc::eisa` = list())
expect_true(satisfy_remote_bioc(res, ok1))
## Newer version, BioC
ok2 <- make_fake_resolution(`bioc::eisa` = list(version = "2.0.0"))
expect_true(satisfy_remote_bioc(res, ok2))
## Same version, installed
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews ="BioC")
ok3 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa",
extra = list(list(description = fake_desc))))
expect_true(satisfy_remote_bioc(res, ok3))
## Newer version, installed
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews = "foobar")
ok4 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa",
version = "2.0.0",
extra = list(list(description = fake_desc))))
expect_true(satisfy_remote_bioc(res, ok4))
})
|
/tests/testthat/test-type-bioc.R
|
permissive
|
dpastoor/pkgdepends
|
R
| false
| false
| 5,377
|
r
|
context("type bioc")
test_that("parse_remote", {
pr <- parse_remotes("bioc::Biobase")[[1]]
expect_equal(pr$package, "Biobase")
expect_equal(pr$atleast, "")
expect_equal(pr$version, "")
expect_equal(pr$ref, "bioc::Biobase")
expect_equal(pr$type, "bioc")
expect_true("remote_ref_bioc" %in% class(pr))
expect_true("remote_ref" %in% class(pr))
})
test_that("resolve_remote", {
skip_if_offline()
skip_on_cran()
conf <- remotes_default_config()
cache <- list(package = NULL, metadata = global_metadata_cache)
res <- synchronise(
resolve_remote_bioc(parse_remotes("bioc::Biobase")[[1]], TRUE, conf,
cache, dependencies = FALSE)
)
expect_true(is_tibble(res))
expect_true(all(res$ref == "bioc::Biobase"))
expect_true(all(res$type == "bioc"))
expect_true(all(res$direct))
expect_true(all(res$status == "OK"))
expect_true(all(res$package == "Biobase"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteType")== "bioc"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteRef") == "bioc::Biobase"))
expect_true(all(vcapply(res$metadata, "[[", "RemoteSha") == res$version))
expect_true(all(vcapply(res$metadata, "[[", "RemoteRepos") == res$mirror))
})
test_that("failed resolution", {
skip_if_offline()
skip_on_cran()
conf <- remotes_default_config()
cache <- list(package = NULL, metadata = global_metadata_cache)
ref <- paste0("bioc::", basename(tempfile()))
res <- synchronise(
resolve_remote_bioc(parse_remotes(ref)[[1]], TRUE, conf,
cache, dependencies = FALSE)
)
expect_true(all(res$status == "FAILED"))
## Existing package, non-existing version
skip("TODO")
r <- remotes$new(
"bioc::Biobase@0.0", config = list(cache_dir = tmp))
withr::with_options(
c(pkg.show_progress = FALSE),
expect_error(r$resolve(), NA))
res <- r$get_resolution()
expect_true(all(res$data$status == "FAILED"))
})
test_that("download_remote", {
skip_if_offline()
skip_on_cran()
dir.create(tmp <- tempfile())
dir.create(tmp2 <- tempfile())
on.exit(unlink(c(tmp, tmp2), recursive = TRUE), add = TRUE)
conf <- remotes_default_config()
conf$platforms <- "macos"
conf$cache_dir <- tmp
conf$package_cache_dir <- tmp2
cache <- list(
package = package_cache$new(conf$package_cache_dir),
metadata = global_metadata_cache)
res <- synchronise(
resolve_remote_bioc(parse_remotes("bioc::Biobase")[[1]], TRUE, conf, cache,
dependencies = FALSE))
target <- file.path(conf$cache_dir, res$target[1])
dl <- synchronise(
download_remote_bioc(res[1,], target, conf, cache, on_progress = NULL))
expect_equal(dl, "Got")
expect_true(file.exists(target))
unlink(target)
dl2 <- synchronise(
download_remote_bioc(res[1,], target, conf, cache, on_progress = NULL))
expect_true(dl2 %in% c("Had", "Current"))
expect_true(file.exists(target))
})
test_that("satisfies_remote", {
res <- make_fake_resolution(`bioc::eisa@>=1.0.0` = list())
## GitHub type is never good
bad1 <- make_fake_resolution(`github::r-lib/eisa` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad1))
expect_match(attr(ans, "reason"), "Type must be")
## Missing DESCRIPTION for installed type
bad2 <- make_fake_resolution(`installed::foobar` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad2))
expect_match(attr(ans, "reason"), "not from BioC")
## installed, but not from BioC
fake_desc <- desc::desc("!new")
bad3 <- make_fake_resolution(`installed::foobar` = list(
extra = list(list(description = fake_desc))))
expect_false(ans <- satisfy_remote_bioc(res, bad3))
expect_match(attr(ans, "reason"), "not from BioC")
## BioC type, but package name does not match
bad4 <- make_fake_resolution(`bioc::eisa2` = list())
expect_false(ans <- satisfy_remote_bioc(res, bad4))
expect_match(attr(ans, "reason"), "names differ")
## installed type, but package name does not match
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews = "foobar")
bad5 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa2",
extra = list(list(description = fake_desc))))
expect_false(ans <- satisfy_remote_bioc(res, bad5))
expect_match(attr(ans, "reason"), "names differ")
## BioC type, but version is not good enough
bad6 <- make_fake_resolution(`bioc::eisa` = list(version = "0.0.1"))
expect_false(ans <- satisfy_remote_bioc(res, bad6))
expect_match(attr(ans, "reason"), "Insufficient version")
## Same version, BioC
ok1 <- make_fake_resolution(`bioc::eisa` = list())
expect_true(satisfy_remote_bioc(res, ok1))
## Newer version, BioC
ok2 <- make_fake_resolution(`bioc::eisa` = list(version = "2.0.0"))
expect_true(satisfy_remote_bioc(res, ok2))
## Same version, installed
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews ="BioC")
ok3 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa",
extra = list(list(description = fake_desc))))
expect_true(satisfy_remote_bioc(res, ok3))
## Newer version, installed
fake_desc <- desc::desc("!new")
fake_desc$set(biocViews = "foobar")
ok4 <- make_fake_resolution(`installed::foobar` = list(
package = "eisa",
version = "2.0.0",
extra = list(list(description = fake_desc))))
expect_true(satisfy_remote_bioc(res, ok4))
})
|
## Exercise 2b: simple and multiple linear regression models to forecast
## coffee disease incidence from ten-day climate aggregates.
## The assignment also asks for tests with monthly means/sums of the
## independent variables at 1-2 months of lead time, and lead times of
## 20/30/40/50/60/70 days (e.g. April incidence from March 11_20 data).

## Install only the packages that are missing, instead of unconditionally
## reinstalling all of them on every run as the original script did.
pkgs <- c("data.table", "magrittr", "corrplot", "psych", "dplyr", "plyr")
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  install.packages(missing_pkgs)
}
library(corrplot)
library(data.table)
library(magrittr)
library(dplyr)
library(psych)
library(plyr)

## Input data: disease incidence ("alta"/"baixa") plus climate columns.
df_total_alta = read.csv(file = './src/alta-regiao-2-alta - Página1.csv')
df_total_baixa = read.csv(file = './src/alta-regiao-2-baixa - Página1.csv')

## Discretise incidence: < 20.1 -> "baixo", > 60 -> "alto", else "medio".
## Vectorised with ifelse(); the original looped row by row.
converte = function(x) {
  ifelse(x < 20.1, "baixo", ifelse(x > 60, "alto", "medio"))
}
df_total_alta$alta_categoria = converte(df_total_alta$alta)
df_total_baixa$alta_categoria = converte(df_total_baixa$baixa)

## Reorder columns so identifiers and the category come first.
df_total_alta = df_total_alta[, c(27, 1, 2, 28, 3:26)]
df_total_baixa = df_total_baixa[, c(27, 1, 2, 28, 3:26)]

## The numeric columns use "," as decimal separator; convert to double.
## (options(digits=5) only affects printing; set once instead of per column.)
options(digits = 5)
to_numeric = function(x) as.double(sub(",", ".", x, fixed = TRUE))
df_total_alta[, 5:28] <- lapply(df_total_alta[, 5:28], to_numeric)
df_total_baixa[, 5:28] <- lapply(df_total_baixa[, 5:28], to_numeric)

## Each climate variable has three ten-day columns; average them into a
## monthly mean.  Column offsets are identical in both data frames, so one
## helper replaces the sixteen copy-pasted apply() calls of the original.
add_monthly_means = function(df) {
  blocks = list(P = 6:8, UR = 9:11, TMAX = 12:14, TMIN = 15:17,
                NDR.1mm = 18:20, NDR.10mm = 21:23,
                NRH80 = 24:26, NRH90 = 27:29)
  for (nm in names(blocks)) {
    df[[nm]] = rowMeans(df[, blocks[[nm]]])
  }
  df
}
df_total_alta = add_monthly_means(df_total_alta)
df_total_baixa = add_monthly_means(df_total_baixa)

## Keep only identifiers plus the monthly summaries.
df_total_sA = df_total_alta[, c(1:5, 29:36)]
df_total_sB = df_total_baixa[, c(1:5, 29:36)]

## Lag the response by one time step: predict incidence at t+1 from the
## climate summaries at t.
df_cor = df_total_sA[1:126, 6:13]
df_cor$alta = df_total_sA$alta[2:127]
df_cor = df_cor[, c(9, 1:8)]
names(df_cor) <- c("alta", "P", "UR", "TMAX", "TMIN",
                   "NDR.1mm", "NDR.10mm", "NRH.80", "NRH.90")

## One simple linear regression per predictor.
## BUG FIX: the original called abline(modelo) *before* any plot existed
## (an error on a fresh graphics device) and then plot(modelo), which draws
## lm diagnostic plots rather than the data.  Here we draw the scatter plot
## first and overlay the fitted line on it.
fit_and_plot = function(formula, data) {
  model = lm(formula, data = data)
  plot(formula, data = data)
  abline(model, col = "red")
  invisible(model)
}
for (pred in c("P", "UR", "TMIN", "NDR.1mm", "NDR.10mm",
               "NRH.80", "NRH.90", "TMAX")) {
  fit_and_plot(reformulate(pred, response = "alta"), df_cor)
}
## TODO: also test monthly sums and 1-2 months of lead time (item 1 of the
## assignment's observations).
|
/exercicios/2-b.r
|
no_license
|
SamLucas/Trabalho-do-Ramon
|
R
| false
| false
| 4,319
|
r
|
install.packages("data.table")
install.packages("magrittr")
install.packages("corrplot")
install.packages("psych")
install.packages("dplyr")
install.packages("plyr")
library(corrplot)
library(data.table)
library(magrittr)
library(dplyr)
library(psych)
library(plyr)
df_total_alta = read.csv(file = './src/alta-regiao-2-alta - Página1.csv')
df_total_baixa = read.csv(file = './src/alta-regiao-2-baixa - Página1.csv')
# Utils
# =================================
# apply(dadosExemplo, MARGIN = 2, FUN = mean)
# colnames(df_total_alta)
# ====================================================================
# 2) Análise de correlação e regressão linear (3,0 pontos) Nas análises de correlação e regressão linear, você deve
# - b) Gerar modelos de regressão linear simples e múltipla para previsão da doença do café.
# Observações:
# 1) Fazer testes considerando a média mensal e a soma das variáveis independentes considerando 1 e 2 meses de antecedência.
# 2) Fazer testes considerando antecedência de:
# - 20 dias de antecedência (ex: doenca_cafe de abril, com os dados de março (11_20))
# - 30 dias de antecedência (ex: doenca_cafe de abril, com os dados de março (1_10))
# - 40 dias de antecedência (ex: doenca_cafe de abril, com os dados de fevereiro (21_30))
# - 50 dias de antecedência (ex: doenca_cafe de abril, com os dados de fevereiro (11_20))
# - 60 dias de antecedência (ex: doenca_cafe de abril, com os dados de fevereiro (1_10))
# - 70 dias de antecedência (ex: doenca_cafe de abril, com os dados de janeiro (21_30)
converte = function(x){
if(x < 20.1) return("baixo")
else if(x > 60) return("alto")
else return("medio")
}
for(i in 1:nrow(df_total_alta))
df_total_alta$alta_categoria[i] = converte(df_total_alta$alta[i])
for(i in 1:nrow(df_total_baixa))
df_total_baixa$alta_categoria[i] = converte(df_total_baixa$baixa[i])
df_total_alta = df_total_alta[,c(27,1,2,28,3:26)]
df_total_baixa = df_total_baixa[,c(27,1,2,28,3:26)]
df_total_alta[,5:28] <- lapply(df_total_alta[,5:28],function(x){
options(digits=5)
as.double(sub(",", ".", x, fixed = TRUE))
}
)
df_total_baixa[,5:28] <- lapply(df_total_baixa[,5:28],function(x){
options(digits=5)
as.double(sub(",", ".", x, fixed = TRUE))
}
)
df_total_alta$P = apply(df_total_alta[,6:8], 1, mean)
df_total_alta$UR = apply(df_total_alta[,9:11], 1, mean)
df_total_alta$TMAX = apply(df_total_alta[,12:14], 1, mean)
df_total_alta$TMIN = apply(df_total_alta[,15:17], 1, mean)
df_total_alta$NDR.1mm = apply(df_total_alta[,18:20], 1, mean)
df_total_alta$NDR.10mm = apply(df_total_alta[,21:23], 1, mean)
df_total_alta$NRH80 = apply(df_total_alta[,24:26], 1, mean)
df_total_alta$NRH90 = apply(df_total_alta[,27:29], 1, mean)
df_total_baixa$P = apply(df_total_baixa[,6:8], 1, mean)
df_total_baixa$UR = apply(df_total_baixa[,9:11], 1, mean)
df_total_baixa$TMAX = apply(df_total_baixa[,12:14], 1, mean)
df_total_baixa$TMIN = apply(df_total_baixa[,15:17], 1, mean)
df_total_baixa$NDR.1mm = apply(df_total_baixa[,18:20], 1, mean)
df_total_baixa$NDR.10mm = apply(df_total_baixa[,21:23], 1, mean)
df_total_baixa$NRH80 = apply(df_total_baixa[,24:26], 1, mean)
df_total_baixa$NRH90 = apply(df_total_baixa[,27:29], 1, mean)
df_total_sA = df_total_alta[,c(1:5,29:36)]
df_total_sB = df_total_baixa[,c(1:5,29:36)]
df_cor = df_total_sA[c(1:126),c(6:13)]
lista = c(df_total_sA$alta[c(2:127)])
df_cor$alta = lista
df_cor = df_cor[,c(9,1:8)]
names(df_cor)<-c("alta","P","UR","TMAX","TMIN","NDR.1mm","NDR.10mm","NRH.80","NRH.90")
modelo = lm(alta ~ P , data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ UR, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ TMIN, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ NDR.1mm, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ NDR.10mm, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ NRH.80, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ NRH.90, data = df_cor)
abline(modelo, col="red")
plot(modelo)
modelo = lm(alta ~ TMAX, data = df_cor)
abline(modelo, col="red")
plot(modelo)
# Fazer testes considerando a média mensal e a soma das variáveis independentes considerando 1 e 2 meses de antecedência.
|
#' @param minMAF The minimum valid minor allele frequency (MAF). Large differences between the variances of two variables in the same model can cause optimization failures that invalidate the model. Further, very small minor allele frequencies are more affected by outliers or influential observations. Accordingly, users can specify the minimum allowable MAF. The default value is MAF > .01. This option only works when the SNP is entered into the model as an observed variable. If you wish to filter out small MAF alleles for all models, use other software programs such as PLINK.
|
/man-roxygen/args-minmaf.R
|
no_license
|
jpritikin/gwsem
|
R
| false
| false
| 583
|
r
|
#' @param minMAF The minimum valid minor allele frequency (MAF). Large differences between the variances of two variables in the same model can cause optimization failures that invalidate the model. Further, very small minor allele frequencies are more affected by outliers or influential observations. Accordingly, users can specify the minimum allowable MAF. The default value is MAF > .01. This option only works when the SNP is entered into the model as an observed variable. If you wish to filter out small MAF alleles for all models, use other software programs such as PLINK.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.helpers.R
\name{get_run_ids}
\alias{get_run_ids}
\title{Get vector of run IDs for a given workflow ID}
\usage{
get_run_ids(bety, workflow_id)
}
\arguments{
\item{bety}{BETYdb connection, as opened by `betyConnect()`}
\item{workflow_id}{Workflow ID}
}
\description{
Get vector of run IDs for a given workflow ID
}
|
/visualization/man/get_run_ids.Rd
|
permissive
|
Viskari/pecan
|
R
| false
| true
| 399
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.helpers.R
\name{get_run_ids}
\alias{get_run_ids}
\title{Get vector of run IDs for a given workflow ID}
\usage{
get_run_ids(bety, workflow_id)
}
\arguments{
\item{bety}{BETYdb connection, as opened by `betyConnect()`}
\item{workflow_id}{Workflow ID}
}
\description{
Get vector of run IDs for a given workflow ID
}
|
## Fit an elastic-net model (alpha = 0.15) to the kidney training set with
## 10-fold cross-validation, minimising mean absolute error, and log the
## fitted coefficient path.
library(glmnet)
## Arguments spelled out in full: the original used the partially matched
## `head = T`, which silently relies on partial matching of `header` and on
## the reassignable constant `T`.
mydata = read.table("./TrainingSet/LassoBIC/kidney.csv",
                    header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])  # predictors: columns 4..end
y = as.matrix(mydata[, 1])               # response: first column
set.seed(123)                            # reproducible CV fold assignment
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.15,
                family = "gaussian", standardize = TRUE)
## Append the glmnet path summary to the model log file.
sink('./Model/EN/Lasso/kidney/kidney_031.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Lasso/kidney/kidney_031.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 351
|
r
|
library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/kidney.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.15,family="gaussian",standardize=TRUE)
sink('./Model/EN/Lasso/kidney/kidney_031.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## File Name: nedelsky.latresp.R
## File Version: 0.10
#---- latent responses for the Nedelsky function
# Enumerate all 2^K binary latent-response patterns for K options.
# Returns a numeric matrix with columns V1..VK where the first column
# varies fastest, i.e. standard expand.grid() ordering.
nedelsky.latresp <- function(K)
{
    binary_levels <- rep(list(c(0, 1)), K)
    names(binary_levels) <- paste0("V", seq_len(K))
    combis <- as.matrix(do.call(expand.grid, binary_levels))
    return(combis)
}
|
/sirt/R/nedelsky.latresp.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 297
|
r
|
## File Name: nedelsky.latresp.R
## File Version: 0.10
#---- latent responses for Nedelksy function
nedelsky.latresp <- function(K)
{
nodes <- c(0,1)
ndim <- K
combis <- as.matrix( expand.grid( as.data.frame( matrix( rep(nodes, ndim), ncol=ndim ))))
return(combis)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CPT-POT-extract.R
\name{extract-cpt}
\alias{extract-cpt}
\alias{extractCPT}
\alias{extractCPT.data.frame}
\alias{extractCPT.table}
\alias{extractPOT}
\alias{extractPOT.data.frame}
\alias{extractPOT.table}
\title{Extract conditional probabilities and clique potentials from
data.}
\usage{
extractCPT(x, graph, smooth = 0)
extractPOT(x, graph, smooth = 0)
}
\arguments{
\item{x}{An array or a dataframe.}
\item{graph}{A graph represented as a graphNEL object. For
extractCPT, graph must be a DAG while for extractPOT, graph
must be undirected triangulated graph.}
\item{smooth}{See 'details' below.}
}
\value{
extractCPT: A list of conditional probability tables
extractPOT: A list of clique potentials.
}
\description{
Extract list of conditional probability tables and
list of clique potentials from data.
}
\details{
If \code{smooth} is non--zero then \code{smooth} is added
to all cell counts before normalization takes place.
}
\examples{
## Asia (chest clinic) example:
## Version 1) Specify conditional probability tables.
yn <- c("yes","no")
a <- cptable(~asia, values=c(1,99),levels=yn)
t.a <- cptable(~tub+asia, values=c(5,95,1,99),levels=yn)
s <- cptable(~smoke, values=c(5,5), levels=yn)
l.s <- cptable(~lung+smoke, values=c(1,9,1,99), levels=yn)
b.s <- cptable(~bronc+smoke, values=c(6,4,3,7), levels=yn)
e.lt <- cptable(~either+lung+tub,values=c(1,0,1,0,1,0,0,1),levels=yn)
x.e <- cptable(~xray+either, values=c(98,2,5,95), levels=yn)
d.be <- cptable(~dysp+bronc+either, values=c(9,1,7,3,8,2,1,9), levels=yn)
plist <- compileCPT(list(a, t.a, s, l.s, b.s, e.lt, x.e, d.be))
pn1 <- grain(plist)
q1 <- querygrain(pn1)
## Version 2) Specify DAG and data
data(chestSim100000, package="gRbase")
dgf <- ~asia + tub * asia + smoke + lung * smoke +
bronc * smoke + either * tub * lung +
xray * either + dysp * bronc * either
dg <- dag(dgf)
pp <- extractCPT(chestSim100000, dg)
cpp2 <- compileCPT(pp)
pn2 <- grain(cpp2)
q2 <- querygrain(pn2)
## Version 2) Specify triangulated undirected graph and data
ugf <- list(c("either", "lung", "tub"), c("either", "lung", "bronc"),
c("either", "xray"), c("either", "dysp", "bronc"), c("smoke",
"lung", "bronc"), c("asia", "tub"))
gg <- ugList(ugf)
pp <- extractPOT(chestSim100000, gg)
cpp3 <- compilePOT(pp)
pn3 <- grain(cpp3)
q3 <- querygrain(pn3)
## Compare results:
str(q1)
str(q2[names(q1)])
str(q3[names(q1)])
}
\author{
Søren Højsgaard, \email{sorenh@math.aau.dk}
}
\references{
Søren Højsgaard (2012). Graphical Independence Networks
with the gRain Package for R. Journal of Statistical Software,
46(10), 1-26. \url{http://www.jstatsoft.org/v46/i10/}.
}
\seealso{
\code{\link{compileCPT}}, \code{\link{compilePOT}},
\code{\link{grain}}
}
\keyword{utilities}
|
/man/extract-cpt.Rd
|
no_license
|
ktargows/gRain
|
R
| false
| true
| 2,895
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CPT-POT-extract.R
\name{extract-cpt}
\alias{extract-cpt}
\alias{extractCPT}
\alias{extractCPT.data.frame}
\alias{extractCPT.table}
\alias{extractPOT}
\alias{extractPOT.data.frame}
\alias{extractPOT.table}
\title{Extract conditional probabilities and clique potentials from
data.}
\usage{
extractCPT(x, graph, smooth = 0)
extractPOT(x, graph, smooth = 0)
}
\arguments{
\item{x}{An array or a dataframe.}
\item{graph}{A graph represented as a graphNEL object. For
extractCPT, graph must be a DAG while for extractPOT, graph
must be undirected triangulated graph.}
\item{smooth}{See 'details' below.}
}
\value{
extractCPT: A list of conditional probability tables
extractPOT: A list of clique potentials.
}
\description{
Extract list of conditional probability tables and
list of clique potentials from data.
}
\details{
If \code{smooth} is non--zero then \code{smooth} is added
to all cell counts before normalization takes place.
}
\examples{
## Asia (chest clinic) example:
## Version 1) Specify conditional probability tables.
yn <- c("yes","no")
a <- cptable(~asia, values=c(1,99),levels=yn)
t.a <- cptable(~tub+asia, values=c(5,95,1,99),levels=yn)
s <- cptable(~smoke, values=c(5,5), levels=yn)
l.s <- cptable(~lung+smoke, values=c(1,9,1,99), levels=yn)
b.s <- cptable(~bronc+smoke, values=c(6,4,3,7), levels=yn)
e.lt <- cptable(~either+lung+tub,values=c(1,0,1,0,1,0,0,1),levels=yn)
x.e <- cptable(~xray+either, values=c(98,2,5,95), levels=yn)
d.be <- cptable(~dysp+bronc+either, values=c(9,1,7,3,8,2,1,9), levels=yn)
plist <- compileCPT(list(a, t.a, s, l.s, b.s, e.lt, x.e, d.be))
pn1 <- grain(plist)
q1 <- querygrain(pn1)
## Version 2) Specify DAG and data
data(chestSim100000, package="gRbase")
dgf <- ~asia + tub * asia + smoke + lung * smoke +
bronc * smoke + either * tub * lung +
xray * either + dysp * bronc * either
dg <- dag(dgf)
pp <- extractCPT(chestSim100000, dg)
cpp2 <- compileCPT(pp)
pn2 <- grain(cpp2)
q2 <- querygrain(pn2)
## Version 2) Specify triangulated undirected graph and data
ugf <- list(c("either", "lung", "tub"), c("either", "lung", "bronc"),
c("either", "xray"), c("either", "dysp", "bronc"), c("smoke",
"lung", "bronc"), c("asia", "tub"))
gg <- ugList(ugf)
pp <- extractPOT(chestSim100000, gg)
cpp3 <- compilePOT(pp)
pn3 <- grain(cpp3)
q3 <- querygrain(pn3)
## Compare results:
str(q1)
str(q2[names(q1)])
str(q3[names(q1)])
}
\author{
Søren Højsgaard, \email{sorenh@math.aau.dk}
}
\references{
Søren Højsgaard (2012). Graphical Independence Networks
with the gRain Package for R. Journal of Statistical Software,
46(10), 1-26. \url{http://www.jstatsoft.org/v46/i10/}.
}
\seealso{
\code{\link{compileCPT}}, \code{\link{compilePOT}},
\code{\link{grain}}
}
\keyword{utilities}
|
fluidPage(
titlePanel("Probabilty Prediction given Radius"),
title = 'Formular',
withMathJax(),
uiOutput('ex1'),
uiOutput('ex2'),
uiOutput('ex3'),
uiOutput('ex4'),
sidebarPanel(
sidebarLayout(position = "left",
selectInput("Country", label = "Select a Country",
choices = unique(as.character(data$Country))),
hr()
)),
mainPanel(
plotOutput("Plot")
),
fluidRow(
column(8,
sidebarPanel(
sidebarLayout(
selectInput("Country1", label = "Select a Country",
choices = unique(as.character(data$Country))),
hr()
)))),
fluidRow(
column(8,
sidebarPanel(
sidebarLayout(
selectInput("Magnitude", label = "Select a Magnitude level",
choices = unique(as.character(data$Mag))),
hr()
)))),
fluidRow(
column(9,
sliderInput(inputId = "Time",
label = "Select a Future Time",
value = 20, min = 1, max = 80)
)
),
fluidRow(
column(9, verbatimTextOutput("value"))
)
)
|
/Data Fest Project/R_Shiny/ProbabilityModel-Rshiny/PD2/ui.R
|
no_license
|
sileyang/Sile_Yang
|
R
| false
| false
| 1,219
|
r
|
fluidPage(
titlePanel("Probabilty Prediction given Radius"),
title = 'Formular',
withMathJax(),
uiOutput('ex1'),
uiOutput('ex2'),
uiOutput('ex3'),
uiOutput('ex4'),
sidebarPanel(
sidebarLayout(position = "left",
selectInput("Country", label = "Select a Country",
choices = unique(as.character(data$Country))),
hr()
)),
mainPanel(
plotOutput("Plot")
),
fluidRow(
column(8,
sidebarPanel(
sidebarLayout(
selectInput("Country1", label = "Select a Country",
choices = unique(as.character(data$Country))),
hr()
)))),
fluidRow(
column(8,
sidebarPanel(
sidebarLayout(
selectInput("Magnitude", label = "Select a Magnitude level",
choices = unique(as.character(data$Mag))),
hr()
)))),
fluidRow(
column(9,
sliderInput(inputId = "Time",
label = "Select a Future Time",
value = 20, min = 1, max = 80)
)
),
fluidRow(
column(9, verbatimTextOutput("value"))
)
)
|
# if statement
var1 <- 1
var2 <- 2
if (var1 > var2){
cat("var1 큼")
} else {
cat("var2 큼")
}
cat(ifelse(var1>var2, "var1 큼", "var2 큼"))
# switch statement
# switch(EXPR=수치데이터, 식1, 식2, 식3, ...)
# switch(EXPR=문자열데이터, 비교값1=식1, 비교값2=식2, 비교값3=, 비교값4=식3, ..., 식4)
switch(EXPR=var1, "월", "화", "수", "목", "금", "토", "일")
varStr <- '화'
switch(EXPR=varStr, "월"=1, "화"=2)
# while loop
no <- 0
while(no <5){
print(no);
no<-no+1;
}
x <- 1
while(x<5){
x<-x+1;
if(x==4){
break;
}
print(x);
}
x <- 1
while(x<5){
x<-x+1;
if(x==4){
next;
}
print(x);
}
# for loop
for(i in 1:5){
print(i)
}
sum <- 0
for(i in 1:10){
sum <- sum+i;
}
sum
# 반복문 내에서 화면에 결과 출력시 출력함수 print() or cat() 사용
|
/control_statement_study.R
|
no_license
|
eunjjeong/R_study
|
R
| false
| false
| 833
|
r
|
# if statement
var1 <- 1
var2 <- 2
if (var1 > var2){
cat("var1 큼")
} else {
cat("var2 큼")
}
cat(ifelse(var1>var2, "var1 큼", "var2 큼"))
# switch statement
# switch(EXPR=수치데이터, 식1, 식2, 식3, ...)
# switch(EXPR=문자열데이터, 비교값1=식1, 비교값2=식2, 비교값3=, 비교값4=식3, ..., 식4)
switch(EXPR=var1, "월", "화", "수", "목", "금", "토", "일")
varStr <- '화'
switch(EXPR=varStr, "월"=1, "화"=2)
# while loop
no <- 0
while(no <5){
print(no);
no<-no+1;
}
x <- 1
while(x<5){
x<-x+1;
if(x==4){
break;
}
print(x);
}
x <- 1
while(x<5){
x<-x+1;
if(x==4){
next;
}
print(x);
}
# for loop
for(i in 1:5){
print(i)
}
sum <- 0
for(i in 1:10){
sum <- sum+i;
}
sum
# 반복문 내에서 화면에 결과 출력시 출력함수 print() or cat() 사용
|
output$currUI <- renderUI({
if(logged$status == "notlogged"){
fluidRow(align="center",
br(),br(),br(),br(),br(),
imageOutput("login_header",inline = T),
textInput("username", "username:"),
passwordInput("password", "password:"),
tags$style(type="text/css", "#string { height: 50px; width: 100%; text-align:center;
font-size: 30px; display: block;}"),
shinyWidgets::circleButton("login", icon = icon("arrow-right")),
br(),br(),
div(style="width:300px;",verbatimTextOutput("login_status", placeholder = FALSE)))
}else if(logged$status == "logged"){
print("rendering...")
# read settings
opts <- getOptions(lcl$paths$opt.loc)
# generate CSS for the interface based on user settings for colours, fonts etc.
bar.css <<- nav.bar.css(opts$col1, opts$col2, opts$col3, opts$col4)
font.css <<- app.font.css(opts$font1, opts$font2, opts$font3, opts$font4,
opts$size1, opts$size2, opts$size3, opts$size4, online=online)
# === GOOGLE FONT SUPPORT FOR GGPLOT2 ===
online = internetWorks()
# Download a webfont
if(online){
lapply(c(opts[grepl(pattern = "font", names(opts))]), function(font){
try({
sysfonts::font_add_google(name = font,
family = font,
regular.wt = 400,
bold.wt = 700)
})
})
}
# set taskbar image as set in options
taskbar_image <- opts$task_img
# parse color opts
lcl$aes$mycols <<- get.col.map(lcl$paths$opt.loc) # colours for discrete sets, like group A vs group B etc.
lcl$aes$theme <<- opts$gtheme # gradient function for heatmaps, volcano plot etc.
lcl$aes$spectrum <<- opts$gspec # gradient function for heatmaps, volcano plot etc.
# load existing file
bgcol <<- opts$col1
# - - load custom dbs - -
# load in custom databases
has.customs <- dir.exists(file.path(lcl$paths$db_dir, "custom"))
if(has.customs){
customs = list.files(path = file.path(lcl$paths$db_dir, "custom"),
pattern = "\\.RData")
dbnames = unique(tools::file_path_sans_ext(customs))
for(db in dbnames){
# add name to global
dblist <- gbl$vectors$db_list
dblist <- dblist[-which(dblist == "custom")]
if(!(db %in% dblist)){
dblist <- c(dblist, db, "custom")
gbl$vectors$db_list <- dblist
}
metadata.path <- file.path(lcl$paths$db_dir, "custom", paste0(db, ".RData"))
load(metadata.path)
# add description to global
gbl$constants$db.build.info[[db]] <- meta.dbpage
# add image to global
maxi = length(gbl$constants$images)
gbl$constants$images[[maxi + 1]] <- meta.img
}
}
# init stuff that depends on opts file
lcl$proj_name <<- opts$proj_name
lcl$paths$patdb <<- file.path(opts$work_dir, paste0(opts$proj_name, ".db"))
lcl$paths$csv_loc <<- file.path(opts$work_dir, paste0(opts$proj_name, ".csv"))
lcl$texts <<- list(
list(name='curr_exp_dir',text=lcl$paths$work_dir),
list(name='curr_db_dir',text=lcl$paths$db_dir),
list(name='ppm',text=opts$ppm),
list(name='proj_name',text=opts$proj_name)
)
lcl$vectors$project_names <<- unique(gsub(list.files(opts$work_dir,pattern = "\\.csv"),pattern = "(_no_out\\.csv)|(\\.csv)", replacement=""))
updateSelectizeInput(session,
"proj_name",
choices = lcl$vectors$proj_names,
selected = opts$proj_name)
lapply(lcl$texts, FUN=function(default){
output[[default$name]] = renderText(default$text)
})
lcl$aes$font <- list(family = opts$font4,
ax.num.size = 11,
ax.txt.size = 15,
ann.size = 20,
title.size = 25)
# other default stuff that needs opts
library(showtext)
online = internetWorks()
# create color pickers based on amount of colours allowed in global
output$colorPickers <- renderUI({
lapply(c(1:gbl$constants$max.cols), function(i) {
colourpicker::colourInput(inputId = paste("col", i, sep="_"),
label = paste("Choose colour", i),
value = lcl$aes$mycols[i],
allowTransparent = F)
})
})
# create color1, color2 etc variables to use in plotting functions
# and update when colours picked change
observe({
values <- unlist(lapply(c(1:gbl$constants$max.cols), function(i) {
input[[paste("col", i, sep="_")]]
}))
if(!any(is.null(values))){
if(lcl$paths$opt.loc != ""){
set.col.map(optionfile = lcl$paths$opt.loc, values)
lcl$aes$mycols <<- values
}
}
})
updateSelectInput(session, "ggplot_theme", selected = opts$gtheme)
updateSelectInput(session, "color_ramp", selected = opts$gspec)
html = tagList(
tags$style(type="text/css", bar.css),
tags$script(src="spinnytitle.js"),
tags$script(src="sparkle.js"),
# - - - - - - - - -
navbarPage(windowTitle='MetaboShiny',
inverse=TRUE,
# use this for title
# https://codepen.io/maxspeicher/pen/zrVKLE
title=div(h1("MetaboShiny"), class="outlined", tags$style(type="text/css", font.css), id="sparkley"), # make it use the sparkle.js for unnecessary sparkle effects ;)
id="nav_general",
# this tab shows the available databases, if they are installed, and buttons to install them. generated as output$db_build_ui in 'server'
tabPanel("database", icon = icon("database",class = "outlined"), value="database",
fluidRow(align="center", bsCollapse(bsCollapsePanel(title=h2("Settings"), style="info",
sliderInput(inputId = "db_mz_range", label = "What mass range can your machine detect?",
min = 0, max = 3000, value = c(60, 600),
step = 1,post = "m/z",dragRange = TRUE),
tags$i("Warning: increasing the upper m/z boundary may drastically increase database build times
- calculating isotopes is more time-consuming for larger molecules!"),
radioButtons("db_build_mode", label = "Build base database, extended (isotopes+adducts) or both?",
choices = c("base","extended","both"),selected = "extended"),
tags$i("For example: if you only defined a new adduct, pick 'extended' as the source database doesn't change.")
)
)),
uiOutput("db_build_ui") #%>% shinycssloaders::withSpinner() # see server, is autogenerated now
),
tabPanel("data import", icon = icon("upload", class = "outlined"),
fluidRow(column(12, align="center",
textInput("proj_name_new", label = "STEP 1: What is your project name?", value = lcl$proj_name),
sliderInput("ppm", "STEP 2: What level accuracy is your mass spectrometer?",min = 0.1,max = 50,value = 5,step = .1))),
hr(),
fluidRow(column(3, align="center",
imageOutput("merge_icon",inline = T),
radioButtons("importmode", label = "",
choices = list("Peaks are in a .db file"="db", "Peaks are in two .csv files (pos/neg mode)"="csv"),
selected = "db"),
tags$b("STEP 3: Click buttons to load data."),
shinyFilesButton('metadata', 'metadata', 'Select metadata in csv/xls(x)', FALSE),
conditionalPanel(condition = "input.importmode == 'db'",
shinyFilesButton('database', 'database', 'Select .db file', FALSE)
),
conditionalPanel(condition = "input.importmode == 'csv'",
shinyFilesButton('outlist_pos', '+ peaks', 'Select .csv for - mode peaks', FALSE),
shinyFilesButton('outlist_neg', '- peaks', 'Select .csv for + mode peaks', FALSE)
)
)
,column(2, align="center", #ok
tags$b("STEP 4: Merge data and metadata"),br(),br(),
shinyWidgets::circleButton("create_db", icon = icon("long-arrow-alt-right", class = "fa-2x"), size = "lg"))
,column(2, align="center", # issue lol
imageOutput("db_icon")
)
,column(2, align="center",
tags$b("STEP 5: Convert to input-ready format"),
br(),br(),
shinyWidgets::circleButton("create_csv", icon = icon("long-arrow-alt-right", class = "fa-2x"), size = "lg"))
,column(3, align="center",
imageOutput("laptop_icon", inline=T),br(),br(),
div(DT::dataTableOutput('csv_tab'),style='font-size:80%')
)
),
fluidRow(column(3, align="center",
tags$i("Input files chosen?"),br(),br(),
imageOutput("proj_merge_check")
),
column(2, align="center",
tags$i("Database present?"),br(),br(),
imageOutput("proj_db_check"),offset = 2),
column(3, align="center",
tags$i("Final table present?"),br(),br(),
imageOutput("proj_csv_check", inline=T),br(),br(),
tags$b("STEP 6: If "), icon("check-circle"), tags$b(" continue to normalization"),
offset = 2))
),
# this tab is used to perform normalization of your data. settings are processed as input$filt_type etc. in 'server'.
tabPanel("normalize", icon = icon("shower",class = "outlined"), value="filter",
fluidRow(column(3, aligh="center",
selectInput('samp_var', 'Which variable represents sample amount/concentration?', choices = c("")), #TODO: only show this when normalize by sample specific factor (specnorm) is selected
selectizeInput('batch_var', 'What are your batch variables?', choices = c("batch"), multiple=TRUE, options = list(maxItems = 2L)),
actionButton("check_csv", "Get options", icon=icon("refresh")),
hr(),
shinyWidgets::sliderTextInput("perc_limit","Max. missing feature percent:",
choices=c(0, 0.0001, 0.001, 0.01, 0.1, seq(1, 100, 1)),
selected=1, grid = T),
selectInput('filt_type', 'How will you filter your m/z values?', choices = list("Interquantile range"="iqr",
"Relative stdev"="rsd",
"Non-parametric relative stdev"="nrsd",
"Mean"="mean",
"Standard deviation"="sd",
"Median absolute deviation"="mad",
"Median"="median",
"None"="none"),
selected = "none"),
selectInput('norm_type', 'What type of normalization do you want to do?', choices = list("Quantile normalization"="QuantileNorm",
"By reference feature"="ProbNorm",
"By reference compound"="CompNorm",
"By sample specific factor"="SpecNorm",
"Sum"="SumNorm",
"Median"="MedianNorm",
"None"="NULL")),
uiOutput("ref_select"),
selectInput('trans_type', 'How will you transform your data?', choices = list("Log transform"="LogNorm",
"Cubic root transform"="CrNorm",
"None"="NULL")),
selectInput('scale_type', 'How will you scale your data?', choices = list("Autoscale/Z-transform"="AutoNorm",
"Mean-center"="MeanCenter",
"Pareto Scaling"="ParetoNorm",
"Range scaling"="RangeNorm",
"None"="NULL")),
selectInput('miss_type', 'How to deal with missing values?', choices = list("Half feature minimum"="colmin",
"Half sample minimum"="rowmin",
"Total minimum"="min",
"Random forest"="rf",
#"Impute w/ regression"="regr",
"KNN imputation"="knn",
"SVD imputation"="svdImpute",
"BPCA imputation"="bpca",
"PPCA imputation"="ppca",
"Median"="median",
"Mean"="mean",
"Leave them out"="exclude",
"Leave them alone"="none"),
selected = "knn"),
conditionalPanel("input.miss_type == 'rf'",
sliderInput("rf_norm_ntree", label = "Trees built per variable", value = 10, min = 1, max = 50, step=1),
#numericInput("rf_norm_mtry", label = "Trees built per variable", value = 10, min = 1, max = 50)
radioButtons("rf_norm_parallel", label = "Parallelize?", choices = list("no",
"forests",
"variables"),
selected = "variables")
),
# - - - - - -
switchButton(inputId = "remove_outliers",
label = "Exclude outliers?",
value = FALSE, col = "BW", type = "YN"),
actionButton("initialize", "Go", icon=icon("hand-o-right")),
hr(),
imageOutput("dataset_icon",inline = T),
fileInput("pat_dataset", "Import dataset",
multiple = F,
accept = c(".RData")),
actionButton("import_dataset", "Import", icon = icon("hand-peace-o")),
imageOutput("dataset_upload_check",inline = T)
), column(9,
# show the summary plots post-normalization
navbarPage(inverse=F,h3("explore"),
tabPanel("m/z values",# icon=icon("braille"),
fluidRow(column(6,plotOutput("var1",height='300px')),
column(6,plotOutput("var3", height='300px'))
),
fluidRow(column(6,plotOutput("var2", height='500px')),
column(6,plotOutput("var4", height='500px')))
),
tabPanel("samples",# icon=icon("tint"),
fluidRow(column(6,plotOutput("samp1",height='300px')),
column(6,plotOutput("samp3", height='300px'))
),
fluidRow(column(6,plotOutput("samp2", height='500px')),
column(6,plotOutput("samp4", height='500px')))
)
)
)
)),
tabPanel("prematch", icon = icon("search", class = "outlined"), value = "prematch",
# - - - pre-matching part - - -
fluidRow(align="center",
switchButton(inputId = "do_prematch",
label = "Do matching beforehand?",
col = "BW",
type = "YN"),
tags$i("All m/z values will be searched in the databases of choice and the results will be saved to your save file for fast access."),br(),
tags$i("Search results can still be overridden by manual searching. Don't forget to save after!")),
br(),
fluidRow(align="center",
column(2),
column(8, conditionalPanel("input.do_prematch == true",
h2("Included databases:"),
uiOutput("db_prematch_select"),
shinyWidgets::circleButton("select_db_prematch_all",
icon = icon("shopping-cart"),
size = "default")),
hr(),
fluidRow(column(6,h2("Find matches"),
shinyWidgets::circleButton(inputId = "prematch",
icon = icon("searchengin"))),
column(6,h2("Clear matches"),
shinyWidgets::circleButton(inputId = "clear_prematch",
icon = icon("trash"))))
),
column(2)
)),
# this tab is the main analysis tab. all tabs for all analyses are listed here, but the visibility is changed depending on the current experiment
tabPanel("analyse", icon = icon("bar-chart",class = "outlined"), value = "analysis",
sidebarLayout(position="right",
mainPanel = mainPanel(width = 8,
tabsetPanel(id="statistics",selected = "pca",
#navbarPage(inverse=F, "", id="statistics", selected = "pca", collapsible = T,
# TODO: T-SNE
# this tab shows general information, mostly a message with 'please give me some data' :-)
tabPanel(icon("star"), value = "inf",
fluidRow(column(width=12, align="center",
br(),br(),br(),br(),
#hr(),
#icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"),
br(),br(),
h2("Please select a variable of interest in the sidebar!"),br(),
icon("exchange", "fa-4x"),
br(),br(),br()
#hr()
#icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg")
))),
tabPanel("dimension reduction", value = "dimred", icon=icon("cube"),
navbarPage(inverse=T, icon("cube"), id = "dimred",
# loading this tab performs PCA. summary and loading tables, alongside a 2d/3d PCA plot, are available here.
tabPanel("pca", value = "pca", #icon=icon("cube"),
fluidRow(align="center",column(12,plotly::plotlyOutput("plot_pca",height = "600px", width="600px"))),#%>% shinycssloaders::withSpinner())),
fluidRow(align="center",column(12,
switchButton("pca_2d3d", label = "", col = "BW", type = "2d3d", value=T))),
hr(),
fluidRow(column(3,
selectInput("pca_x", label = "X axis:", choices = paste0("PC",1:20),selected = "PC1",width="100%"),
selectInput("pca_y", label = "Y axis:", choices = paste0("PC",1:20),selected = "PC2",width="100%"),
selectInput("pca_z", label = "Z axis:", choices = paste0("PC",1:20),selected = "PC3",width="100%")),
column(9,
tabsetPanel(id="pca_2",
tabPanel(title="Table",
div(DT::dataTableOutput('pca_tab',width="100%"),style='font-size:80%')),
tabPanel(title="Scree",
plotOutput("pca_scree", width = "100%", height="250px")
),
tabPanel(title="Loadings",
div(DT::dataTableOutput('pca_load_tab',width="100%"),style='font-size:80%'))
))
)
),
# TODO: enable the sparse and orthogonal PLS-DA options in metaboanalystR
# this tab is used to perform pls-da. it triggers on 'go' button as it is a time costly analysis.
tabPanel("pls-da", value = "plsda",
fluidRow(align="center",column(12,plotly::plotlyOutput("plot_plsda",height = "500px", width="500px"))),
fluidRow(align="center",column(12,
switchButton("plsda_2d3d", label = "", col = "BW", type = "2d3d"))),
hr(),
fluidRow(column(3,
div(style="display:inline-block",
selectInput("plsda_type",
label="Type:",
choices=list("Normal"="normal")
#,
# "Orthogonal"="ortho",
# "Sparse"="sparse")
,width = '100px',
selected=1)),
div(style="display:inline-block",
shinyWidgets::circleButton("do_plsda", icon = icon("hand-pointer-o"), size = "sm")
),
selectInput("plsda_x", label = "X axis:", choices = paste0("PC",1:8),selected = "PC1",width="100%"),
selectInput("plsda_y", label = "Y axis:", choices = paste0("PC",1:8),selected = "PC2",width="100%"),
selectInput("plsda_z", label = "Z axis:", choices = paste0("PC",1:8),selected = "PC3",width="100%")),
column(9,
tabsetPanel(id="plsda_2",
tabPanel(title="Cross-validation",
plotOutput("plsda_cv_plot")),
tabPanel(title="Permutation",
plotOutput("plsda_perm_plot")),
tabPanel(title="Table",
div(DT::dataTableOutput('plsda_tab',width="100%"),style='font-size:80%')),
tabPanel(title="Loadings",
div(DT::dataTableOutput('plsda_load_tab',width="100%"),style='font-size:80%'))
))
)
),
tabPanel("t-sne", value = "tsne",
helpText("working on it")
)
)),
tabPanel("per m/z", value = "permz", icon=icon("fingerprint"),
navbarPage(inverse=T, icon("fingerprint"), id = "permz",
tabPanel("t-test", value="tt",
fluidRow(plotly::plotlyOutput('tt_specific_plot',width="100%")),
fluidRow(align="center",
sardine(switchButton("tt_nonpar", "Non-parametric?", col="BW", type="YN", value = T)),
#sardine(uiOutput("tt_parbutton")),
sardine(switchButton("tt_eqvar", "Equal variance?", col="BW", type="YN", value = T))
),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('tt_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('tt_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
)
)),
tabPanel("anova", value="aov",
fluidRow(plotly::plotlyOutput('aov_specific_plot',width="100%")),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('aov_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('aov_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
)
)),
tabPanel("fold-change", value="fc",
fluidRow(plotly::plotlyOutput('fc_specific_plot',width="100%")),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('fc_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('fc_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
))
),
tabPanel("meba", value="meba",
fluidRow(plotly::plotlyOutput('meba_specific_plot',height="600px")),
fluidRow(div(DT::dataTableOutput('meba_tab', width="100%"),style='font-size:80%'))
),
tabPanel("asca", value="asca",
fluidRow(plotly::plotlyOutput('asca_specific_plot', height="600px")),
fluidRow(div(DT::dataTableOutput('asca_tab',width="100%"),style='font-size:80%'))
)
)
),
tabPanel("overview analyses", value = "overview", icon=icon("globe"),
navbarPage(inverse=T, icon("globe"), id = "overview",
tabPanel("volcano plot", value="volc",
fluidRow(plotly::plotlyOutput('volc_plot',width="100%",height="600px") %>% shinycssloaders::withSpinner()),
fluidRow(div(DT::dataTableOutput('volc_tab',width="100%"),style='font-size:80%'))
),
tabPanel("heatmap", value="heatmap",
plotly::plotlyOutput("heatmap",width="100%",height="700px") %>% shinycssloaders::withSpinner(),
br(),
fluidRow(column(align="center",
width=12,
sliderInput("heatmap_topn", "Use top ... from table:", value=100, min = 10, max = 200))
),
fluidRow(column(align="center",
width=12,
uiOutput("heatbutton"),
switchButton("heatsign", label = "Only significant hits?", col = "GB", type = "YN"),
switchButton("heatlimits", label = "Color based on -all- metabolites?", col = "GB", type = "YN")
))
),
# this tab is used to find overlapping features of interest between analyses
# TODO: enable this with multiple saved mSets in mSet$storage
tabPanel(title="venn", value="venn", #icon=icon("comments"),
sidebarLayout(position = "left",
sidebarPanel = sidebarPanel(
fluidRow(div(DT::dataTableOutput('venn_unselected'),style='font-size:80%'), align="center"),
fluidRow(shinyWidgets::circleButton("venn_add", icon=icon("arrow-down"), size="sm"),
shinyWidgets::circleButton("venn_remove", icon=icon("arrow-up"), size="sm"),
align="center"),
fluidRow(div(DT::dataTableOutput('venn_selected'),style='font-size:80%'),align="center"),
hr(),
fluidRow(
sliderInput("venn_tophits", label = "Only include top:", min = 1, max = 200, post = " hits", value=20)
,align="center"),
fluidRow(
shinyWidgets::circleButton("venn_build", icon=icon("hand-pointer-o"),size="default")
,align="center")
),
mainPanel = mainPanel(
hr(),
plotOutput("venn_plot",inline = F),
# find the overlapping compounds between the groups you want to compare (user select)
# TODO: enable this with clicking the numbers/areas
fluidRow(selectInput("intersect_venn", label = "Show hits from (only):", selected = 1,choices = "",multiple = T),
align="center"),
fluidRow(uiOutput("venn_pval"), align="center"),
br(),
fluidRow(div(DT::dataTableOutput('venn_tab'),style='font-size:80%'),
align="center")
))
)
)
),
# this tab enables machine learning
tabPanel("machine learning", value = "ml", icon=icon("signature"),
br(),
navbarPage(inverse=F, icon("signature"), id = "ml",
tabPanel("initialize", value="init",
fluidRow(
column(width=3,align="center",
selectInput("ml_perf_metr", label=h2("Performance metric"),
choices = c("boot", "boot632", "optimism_boot",
"boot_all", "cv", "repeatedcv",
"LOOCV", "LGOCV", "none", "oob",
"timeslice", "addaptive_cv", "adaptive_boot",
"adaptive_LGOCV"),
multiple = F, selected = "repeatedcv"),
sliderInput("ml_train_perc",
label = h2("Percentage in training"),
min = 1,
max = 100,
step = 1,
value = 60,
post = "%"),
selectInput("ml_folds", label=h2("Fold CV"),choices = c("5",
"10",
"20",
"50",
"LOOCV"),
multiple = F),
sliderInput("ml_attempts",
label = "Attempts",
min = 1,
max = 100,
step = 1,
value = 20,
post = "x")
),
column(width=6,align="center",
selectInput("ml_method",
label = h2("Used algorithm"),
selected = "glmnet",
choices = {
lst = as.list(gbl$constants$ml.models)
# names(lst) <- sapply(gbl$constants$ml.models, function(mdl) caret.mdls[[mdl]]$label)
lst
},
multiple = F),
div(uiOutput("ml_params"), style = "font-size:60%"),
selectizeInput("ml_preproc", label = h2("Data reprocessing"),
choices = c("center", "scale"),
selected = c("center", "scale"), multiple=T),
shinyWidgets::circleButton("do_ml",
icon = h3(paste("Go"),
icon("hand-pointer-o", "fa-lg")),
status = "default",
size = "lg")
),
column(width=3,align="center",
fluidRow(textOutput("ml_train_ss"),
actionButton("ml_train_ss", label = "train on:", icon = icon("arrow-up"))),
fluidRow(textOutput("ml_test_ss"),
actionButton("ml_test_ss", label = "test on:", icon = icon("arrow-up"))),
br(),
textInput("ml_name", label=h3("Name:"), value = "all"))
)
),
tabPanel("results", value="res", icon=icon("poll"),
br(),
div(selectInput("show_which_ml", label = "Plot which model?", choices = c())),
navbarPage(title=icon("poll"),id="ml_results",inverse=F,
tabPanel(title = "roc",value = "roc",icon=icon("area-chart"),
plotlyOutput("ml_roc",height = "600px"),
div(DT::dataTableOutput("ml_tab",width="100%"),style='font-size:80%')),
tabPanel("importance",value= "bar",icon=icon("star"),
fluidRow(plotlyOutput("ml_bar", width = "100%", height="600px")),
fluidRow(
column(12, sliderInput("ml_top_x",
label = "Show top:",
min = 10,
max = 200,
step=10,
value=20), align="center")
)
)
)
)
)
)
)
),
# this is the sidebar that shows in the analysis tab. contains a lot of settings on the current variable of interest, plot themes and colours, and venn diagrams.
sidebarPanel =
sidebarPanel(align="center",width = 4,
tabsetPanel(id = "anal_sidebar", selected="switch/subset",#type = "pills",
tabPanel(title="export",icon=icon("file"),
radioButtons("export_format", "Which format do you want to export plots to?",
choices = list(".svg", ".eps", ".png", ".jpeg", ".pdf")),
shinyWidgets::circleButton("export_plot", icon=icon("hand-o-up"))),
tabPanel(title="search", icon=icon("search"),
br(),
bsCollapse(bsCollapsePanel(title=h2("Settings"), style="info",
tabsetPanel(id="tab_iden_1", selected = "start",
# forward searching
tabPanel(title=icon("database"),value="start",
uiOutput("db_search_select"),
div(id = "curly-brace", div(id = "left", class = "brace"),
div(id = "right", class = "brace")),
br(),br(),
shinyWidgets::circleButton("select_db_all",
icon = icon("shopping-cart"),
size = "default") # icon("fingerprint"), size = "sm")
), # clicky buttons for database selection; this is generated in 'server'
tabPanel(title=icon("chart-bar"),
plotly::plotlyOutput("curr_plot", height="300px", width="100%") %>% shinycssloaders::withSpinner()
),
tabPanel(title=icon("magic"),
h2("MagicBall settings"),
fluidRow(align="center",switchButton(inputId = "magicball_pubchem_cids",
label = "Check PubChem for predicted formulas?",
col = "BW", type = "YN", value = F)
),
fluidRow(align="center",switchButton(inputId = "magicball_pubchem_details",
label = "Get detailed PubChem matches? (SLOW!)",
col = "BW", type = "YN", value = F)
),
fluidRow(align="center", helpText("Considered adducts:")),
fluidRow(div(DT::dataTableOutput('magicball_add_tab'),style='font-size:100%'),
align="center")
),
tabPanel(title=icon("star-half-alt"),
selectInput("iso_score_method",
"Which method used to score compounds of same weight?",
selected="mscore",
choices=list("M-score"="mscore"
#"Chi-square"="chisq",
#"Mean absolute percentage error"="mape",
#"SIRIUS"="sirius",
#"Network-based"="network"
)),
sliderInput("int_prec", label = "Intensity imprecision", min = 1, max = 100, value = 2, post = "%"),
shinyWidgets::circleButton("score_iso", icon = icon("award"), size = "sm") # icon("fingerprint"), size = "sm")
)
))),
tags$i("Click the detective to search the selected databases for this m/z value."),br(),
fluidRow(
tags$button(
id = "search_mz",
class = "btn btn-default action-button",
img(src = "detective.png",
height = "50px")
),
div(
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:25px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(h2(textOutput("curr_mz"),style="padding:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:25px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
style="background-color:white;
height:55px;
width:115%;
position:relative;
right:30px;
border-top: 1px solid #DFDCDC;
border-bottom: 1px solid #DFDCDC;")
),
tabsetPanel(id="tab_iden_2",
# forward searching
tabPanel(title="mz > molecule",
hr(),
bsCollapse(bsCollapsePanel(title=h2("Compound info"), style="warning",
tabsetPanel(id="tab_iden_3",
tabPanel(title=icon("atlas"),
wellPanel(id = "def",style = "overflow-y:scroll; max-height: 200px",
textOutput("curr_definition"))
),
tabPanel(title=icon("atom"),
textOutput("curr_formula"),
plotOutput("curr_struct", height="300px")
)
))),
bsCollapse(bsCollapsePanel(title=h2("Search results"), style="error",
tabsetPanel(id="tab_iden_4",selected = "start",
# forward searching
tabPanel(title=icon("table"), value="start",
div(DT::dataTableOutput('match_tab', width="100%"),style='font-size:80%'),
hr(),
fluidRow(
switchButton(inputId = "auto_copy",
label = "Auto-copy name to clipboard??",
value = TRUE, col = "GB", type = "YN"),
align="center"),
helpText("Undo filtering"),
fluidRow(
shinyWidgets::circleButton("undo_match_filt", icon = icon("undo-alt"), size = "sm") # icon("fingerprint"), size = "sm")
)
),
tabPanel(title=icon("database"), value="pie_db",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_db") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("plus"), value = "pie_add",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_add") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("percentage"), value = "pie_iso",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_iso") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("cloud"), value = "word_cloud",
fluidRow(align = "center",
conditionalPanel("input.wc_cloudbar == true",
tagList(
wordcloud2::wordcloud2Output("wordcloud_desc"),
tags$script(HTML(
"$(document).on('click', '#canvas', function() {",
'word = document.getElementById("wcSpan").innerHTML;',
"Shiny.onInputChange('selected_word_desc', word);",
"});"
))
)),
conditionalPanel("input.wc_cloudbar == false",
plotlyOutput("wordbar_desc")
),
sliderInput("wc_topn", "Top words shown:", min = 1, max = 100,step = 1,value = 30, width="60%"),
switchButton("wc_cloudbar", label = "", col = "BW", type = "CLBR",value = T)
)
),
tabPanel(title=icon("searchengin"),
textInput('pm_query', "Search for:"),
sliderInput('pm_year', "Paper publication range:",
min = 1900, max = as.numeric(format(Sys.Date(), '%Y')),
value = c(2000,as.numeric(format(Sys.Date(), '%Y'))),
step = 1,sep = ""
),
sliderInput("pm_max",
"Stop after ... papers:",
min = 1,
max = 1000,
value = 500),
shinyWidgets::circleButton("search_pubmed", icon = icon("search"), size = "sm"),
tabsetPanel(selected = 1,
tabPanel(title = icon("cloud"),
conditionalPanel("input.wc_cloudbar_pm == true",
tagList(
wordcloud2::wordcloud2Output("wordcloud_desc_pm"),
tags$script(HTML(
"$(document).on('click', '#canvas', function() {",
'word = document.getElementById("wcSpan").innerHTML;',
"Shiny.onInputChange('selected_word_desc_pm', word);",
"});"
))
)),
conditionalPanel("input.wc_cloudbar_pm == false",
plotlyOutput("wordbar_desc_pm")
),
sliderInput("wc_topn_pm", "Top words shown:", min = 1,
max = 100, step = 1,value = 30, width = "60%"),
switchButton("wc_cloudbar_pm", label = "", col = "BW", type = "CLBR",value = F)
),
tabPanel(title = icon("table"),
div(DT::dataTableOutput('pm_tab', width="100%"),
style='font-size:80%')
)
)
)
)))
),
# reverse searching
tabPanel(title="molecule > mz",
br(),
tags$i("Press the below button to browse compounds of the selected databases."),br(),
actionButton("browse_db", "Browse", icon=icon("eye")),
br(),
tabsetPanel(
tabPanel(NULL, icon = icon("database"),
wellPanel(id = "def",style = "overflow-y:scroll; max-height: 200px",
textOutput("browse_definition")),
div(DT::dataTableOutput('browse_tab'),style='font-size:80%'),
br(),
tags$i("Find m/z values matching adducts or isotopes of this compound."),br(),
actionButton("revsearch_mz", "Search", icon=icon("search"))
),
tabPanel(NULL, icon = icon("search-location"),
div(DT::dataTableOutput('hits_tab'),style='font-size:80%')
)
)
))
),
tabPanel("switch/subset", icon=icon("exchange")
,h2("Current experiment:")
,div(
sardine(h2(textOutput("curr_name"),style="padding:10px;")),
style="background-color:white;
height:55px;
width:115%;
position:relative;
right:30px;
border-top: 1px solid #DFDCDC;
border-bottom: 1px solid #DFDCDC;")
,hr()
,h2("Change variable of interest")
,selectInput("stats_var", label="Do statistics on:", choices = c("label"))
,shinyWidgets::circleButton("change_cls", icon = icon("hand-pointer-o"), size = "sm")
,fluidRow(column(12, align="center", uiOutput("timebutton")))
,hr()
,h2("Subset data")
,selectInput("subset_var", label="Subset data based on:", choices = c("label"))
,selectizeInput("subset_group", label="Group(s) in subset:", choices = c(), multiple=TRUE)
,shinyWidgets::circleButton("change_subset", icon = icon("hand-pointer-o"), size = "sm")
,shinyWidgets::circleButton("reset_subset", icon = icon("undo"), size = "sm")
),
# this tab is used to select user plot theme and user colours (discrete and continuous)
tabPanel("plot aesthetics", icon=icon("paint-brush"),
h2("Summary plot style"),br(),
selectizeInput("ggplot_sum_style", multiple=T, label = "Style(s)", choices = list("Box"="box",
"Violin"="violin",
"Beeswarm"="beeswarm",
"Scatterplot"="scatter"),
selected = c("violin")
),
selectInput("ggplot_sum_stats", label = "Stats shown", choices = list("median", "mean", "none")),
h2("Shape")
,selectInput("shape_var", label="Marker shape based on:", choices = c("label"))
,h2("Color")
,selectInput("col_var", label="Marker color based on:", choices = c("label"))
,h2("Hover text")
,selectInput("txt_var", label="Marker hover text based on:", choices = c("label")),
h2("Plot theme"),
selectInput("ggplot_theme", label = "Theme", choices = list("Grid, white bg"="bw",
"No grid, white bg"="classic",
"Grid, gray bg"="gray",
"Minimal"="min",
"Grid, black bg"="dark",
"Grid, white bg, gray axes"="light",
"Line drawing"="line"),
selected = opts$gtheme),
fluidRow(plotOutput("ggplot_theme_example",inline = F, width="100%")),
h2("Continuous data"),
# the below options need to match with the corresponding function storage in 'global'. if you want to add more it'll go here!
selectInput("color_ramp", label = "Color scheme", choices = list("RAINBOW!"="rb",
"Yellow - blue"="y2b",
"Matlab 1"="ml1",
"Matlab 2 "="ml2",
"Magenta - Green"="m2g",
"Cyan - yellow"="c2y",
"Blue - yellow"="b2y",
"Green - red"="g2r",
"Blue - green"="b2g",
"Blue - red"="b2r",
"Blue - pink (pastel)"="b2p",
"Blue - green - yellow"="bgy",
"Green - yellow - white"="gyw",
"Red - yellow - white"="ryw",
"Grayscale"="bw",
"Blues (brew)" = "Blues",
"Blue - green (brew)" = "BuGn",
"Blue - purple (brew)" = "BuPu",
"Green - blue (brew)" = "GnBu",
"Greens (brew)" = "Greens",
"Grayscale (brew)" = "Greys",
"Oranges (brew)" = "Oranges",
"Orange - red (brew)" = "OrRd",
"Purple - blue (brew)" = "PuBu",
"Purple - blue - green (brew)" = "PuBuGn",
"Purple - red (brew)" = "PuRd",
"Purples (brew)" = "Purples",
"Red - purple (brew)" = "RdPu",
"Reds (brew)" = "Reds",
"Yellow - green (brew)" = "YlGn",
"Yellow - green - blue (brew)" = "YlGnBu",
"Yellow - orange - brown (brew)" = "YlOrBr",
"Yellow - orange - red (brew)"="YlOrRd",
"BrBG", "PiYG", "PRGn", "PuOr", "RdBu", #TODO: add descriptions (or remove all?)
"RdGy", "RdYlBu", "RdYlGn", "Spectral",
"Accent", "Dark2", "Paired", "Pastel1",
"Pastel2", "Set1", "Set2", "Set3"),selected = opts$gspec
),
# preview plot
fluidRow(plotly::plotlyOutput("ramp_plot",inline = T, width="100%") %>% shinycssloaders::withSpinner()),
h2("Discrete data"),
uiOutput("colorPickers") # colour pickers generated in server.R. default settings taken from user_options.txt.
))
)
)),
# report tab
tabPanel("report",
icon = icon("file-invoice", class = "outlined"),
value="reportTab",
fluidRow(
column(width=12, align="center",
h2("Report"),
br(),
helpText("Report contents:"),
div(DT::dataTableOutput('report_unselected', width="100%"))
)#close column
)#close fluidrow
),#close tabpanel
# this tab is used to change general settings.
tabPanel("settings", icon = icon("cog",class = "outlined"), value="options",
navbarPage(inverse=TRUE,"Settings", id="tab_settings",
tabPanel("Mode", icon=icon("box-open"),
switchButton(inputId = "db_only", label = "Run in database-only mode?",
value = switch(opts$mode, dbonly=T, complete=F),
col = "BW", type = "YN")
),
tabPanel("Project", icon=icon("gift"),
#textInput(inputId="proj_name", label="Project name", value = ''),
selectizeInput(inputId="proj_name",
label="Project name",
choices=lcl$vectors$project_names, # existing projects in user folder (generated in 'global')
selected = opts$proj_name,
options=list(create = TRUE)), # let users add new names
actionButton("set_proj_name", label="Apply"),
helpText("This name will be used in all save files."),
textOutput("proj_name")
),
# change list of adducts used, or add your own
# TODO: fix, i think this is currently non-functional
tabPanel("Adducts", icon=icon("plus-square"),
h3("Current adduct table:"),
rhandsontable::rHandsontableOutput("adduct_tab", width=800, height=600),
shinySaveButton("save_adducts",
"Save changed table",
"Save file as ...",
filetype=list(RData="RData", csv="csv")
),
hr(),
fileInput("add_tab", "Import adduct table",
multiple = F,
accept = c(".RData", ".csv")),
sardine(actionButton("import_adducts", "Import", icon = icon("hand-peace-o"))),
sardine(imageOutput("adduct_upload_check",inline = T))
),
# change toolbar colour, text font and size
tabPanel("Aesthetic", icon=icon("child"),
h3("Change app settings"),
hr(),
h2("Navigation bar colours"),
colourpicker::colourInput(inputId = "bar.col.1",
label = paste("Active background"),
value = opts$col1,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.2",
label = paste("Inactive background"),
value = opts$col2,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.3",
label = paste("Active tab"),
value = opts$col3,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.4",
label = paste("Inactive tab"),
value = opts$col4,
allowTransparent = FALSE),
br(),
h2("Fonts (Google fonts)"),
textInput(inputId="font.1", label="h1", value = opts$font1),
textInput(inputId="font.2", label="h2", value = opts$font2),
textInput(inputId="font.3", label="h3", value = opts$font3),
textInput(inputId="font.4", label="body", value = opts$font4),
br(), # TODO: font size modifier slider
h2("Font size"),
sliderInput("size.1", label="h1", value=as.numeric(opts$size1),min = 5, max=50),
sliderInput("size.2", label="h2", value=as.numeric(opts$size2),min = 5, max=50),
sliderInput("size.3", label="h3", value=as.numeric(opts$size3),min = 5, max=50),
sliderInput("size.4", label="body", value=as.numeric(opts$size4),min = 5, max=50),
br(),
h3("Taskbar image"),
div(imageOutput("taskbar_image",inline = T)),
shinyFilesButton('taskbar_image_path',
'Select image',
'Please select an image file',
FALSE),
hr(),
actionButton("change_css", "Save settings (restart to apply)") # need to reload CSS to enable new settings
)
)
),
# prompt user on opening the quit tab.
# TODO: add 'save project?' dialog
#tabPanel(title = "", value="stop", icon = icon("times-circle",class = "outlined")),
div(class="spinnylocation1",
div(class="plus", img(class="imagetop", src=opts$taskbar_image, width="100px", height="100px")),
div(class="minus", img(class="imagebottom", src=opts$taskbar_image, width="100px", height="100px"))
),
div(class="line")
,footer=fluidRow(
br(),br(),br(),
div(
#actionButton("show_window", label="", icon = icon("map-marked")),
actionButton("load_mset", label="load", icon = icon("folder-open"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;")),
actionButton("save_mset", label="save", icon = icon("save"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;")),
actionButton("debug", label="debug", icon = icon("bug"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;"))
, style=gsubfn::fn$paste("position:fixed;bottom:0;width:100%;height:40px;z-index:1005;background-color:$bgcol;border-style:solid; border-color:black;border-width:1px;")),
align="center")
)
)
removeModal()
return(html)
}
})
|
/backend/scripts/reactive/loginUi.R
|
permissive
|
mwang87/MetaboShiny
|
R
| false
| false
| 121,735
|
r
|
output$currUI <- renderUI({
if(logged$status == "notlogged"){
fluidRow(align="center",
br(),br(),br(),br(),br(),
imageOutput("login_header",inline = T),
textInput("username", "username:"),
passwordInput("password", "password:"),
tags$style(type="text/css", "#string { height: 50px; width: 100%; text-align:center;
font-size: 30px; display: block;}"),
shinyWidgets::circleButton("login", icon = icon("arrow-right")),
br(),br(),
div(style="width:300px;",verbatimTextOutput("login_status", placeholder = FALSE)))
}else if(logged$status == "logged"){
print("rendering...")
# Read user settings (colours, fonts, sizes, project name, ...) from disk.
opts <- getOptions(lcl$paths$opt.loc)
# FIX: compute connectivity BEFORE it is used — the original assigned 'online'
# only after app.font.css() had already referenced it, so the call relied on a
# stale global (or errored on a fresh session).
online = internetWorks()
# generate CSS for the interface based on user settings for colours, fonts etc.
bar.css <<- nav.bar.css(opts$col1, opts$col2, opts$col3, opts$col4)
font.css <<- app.font.css(opts$font1, opts$font2, opts$font3, opts$font4,
                          opts$size1, opts$size2, opts$size3, opts$size4, online=online)
# === GOOGLE FONT SUPPORT FOR GGPLOT2 ===
# Register every option whose name contains "font" with sysfonts so ggplot2
# can render it; try() swallows failures (e.g. a font Google doesn't host).
if(online){
  lapply(c(opts[grepl(pattern = "font", names(opts))]), function(font){
    try({
      sysfonts::font_add_google(name = font,
                                family = font,
                                regular.wt = 400,
                                bold.wt = 700)
    })
  })
}
# set taskbar image as set in options
taskbar_image <- opts$task_img
# parse colour options into the session-wide aesthetics store
lcl$aes$mycols <<- get.col.map(lcl$paths$opt.loc) # colours for discrete sets, like group A vs group B etc.
lcl$aes$theme <<- opts$gtheme # ggplot2 theme name (matches the "ggplot_theme" selector choices)
lcl$aes$spectrum <<- opts$gspec # gradient/colour-ramp name for heatmaps, volcano plot etc.
# navbar background colour, reused below for the footer button styling
bgcol <<- opts$col1
# - - load custom dbs - -
# Each custom database is an .RData file in <db_dir>/custom that defines
# 'meta.dbpage' (description shown in the DB tab) and 'meta.img' (its icon).
has.customs <- dir.exists(file.path(lcl$paths$db_dir, "custom"))
if(has.customs){
  customs = list.files(path = file.path(lcl$paths$db_dir, "custom"),
                       pattern = "\\.RData")
  dbnames = unique(tools::file_path_sans_ext(customs))
  for(db in dbnames){
    # add name to global, keeping the "custom" marker as the last entry
    dblist <- gbl$vectors$db_list
    # FIX: the original used dblist[-which(dblist == "custom")], which wipes
    # the ENTIRE list when "custom" is absent (x[-integer(0)] is empty).
    dblist <- setdiff(dblist, "custom")
    if(!(db %in% dblist)){
      dblist <- c(dblist, db, "custom")
      gbl$vectors$db_list <- dblist
    }
    # load() is expected to bind 'meta.dbpage' and 'meta.img' here
    metadata.path <- file.path(lcl$paths$db_dir, "custom", paste0(db, ".RData"))
    load(metadata.path)
    # add description to global
    gbl$constants$db.build.info[[db]] <- meta.dbpage
    # append image to global image list
    maxi = length(gbl$constants$images)
    gbl$constants$images[[maxi + 1]] <- meta.img
  }
}
# init stuff that depends on opts file
lcl$proj_name <<- opts$proj_name
lcl$paths$patdb <<- file.path(opts$work_dir, paste0(opts$proj_name, ".db"))
lcl$paths$csv_loc <<- file.path(opts$work_dir, paste0(opts$proj_name, ".csv"))
# Default texts rendered into the UI (name = output id, text = shown value).
# NOTE(review): 'curr_exp_dir' reads lcl$paths$work_dir while the paths above
# use opts$work_dir — confirm both always point at the same directory.
lcl$texts <<- list(
  list(name='curr_exp_dir',text=lcl$paths$work_dir),
  list(name='curr_db_dir',text=lcl$paths$db_dir),
  list(name='ppm',text=opts$ppm),
  list(name='proj_name',text=opts$proj_name)
)
# Known projects = every .csv in the work dir, with the suffix stripped.
lcl$vectors$project_names <<- unique(gsub(list.files(opts$work_dir,pattern = "\\.csv"),pattern = "(_no_out\\.csv)|(\\.csv)", replacement=""))
updateSelectizeInput(session,
                     "proj_name",
                     # FIX: was lcl$vectors$proj_names (never assigned, so the
                     # dropdown got NULL choices); the vector built above is
                     # called 'project_names'.
                     choices = lcl$vectors$project_names,
                     selected = opts$proj_name)
lapply(lcl$texts, FUN=function(default){
  output[[default$name]] = renderText(default$text)
})
# FIX: use '<<-' like every other lcl$ assignment in this branch; a plain '<-'
# would create a function-local copy of 'lcl' that shadows the global one.
lcl$aes$font <<- list(family = opts$font4,
                      ax.num.size = 11,
                      ax.txt.size = 15,
                      ann.size = 20,
                      title.size = 25)
# other default stuff that needs opts
# showtext lets base/ggplot2 graphics use the Google fonts registered above.
library(showtext)
# NOTE(review): 'online' was already computed earlier in this branch; this
# recomputation looks redundant — confirm nothing below depends on a refresh.
online = internetWorks()
# Build one colour picker per allowed discrete colour (count fixed in gbl);
# default values come from the user's saved colour map. Input ids col_1..col_n
# are read back by the persistence observer below.
output$colorPickers <- renderUI({
  # FIX: seq_len() instead of c(1:n) — 1:n yields c(1, 0) when n == 0.
  lapply(seq_len(gbl$constants$max.cols), function(i) {
    colourpicker::colourInput(inputId = paste("col", i, sep="_"),
                              label = paste("Choose colour", i),
                              value = lcl$aes$mycols[i],
                              allowTransparent = F)
  })
})
# Persist picked colours: whenever any col_i input changes, write the full
# colour vector back to the options file and the in-memory colour map.
observe({
  raw <- lapply(seq_len(gbl$constants$max.cols), function(i) {
    input[[paste("col", i, sep="_")]]
  })
  # FIX: the original tested !any(is.null(unlist(raw))), which is always TRUE
  # because unlist() silently drops NULLs and is.null() on a vector is a
  # single FALSE — so a partially initialised picker set could overwrite the
  # saved map with a too-short vector. Check for NULLs before flattening.
  if(!any(vapply(raw, is.null, logical(1)))){
    values <- unlist(raw)
    if(lcl$paths$opt.loc != ""){
      set.col.map(optionfile = lcl$paths$opt.loc, values)
      lcl$aes$mycols <<- values
    }
  }
})
# Re-apply the saved plot theme and colour-ramp choices to their selectors.
updateSelectInput(session, "ggplot_theme", selected = opts$gtheme)
updateSelectInput(session, "color_ramp", selected = opts$gspec)
html = tagList(
tags$style(type="text/css", bar.css),
tags$script(src="spinnytitle.js"),
tags$script(src="sparkle.js"),
# - - - - - - - - -
navbarPage(windowTitle='MetaboShiny',
inverse=TRUE,
# use this for title
# https://codepen.io/maxspeicher/pen/zrVKLE
title=div(h1("MetaboShiny"), class="outlined", tags$style(type="text/css", font.css), id="sparkley"), # make it use the sparkle.js for unnecessary sparkle effects ;)
id="nav_general",
# this tab shows the available databases, if they are installed, and buttons to install them. generated as output$db_build_ui in 'server'
tabPanel("database", icon = icon("database",class = "outlined"), value="database",
fluidRow(align="center", bsCollapse(bsCollapsePanel(title=h2("Settings"), style="info",
sliderInput(inputId = "db_mz_range", label = "What mass range can your machine detect?",
min = 0, max = 3000, value = c(60, 600),
step = 1,post = "m/z",dragRange = TRUE),
tags$i("Warning: increasing the upper m/z boundary may drastically increase database build times
- calculating isotopes is more time-consuming for larger molecules!"),
radioButtons("db_build_mode", label = "Build base database, extended (isotopes+adducts) or both?",
choices = c("base","extended","both"),selected = "extended"),
tags$i("For example: if you only defined a new adduct, pick 'extended' as the source database doesn't change.")
)
)),
uiOutput("db_build_ui") #%>% shinycssloaders::withSpinner() # see server, is autogenerated now
),
tabPanel("data import", icon = icon("upload", class = "outlined"),
fluidRow(column(12, align="center",
textInput("proj_name_new", label = "STEP 1: What is your project name?", value = lcl$proj_name),
sliderInput("ppm", "STEP 2: What level accuracy is your mass spectrometer?",min = 0.1,max = 50,value = 5,step = .1))),
hr(),
fluidRow(column(3, align="center",
imageOutput("merge_icon",inline = T),
radioButtons("importmode", label = "",
choices = list("Peaks are in a .db file"="db", "Peaks are in two .csv files (pos/neg mode)"="csv"),
selected = "db"),
tags$b("STEP 3: Click buttons to load data."),
shinyFilesButton('metadata', 'metadata', 'Select metadata in csv/xls(x)', FALSE),
conditionalPanel(condition = "input.importmode == 'db'",
shinyFilesButton('database', 'database', 'Select .db file', FALSE)
),
conditionalPanel(condition = "input.importmode == 'csv'",
shinyFilesButton('outlist_pos', '+ peaks', 'Select .csv for - mode peaks', FALSE),
shinyFilesButton('outlist_neg', '- peaks', 'Select .csv for + mode peaks', FALSE)
)
)
,column(2, align="center", #ok
tags$b("STEP 4: Merge data and metadata"),br(),br(),
shinyWidgets::circleButton("create_db", icon = icon("long-arrow-alt-right", class = "fa-2x"), size = "lg"))
,column(2, align="center", # issue lol
imageOutput("db_icon")
)
,column(2, align="center",
tags$b("STEP 5: Convert to input-ready format"),
br(),br(),
shinyWidgets::circleButton("create_csv", icon = icon("long-arrow-alt-right", class = "fa-2x"), size = "lg"))
,column(3, align="center",
imageOutput("laptop_icon", inline=T),br(),br(),
div(DT::dataTableOutput('csv_tab'),style='font-size:80%')
)
),
fluidRow(column(3, align="center",
tags$i("Input files chosen?"),br(),br(),
imageOutput("proj_merge_check")
),
column(2, align="center",
tags$i("Database present?"),br(),br(),
imageOutput("proj_db_check"),offset = 2),
column(3, align="center",
tags$i("Final table present?"),br(),br(),
imageOutput("proj_csv_check", inline=T),br(),br(),
tags$b("STEP 6: If "), icon("check-circle"), tags$b(" continue to normalization"),
offset = 2))
),
# this tab is used to perform normalization of your data. settings are processed as input$filt_type etc. in 'server'.
tabPanel("normalize", icon = icon("shower",class = "outlined"), value="filter",
fluidRow(column(3, aligh="center",
selectInput('samp_var', 'Which variable represents sample amount/concentration?', choices = c("")), #TODO: only show this when normalize by sample specific factor (specnorm) is selected
selectizeInput('batch_var', 'What are your batch variables?', choices = c("batch"), multiple=TRUE, options = list(maxItems = 2L)),
actionButton("check_csv", "Get options", icon=icon("refresh")),
hr(),
shinyWidgets::sliderTextInput("perc_limit","Max. missing feature percent:",
choices=c(0, 0.0001, 0.001, 0.01, 0.1, seq(1, 100, 1)),
selected=1, grid = T),
selectInput('filt_type', 'How will you filter your m/z values?', choices = list("Interquantile range"="iqr",
"Relative stdev"="rsd",
"Non-parametric relative stdev"="nrsd",
"Mean"="mean",
"Standard deviation"="sd",
"Median absolute deviation"="mad",
"Median"="median",
"None"="none"),
selected = "none"),
selectInput('norm_type', 'What type of normalization do you want to do?', choices = list("Quantile normalization"="QuantileNorm",
"By reference feature"="ProbNorm",
"By reference compound"="CompNorm",
"By sample specific factor"="SpecNorm",
"Sum"="SumNorm",
"Median"="MedianNorm",
"None"="NULL")),
uiOutput("ref_select"),
selectInput('trans_type', 'How will you transform your data?', choices = list("Log transform"="LogNorm",
"Cubic root transform"="CrNorm",
"None"="NULL")),
selectInput('scale_type', 'How will you scale your data?', choices = list("Autoscale/Z-transform"="AutoNorm",
"Mean-center"="MeanCenter",
"Pareto Scaling"="ParetoNorm",
"Range scaling"="RangeNorm",
"None"="NULL")),
selectInput('miss_type', 'How to deal with missing values?', choices = list("Half feature minimum"="colmin",
"Half sample minimum"="rowmin",
"Total minimum"="min",
"Random forest"="rf",
#"Impute w/ regression"="regr",
"KNN imputation"="knn",
"SVD imputation"="svdImpute",
"BPCA imputation"="bpca",
"PPCA imputation"="ppca",
"Median"="median",
"Mean"="mean",
"Leave them out"="exclude",
"Leave them alone"="none"),
selected = "knn"),
conditionalPanel("input.miss_type == 'rf'",
sliderInput("rf_norm_ntree", label = "Trees built per variable", value = 10, min = 1, max = 50, step=1),
#numericInput("rf_norm_mtry", label = "Trees built per variable", value = 10, min = 1, max = 50)
radioButtons("rf_norm_parallel", label = "Parallelize?", choices = list("no",
"forests",
"variables"),
selected = "variables")
),
# - - - - - -
switchButton(inputId = "remove_outliers",
label = "Exclude outliers?",
value = FALSE, col = "BW", type = "YN"),
actionButton("initialize", "Go", icon=icon("hand-o-right")),
hr(),
imageOutput("dataset_icon",inline = T),
fileInput("pat_dataset", "Import dataset",
multiple = F,
accept = c(".RData")),
actionButton("import_dataset", "Import", icon = icon("hand-peace-o")),
imageOutput("dataset_upload_check",inline = T)
), column(9,
# show the summary plots post-normalization
navbarPage(inverse=F,h3("explore"),
tabPanel("m/z values",# icon=icon("braille"),
fluidRow(column(6,plotOutput("var1",height='300px')),
column(6,plotOutput("var3", height='300px'))
),
fluidRow(column(6,plotOutput("var2", height='500px')),
column(6,plotOutput("var4", height='500px')))
),
tabPanel("samples",# icon=icon("tint"),
fluidRow(column(6,plotOutput("samp1",height='300px')),
column(6,plotOutput("samp3", height='300px'))
),
fluidRow(column(6,plotOutput("samp2", height='500px')),
column(6,plotOutput("samp4", height='500px')))
)
)
)
)),
tabPanel("prematch", icon = icon("search", class = "outlined"), value = "prematch",
# - - - pre-matching part - - -
fluidRow(align="center",
switchButton(inputId = "do_prematch",
label = "Do matching beforehand?",
col = "BW",
type = "YN"),
tags$i("All m/z values will be searched in the databases of choice and the results will be saved to your save file for fast access."),br(),
tags$i("Search results can still be overridden by manual searching. Don't forget to save after!")),
br(),
fluidRow(align="center",
column(2),
column(8, conditionalPanel("input.do_prematch == true",
h2("Included databases:"),
uiOutput("db_prematch_select"),
shinyWidgets::circleButton("select_db_prematch_all",
icon = icon("shopping-cart"),
size = "default")),
hr(),
fluidRow(column(6,h2("Find matches"),
shinyWidgets::circleButton(inputId = "prematch",
icon = icon("searchengin"))),
column(6,h2("Clear matches"),
shinyWidgets::circleButton(inputId = "clear_prematch",
icon = icon("trash"))))
),
column(2)
)),
# this tab is the main analysis tab. all tabs for all analyses are listed here, but the visibility is changed depending on the current experiment
tabPanel("analyse", icon = icon("bar-chart",class = "outlined"), value = "analysis",
sidebarLayout(position="right",
mainPanel = mainPanel(width = 8,
tabsetPanel(id="statistics",selected = "pca",
#navbarPage(inverse=F, "", id="statistics", selected = "pca", collapsible = T,
# TODO: T-SNE
# this tab shows general information, mostly a message with 'please give me some data' :-)
tabPanel(icon("star"), value = "inf",
fluidRow(column(width=12, align="center",
br(),br(),br(),br(),
#hr(),
#icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"),
br(),br(),
h2("Please select a variable of interest in the sidebar!"),br(),
icon("exchange", "fa-4x"),
br(),br(),br()
#hr()
#icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg"), icon("arrow-right","fa-lg")
))),
tabPanel("dimension reduction", value = "dimred", icon=icon("cube"),
navbarPage(inverse=T, icon("cube"), id = "dimred",
# loading this tab performs PCA. summary and loading tables, alongside a 2d/3d PCA plot, are available here.
tabPanel("pca", value = "pca", #icon=icon("cube"),
fluidRow(align="center",column(12,plotly::plotlyOutput("plot_pca",height = "600px", width="600px"))),#%>% shinycssloaders::withSpinner())),
fluidRow(align="center",column(12,
switchButton("pca_2d3d", label = "", col = "BW", type = "2d3d", value=T))),
hr(),
fluidRow(column(3,
selectInput("pca_x", label = "X axis:", choices = paste0("PC",1:20),selected = "PC1",width="100%"),
selectInput("pca_y", label = "Y axis:", choices = paste0("PC",1:20),selected = "PC2",width="100%"),
selectInput("pca_z", label = "Z axis:", choices = paste0("PC",1:20),selected = "PC3",width="100%")),
column(9,
tabsetPanel(id="pca_2",
tabPanel(title="Table",
div(DT::dataTableOutput('pca_tab',width="100%"),style='font-size:80%')),
tabPanel(title="Scree",
plotOutput("pca_scree", width = "100%", height="250px")
),
tabPanel(title="Loadings",
div(DT::dataTableOutput('pca_load_tab',width="100%"),style='font-size:80%'))
))
)
),
# TODO: enable the sparse and orthogonal PLS-DA options in metaboanalystR
# this tab is used to perform pls-da. it triggers on 'go' button as it is a time costly analysis.
tabPanel("pls-da", value = "plsda",
fluidRow(align="center",column(12,plotly::plotlyOutput("plot_plsda",height = "500px", width="500px"))),
fluidRow(align="center",column(12,
switchButton("plsda_2d3d", label = "", col = "BW", type = "2d3d"))),
hr(),
fluidRow(column(3,
div(style="display:inline-block",
selectInput("plsda_type",
label="Type:",
choices=list("Normal"="normal")
#,
# "Orthogonal"="ortho",
# "Sparse"="sparse")
,width = '100px',
selected=1)),
div(style="display:inline-block",
shinyWidgets::circleButton("do_plsda", icon = icon("hand-pointer-o"), size = "sm")
),
selectInput("plsda_x", label = "X axis:", choices = paste0("PC",1:8),selected = "PC1",width="100%"),
selectInput("plsda_y", label = "Y axis:", choices = paste0("PC",1:8),selected = "PC2",width="100%"),
selectInput("plsda_z", label = "Z axis:", choices = paste0("PC",1:8),selected = "PC3",width="100%")),
column(9,
tabsetPanel(id="plsda_2",
tabPanel(title="Cross-validation",
plotOutput("plsda_cv_plot")),
tabPanel(title="Permutation",
plotOutput("plsda_perm_plot")),
tabPanel(title="Table",
div(DT::dataTableOutput('plsda_tab',width="100%"),style='font-size:80%')),
tabPanel(title="Loadings",
div(DT::dataTableOutput('plsda_load_tab',width="100%"),style='font-size:80%'))
))
)
),
tabPanel("t-sne", value = "tsne",
helpText("working on it")
)
)),
tabPanel("per m/z", value = "permz", icon=icon("fingerprint"),
navbarPage(inverse=T, icon("fingerprint"), id = "permz",
tabPanel("t-test", value="tt",
fluidRow(plotly::plotlyOutput('tt_specific_plot',width="100%")),
fluidRow(align="center",
sardine(switchButton("tt_nonpar", "Non-parametric?", col="BW", type="YN", value = T)),
#sardine(uiOutput("tt_parbutton")),
sardine(switchButton("tt_eqvar", "Equal variance?", col="BW", type="YN", value = T))
),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('tt_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('tt_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
)
)),
tabPanel("anova", value="aov",
fluidRow(plotly::plotlyOutput('aov_specific_plot',width="100%")),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('aov_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('aov_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
)
)),
tabPanel("fold-change", value="fc",
fluidRow(plotly::plotlyOutput('fc_specific_plot',width="100%")),
navbarPage(inverse=F,"",
tabPanel("", icon=icon("table"),
div(DT::dataTableOutput('fc_tab',width="100%"),style='font-size:80%'))
,tabPanel("", icon=icon("area-chart"),
plotly::plotlyOutput('fc_overview_plot',height="300px") %>% shinycssloaders::withSpinner()
))
),
tabPanel("meba", value="meba",
fluidRow(plotly::plotlyOutput('meba_specific_plot',height="600px")),
fluidRow(div(DT::dataTableOutput('meba_tab', width="100%"),style='font-size:80%'))
),
tabPanel("asca", value="asca",
fluidRow(plotly::plotlyOutput('asca_specific_plot', height="600px")),
fluidRow(div(DT::dataTableOutput('asca_tab',width="100%"),style='font-size:80%'))
)
)
),
tabPanel("overview analyses", value = "overview", icon=icon("globe"),
navbarPage(inverse=T, icon("globe"), id = "overview",
tabPanel("volcano plot", value="volc",
fluidRow(plotly::plotlyOutput('volc_plot',width="100%",height="600px") %>% shinycssloaders::withSpinner()),
fluidRow(div(DT::dataTableOutput('volc_tab',width="100%"),style='font-size:80%'))
),
tabPanel("heatmap", value="heatmap",
plotly::plotlyOutput("heatmap",width="100%",height="700px") %>% shinycssloaders::withSpinner(),
br(),
fluidRow(column(align="center",
width=12,
sliderInput("heatmap_topn", "Use top ... from table:", value=100, min = 10, max = 200))
),
fluidRow(column(align="center",
width=12,
uiOutput("heatbutton"),
switchButton("heatsign", label = "Only significant hits?", col = "GB", type = "YN"),
switchButton("heatlimits", label = "Color based on -all- metabolites?", col = "GB", type = "YN")
))
),
# this tab is used to find overlapping features of interest between analyses
# TODO: enable this with multiple saved mSets in mSet$storage
tabPanel(title="venn", value="venn", #icon=icon("comments"),
sidebarLayout(position = "left",
sidebarPanel = sidebarPanel(
fluidRow(div(DT::dataTableOutput('venn_unselected'),style='font-size:80%'), align="center"),
fluidRow(shinyWidgets::circleButton("venn_add", icon=icon("arrow-down"), size="sm"),
shinyWidgets::circleButton("venn_remove", icon=icon("arrow-up"), size="sm"),
align="center"),
fluidRow(div(DT::dataTableOutput('venn_selected'),style='font-size:80%'),align="center"),
hr(),
fluidRow(
sliderInput("venn_tophits", label = "Only include top:", min = 1, max = 200, post = " hits", value=20)
,align="center"),
fluidRow(
shinyWidgets::circleButton("venn_build", icon=icon("hand-pointer-o"),size="default")
,align="center")
),
mainPanel = mainPanel(
hr(),
plotOutput("venn_plot",inline = F),
# find the overlapping compounds between the groups you want to compare (user select)
# TODO: enable this with clicking the numbers/areas
fluidRow(selectInput("intersect_venn", label = "Show hits from (only):", selected = 1,choices = "",multiple = T),
align="center"),
fluidRow(uiOutput("venn_pval"), align="center"),
br(),
fluidRow(div(DT::dataTableOutput('venn_tab'),style='font-size:80%'),
align="center")
))
)
)
),
# this tab enables machine learning
tabPanel("machine learning", value = "ml", icon=icon("signature"),
br(),
navbarPage(inverse=F, icon("signature"), id = "ml",
tabPanel("initialize", value="init",
fluidRow(
column(width=3,align="center",
selectInput("ml_perf_metr", label=h2("Performance metric"),
choices = c("boot", "boot632", "optimism_boot",
"boot_all", "cv", "repeatedcv",
"LOOCV", "LGOCV", "none", "oob",
"timeslice", "addaptive_cv", "adaptive_boot",
"adaptive_LGOCV"),
multiple = F, selected = "repeatedcv"),
sliderInput("ml_train_perc",
label = h2("Percentage in training"),
min = 1,
max = 100,
step = 1,
value = 60,
post = "%"),
selectInput("ml_folds", label=h2("Fold CV"),choices = c("5",
"10",
"20",
"50",
"LOOCV"),
multiple = F),
sliderInput("ml_attempts",
label = "Attempts",
min = 1,
max = 100,
step = 1,
value = 20,
post = "x")
),
column(width=6,align="center",
selectInput("ml_method",
label = h2("Used algorithm"),
selected = "glmnet",
choices = {
lst = as.list(gbl$constants$ml.models)
# names(lst) <- sapply(gbl$constants$ml.models, function(mdl) caret.mdls[[mdl]]$label)
lst
},
multiple = F),
div(uiOutput("ml_params"), style = "font-size:60%"),
selectizeInput("ml_preproc", label = h2("Data reprocessing"),
choices = c("center", "scale"),
selected = c("center", "scale"), multiple=T),
shinyWidgets::circleButton("do_ml",
icon = h3(paste("Go"),
icon("hand-pointer-o", "fa-lg")),
status = "default",
size = "lg")
),
column(width=3,align="center",
fluidRow(textOutput("ml_train_ss"),
actionButton("ml_train_ss", label = "train on:", icon = icon("arrow-up"))),
fluidRow(textOutput("ml_test_ss"),
actionButton("ml_test_ss", label = "test on:", icon = icon("arrow-up"))),
br(),
textInput("ml_name", label=h3("Name:"), value = "all"))
)
),
tabPanel("results", value="res", icon=icon("poll"),
br(),
div(selectInput("show_which_ml", label = "Plot which model?", choices = c())),
navbarPage(title=icon("poll"),id="ml_results",inverse=F,
tabPanel(title = "roc",value = "roc",icon=icon("area-chart"),
plotlyOutput("ml_roc",height = "600px"),
div(DT::dataTableOutput("ml_tab",width="100%"),style='font-size:80%')),
tabPanel("importance",value= "bar",icon=icon("star"),
fluidRow(plotlyOutput("ml_bar", width = "100%", height="600px")),
fluidRow(
column(12, sliderInput("ml_top_x",
label = "Show top:",
min = 10,
max = 200,
step=10,
value=20), align="center")
)
)
)
)
)
)
)
),
# this is the sidebar that shows in the analysis tab. contains a lot of settings on the current variable of interest, plot themes and colours, and venn diagrams.
sidebarPanel =
sidebarPanel(align="center",width = 4,
tabsetPanel(id = "anal_sidebar", selected="switch/subset",#type = "pills",
tabPanel(title="export",icon=icon("file"),
radioButtons("export_format", "Which format do you want to export plots to?",
choices = list(".svg", ".eps", ".png", ".jpeg", ".pdf")),
shinyWidgets::circleButton("export_plot", icon=icon("hand-o-up"))),
tabPanel(title="search", icon=icon("search"),
br(),
bsCollapse(bsCollapsePanel(title=h2("Settings"), style="info",
tabsetPanel(id="tab_iden_1", selected = "start",
# forward searching
tabPanel(title=icon("database"),value="start",
uiOutput("db_search_select"),
div(id = "curly-brace", div(id = "left", class = "brace"),
div(id = "right", class = "brace")),
br(),br(),
shinyWidgets::circleButton("select_db_all",
icon = icon("shopping-cart"),
size = "default") # icon("fingerprint"), size = "sm")
), # clicky buttons for database selection; this is generated in 'server'
tabPanel(title=icon("chart-bar"),
plotly::plotlyOutput("curr_plot", height="300px", width="100%") %>% shinycssloaders::withSpinner()
),
tabPanel(title=icon("magic"),
h2("MagicBall settings"),
fluidRow(align="center",switchButton(inputId = "magicball_pubchem_cids",
label = "Check PubChem for predicted formulas?",
col = "BW", type = "YN", value = F)
),
fluidRow(align="center",switchButton(inputId = "magicball_pubchem_details",
label = "Get detailed PubChem matches? (SLOW!)",
col = "BW", type = "YN", value = F)
),
fluidRow(align="center", helpText("Considered adducts:")),
fluidRow(div(DT::dataTableOutput('magicball_add_tab'),style='font-size:100%'),
align="center")
),
tabPanel(title=icon("star-half-alt"),
selectInput("iso_score_method",
"Which method used to score compounds of same weight?",
selected="mscore",
choices=list("M-score"="mscore"
#"Chi-square"="chisq",
#"Mean absolute percentage error"="mape",
#"SIRIUS"="sirius",
#"Network-based"="network"
)),
sliderInput("int_prec", label = "Intensity imprecision", min = 1, max = 100, value = 2, post = "%"),
shinyWidgets::circleButton("score_iso", icon = icon("award"), size = "sm") # icon("fingerprint"), size = "sm")
)
))),
tags$i("Click the detective to search the selected databases for this m/z value."),br(),
fluidRow(
tags$button(
id = "search_mz",
class = "btn btn-default action-button",
img(src = "detective.png",
height = "50px")
),
div(
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:25px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(h2(textOutput("curr_mz"),style="padding:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:25px;")),
sardine(div(icon("paw","fa-xs fa-rotate-90"),
style="position:relative;
top:10px;")),
style="background-color:white;
height:55px;
width:115%;
position:relative;
right:30px;
border-top: 1px solid #DFDCDC;
border-bottom: 1px solid #DFDCDC;")
),
tabsetPanel(id="tab_iden_2",
# forward searching
tabPanel(title="mz > molecule",
hr(),
bsCollapse(bsCollapsePanel(title=h2("Compound info"), style="warning",
tabsetPanel(id="tab_iden_3",
tabPanel(title=icon("atlas"),
wellPanel(id = "def",style = "overflow-y:scroll; max-height: 200px",
textOutput("curr_definition"))
),
tabPanel(title=icon("atom"),
textOutput("curr_formula"),
plotOutput("curr_struct", height="300px")
)
))),
bsCollapse(bsCollapsePanel(title=h2("Search results"), style="error",
tabsetPanel(id="tab_iden_4",selected = "start",
# forward searching
tabPanel(title=icon("table"), value="start",
div(DT::dataTableOutput('match_tab', width="100%"),style='font-size:80%'),
hr(),
fluidRow(
switchButton(inputId = "auto_copy",
label = "Auto-copy name to clipboard??",
value = TRUE, col = "GB", type = "YN"),
align="center"),
helpText("Undo filtering"),
fluidRow(
shinyWidgets::circleButton("undo_match_filt", icon = icon("undo-alt"), size = "sm") # icon("fingerprint"), size = "sm")
)
),
tabPanel(title=icon("database"), value="pie_db",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_db") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("plus"), value = "pie_add",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_add") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("percentage"), value = "pie_iso",
fluidRow(align = "center",
plotly::plotlyOutput("match_pie_iso") %>% shinycssloaders::withSpinner()
)
),
tabPanel(title=icon("cloud"), value = "word_cloud",
fluidRow(align = "center",
conditionalPanel("input.wc_cloudbar == true",
tagList(
wordcloud2::wordcloud2Output("wordcloud_desc"),
tags$script(HTML(
"$(document).on('click', '#canvas', function() {",
'word = document.getElementById("wcSpan").innerHTML;',
"Shiny.onInputChange('selected_word_desc', word);",
"});"
))
)),
conditionalPanel("input.wc_cloudbar == false",
plotlyOutput("wordbar_desc")
),
sliderInput("wc_topn", "Top words shown:", min = 1, max = 100,step = 1,value = 30, width="60%"),
switchButton("wc_cloudbar", label = "", col = "BW", type = "CLBR",value = T)
)
),
tabPanel(title=icon("searchengin"),
textInput('pm_query', "Search for:"),
sliderInput('pm_year', "Paper publication range:",
min = 1900, max = as.numeric(format(Sys.Date(), '%Y')),
value = c(2000,as.numeric(format(Sys.Date(), '%Y'))),
step = 1,sep = ""
),
sliderInput("pm_max",
"Stop after ... papers:",
min = 1,
max = 1000,
value = 500),
shinyWidgets::circleButton("search_pubmed", icon = icon("search"), size = "sm"),
tabsetPanel(selected = 1,
tabPanel(title = icon("cloud"),
conditionalPanel("input.wc_cloudbar_pm == true",
tagList(
wordcloud2::wordcloud2Output("wordcloud_desc_pm"),
tags$script(HTML(
"$(document).on('click', '#canvas', function() {",
'word = document.getElementById("wcSpan").innerHTML;',
"Shiny.onInputChange('selected_word_desc_pm', word);",
"});"
))
)),
conditionalPanel("input.wc_cloudbar_pm == false",
plotlyOutput("wordbar_desc_pm")
),
sliderInput("wc_topn_pm", "Top words shown:", min = 1,
max = 100, step = 1,value = 30, width = "60%"),
switchButton("wc_cloudbar_pm", label = "", col = "BW", type = "CLBR",value = F)
),
tabPanel(title = icon("table"),
div(DT::dataTableOutput('pm_tab', width="100%"),
style='font-size:80%')
)
)
)
)))
),
# reverse searching
tabPanel(title="molecule > mz",
br(),
tags$i("Press the below button to browse compounds of the selected databases."),br(),
actionButton("browse_db", "Browse", icon=icon("eye")),
br(),
tabsetPanel(
tabPanel(NULL, icon = icon("database"),
wellPanel(id = "def",style = "overflow-y:scroll; max-height: 200px",
textOutput("browse_definition")),
div(DT::dataTableOutput('browse_tab'),style='font-size:80%'),
br(),
tags$i("Find m/z values matching adducts or isotopes of this compound."),br(),
actionButton("revsearch_mz", "Search", icon=icon("search"))
),
tabPanel(NULL, icon = icon("search-location"),
div(DT::dataTableOutput('hits_tab'),style='font-size:80%')
)
)
))
),
tabPanel("switch/subset", icon=icon("exchange")
,h2("Current experiment:")
,div(
sardine(h2(textOutput("curr_name"),style="padding:10px;")),
style="background-color:white;
height:55px;
width:115%;
position:relative;
right:30px;
border-top: 1px solid #DFDCDC;
border-bottom: 1px solid #DFDCDC;")
,hr()
,h2("Change variable of interest")
,selectInput("stats_var", label="Do statistics on:", choices = c("label"))
,shinyWidgets::circleButton("change_cls", icon = icon("hand-pointer-o"), size = "sm")
,fluidRow(column(12, align="center", uiOutput("timebutton")))
,hr()
,h2("Subset data")
,selectInput("subset_var", label="Subset data based on:", choices = c("label"))
,selectizeInput("subset_group", label="Group(s) in subset:", choices = c(), multiple=TRUE)
,shinyWidgets::circleButton("change_subset", icon = icon("hand-pointer-o"), size = "sm")
,shinyWidgets::circleButton("reset_subset", icon = icon("undo"), size = "sm")
),
# this tab is used to select user plot theme and user colours (discrete and continuous)
tabPanel("plot aesthetics", icon=icon("paint-brush"),
h2("Summary plot style"),br(),
selectizeInput("ggplot_sum_style", multiple=T, label = "Style(s)", choices = list("Box"="box",
"Violin"="violin",
"Beeswarm"="beeswarm",
"Scatterplot"="scatter"),
selected = c("violin")
),
selectInput("ggplot_sum_stats", label = "Stats shown", choices = list("median", "mean", "none")),
h2("Shape")
,selectInput("shape_var", label="Marker shape based on:", choices = c("label"))
,h2("Color")
,selectInput("col_var", label="Marker color based on:", choices = c("label"))
,h2("Hover text")
,selectInput("txt_var", label="Marker hover text based on:", choices = c("label")),
h2("Plot theme"),
selectInput("ggplot_theme", label = "Theme", choices = list("Grid, white bg"="bw",
"No grid, white bg"="classic",
"Grid, gray bg"="gray",
"Minimal"="min",
"Grid, black bg"="dark",
"Grid, white bg, gray axes"="light",
"Line drawing"="line"),
selected = opts$gtheme),
fluidRow(plotOutput("ggplot_theme_example",inline = F, width="100%")),
h2("Continuous data"),
# the below options need to match with the corresponding function storage in 'global'. if you want to add more it'll go here!
selectInput("color_ramp", label = "Color scheme", choices = list("RAINBOW!"="rb",
"Yellow - blue"="y2b",
"Matlab 1"="ml1",
"Matlab 2 "="ml2",
"Magenta - Green"="m2g",
"Cyan - yellow"="c2y",
"Blue - yellow"="b2y",
"Green - red"="g2r",
"Blue - green"="b2g",
"Blue - red"="b2r",
"Blue - pink (pastel)"="b2p",
"Blue - green - yellow"="bgy",
"Green - yellow - white"="gyw",
"Red - yellow - white"="ryw",
"Grayscale"="bw",
"Blues (brew)" = "Blues",
"Blue - green (brew)" = "BuGn",
"Blue - purple (brew)" = "BuPu",
"Green - blue (brew)" = "GnBu",
"Greens (brew)" = "Greens",
"Grayscale (brew)" = "Greys",
"Oranges (brew)" = "Oranges",
"Orange - red (brew)" = "OrRd",
"Purple - blue (brew)" = "PuBu",
"Purple - blue - green (brew)" = "PuBuGn",
"Purple - red (brew)" = "PuRd",
"Purples (brew)" = "Purples",
"Red - purple (brew)" = "RdPu",
"Reds (brew)" = "Reds",
"Yellow - green (brew)" = "YlGn",
"Yellow - green - blue (brew)" = "YlGnBu",
"Yellow - orange - brown (brew)" = "YlOrBr",
"Yellow - orange - red (brew)"="YlOrRd",
"BrBG", "PiYG", "PRGn", "PuOr", "RdBu", #TODO: add descriptions (or remove all?)
"RdGy", "RdYlBu", "RdYlGn", "Spectral",
"Accent", "Dark2", "Paired", "Pastel1",
"Pastel2", "Set1", "Set2", "Set3"),selected = opts$gspec
),
# preview plot
fluidRow(plotly::plotlyOutput("ramp_plot",inline = T, width="100%") %>% shinycssloaders::withSpinner()),
h2("Discrete data"),
uiOutput("colorPickers") # colour pickers generated in server.R. default settings taken from user_options.txt.
))
)
)),
# report tab
tabPanel("report",
icon = icon("file-invoice", class = "outlined"),
value="reportTab",
fluidRow(
column(width=12, align="center",
h2("Report"),
br(),
helpText("Report contents:"),
div(DT::dataTableOutput('report_unselected', width="100%"))
)#close column
)#close fluidrow
),#close tabpanel
# this tab is used to change general settings.
tabPanel("settings", icon = icon("cog",class = "outlined"), value="options",
navbarPage(inverse=TRUE,"Settings", id="tab_settings",
tabPanel("Mode", icon=icon("box-open"),
switchButton(inputId = "db_only", label = "Run in database-only mode?",
value = switch(opts$mode, dbonly=T, complete=F),
col = "BW", type = "YN")
),
tabPanel("Project", icon=icon("gift"),
#textInput(inputId="proj_name", label="Project name", value = ''),
selectizeInput(inputId="proj_name",
label="Project name",
choices=lcl$vectors$project_names, # existing projects in user folder (generated in 'global')
selected = opts$proj_name,
options=list(create = TRUE)), # let users add new names
actionButton("set_proj_name", label="Apply"),
helpText("This name will be used in all save files."),
textOutput("proj_name")
),
# change list of adducts used, or add your own
# TODO: fix, i think this is currently non-functional
tabPanel("Adducts", icon=icon("plus-square"),
h3("Current adduct table:"),
rhandsontable::rHandsontableOutput("adduct_tab", width=800, height=600),
shinySaveButton("save_adducts",
"Save changed table",
"Save file as ...",
filetype=list(RData="RData", csv="csv")
),
hr(),
fileInput("add_tab", "Import adduct table",
multiple = F,
accept = c(".RData", ".csv")),
sardine(actionButton("import_adducts", "Import", icon = icon("hand-peace-o"))),
sardine(imageOutput("adduct_upload_check",inline = T))
),
# change toolbar colour, text font and size
tabPanel("Aesthetic", icon=icon("child"),
h3("Change app settings"),
hr(),
h2("Navigation bar colours"),
colourpicker::colourInput(inputId = "bar.col.1",
label = paste("Active background"),
value = opts$col1,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.2",
label = paste("Inactive background"),
value = opts$col2,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.3",
label = paste("Active tab"),
value = opts$col3,
allowTransparent = FALSE),
colourpicker::colourInput(inputId = "bar.col.4",
label = paste("Inactive tab"),
value = opts$col4,
allowTransparent = FALSE),
br(),
h2("Fonts (Google fonts)"),
textInput(inputId="font.1", label="h1", value = opts$font1),
textInput(inputId="font.2", label="h2", value = opts$font2),
textInput(inputId="font.3", label="h3", value = opts$font3),
textInput(inputId="font.4", label="body", value = opts$font4),
br(), # TODO: font size modifier slider
h2("Font size"),
sliderInput("size.1", label="h1", value=as.numeric(opts$size1),min = 5, max=50),
sliderInput("size.2", label="h2", value=as.numeric(opts$size2),min = 5, max=50),
sliderInput("size.3", label="h3", value=as.numeric(opts$size3),min = 5, max=50),
sliderInput("size.4", label="body", value=as.numeric(opts$size4),min = 5, max=50),
br(),
h3("Taskbar image"),
div(imageOutput("taskbar_image",inline = T)),
shinyFilesButton('taskbar_image_path',
'Select image',
'Please select an image file',
FALSE),
hr(),
actionButton("change_css", "Save settings (restart to apply)") # need to reload CSS to enable new settings
)
)
),
# prompt user on opening the quit tab.
# TODO: add 'save project?' dialog
#tabPanel(title = "", value="stop", icon = icon("times-circle",class = "outlined")),
div(class="spinnylocation1",
div(class="plus", img(class="imagetop", src=opts$taskbar_image, width="100px", height="100px")),
div(class="minus", img(class="imagebottom", src=opts$taskbar_image, width="100px", height="100px"))
),
div(class="line")
,footer=fluidRow(
br(),br(),br(),
div(
#actionButton("show_window", label="", icon = icon("map-marked")),
actionButton("load_mset", label="load", icon = icon("folder-open"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;")),
actionButton("save_mset", label="save", icon = icon("save"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;")),
actionButton("debug", label="debug", icon = icon("bug"),style=gsubfn::fn$paste("background-color:$bgcol; border-color:$bgcol;"))
, style=gsubfn::fn$paste("position:fixed;bottom:0;width:100%;height:40px;z-index:1005;background-color:$bgcol;border-style:solid; border-color:black;border-width:1px;")),
align="center")
)
)
removeModal()
return(html)
}
})
|
## Classification of the binary 'bullied' outcome with logistic regression,
## LDA and QDA. Models are fit on a pre-made training split and evaluated on
## a held-out test split (accuracy, confusion matrices, ROC/AUC), at both the
## default 0.5 cutoff and a lowered, cost-adjusted cutoff.
library(psych)
library(caTools)
library(dplyr)
library(caret)
library(ggplot2)
library(ggthemes)
library(ROCR)
library(car)
library(MASS)
library(gridExtra)
library(data.table)
library(scales)

setwd("~/Documents/PhD2/project")

# Pre-split data sets: full data plus train/test partitions.
newdata    <- read.csv("newdata.csv", header = TRUE)
data_train <- read.csv("data_train.csv", header = TRUE)
data_test  <- read.csv("data_test.csv", header = TRUE)

# Descriptive statistics by outcome group.
psych::describeBy(newdata, newdata$bullied)

# Logistic regression ---------------------------------------------------------
glm.fits <- glm(
  bullied ~ height + weight + frstgr_age + age + hhincome + hhsize +
    mother_age + skipped_gr,
  data = data_train, family = binomial
)
summary(glm.fits)

# Predicted probabilities on both splits.
data_train$prediction <- predict(glm.fits, newdata = data_train, type = "response")
data_test$prediction  <- predict(glm.fits, newdata = data_test,  type = "response")

# Classify at the default 0.5 cutoff and report test-set accuracy.
glm.pred <- ifelse(data_test$prediction > 0.5, 1, 0)
table(glm.pred, data_test$bullied)
mean(glm.pred == data_test$bullied)

# Cost-sensitive cutoff for the imbalanced outcome: a false negative costs
# twice as much as a false positive.
source("Additional code.R")  # provides ConfusionMatrixInfo() and ROCInfo()
cm_info <- ConfusionMatrixInfo(
  data = data_test, predict = "prediction",
  actual = "bullied", cutoff = 0.5
)
cost_fp <- 100
cost_fn <- 200
roc_info <- ROCInfo(
  data = cm_info$data, predict = "predict",
  actual = "actual", cost.fp = cost_fp, cost.fn = cost_fn
)
grid.draw(roc_info$plot)

# Confusion-matrix plot at the cost-optimal cutoff.
cm_info <- ConfusionMatrixInfo(
  data = data_test, predict = "prediction",
  actual = "bullied", cutoff = roc_info$cutoff
)
cm_info$plot

# LDA -------------------------------------------------------------------------
lda.fit <- lda(
  bullied ~ height + weight + frstgr_age + age + hhincome + hhsize +
    mother_age + skipped_gr,
  data = data_train
)
lda.fit
lda.pred  <- predict(lda.fit, newdata = data_test)
lda.class <- lda.pred$class
table(lda.class, data_test$bullied)
accuracy.lda <- mean(lda.class == data_test$bullied)
accuracy.lda

# QDA -------------------------------------------------------------------------
qda.fit <- qda(
  bullied ~ height + weight + frstgr_age + age + hhincome + hhsize +
    mother_age + skipped_gr,
  data = data_train
)
qda.fit
qda.pred  <- predict(qda.fit, newdata = data_test)
qda.class <- qda.pred$class
table(qda.class, data_test$bullied)
accuracy.qda <- mean(qda.class == data_test$bullied)
accuracy.qda

# Re-classify with the lowered cutoff of 0.27 (cf. the cost-adjusted cutoff
# derived from the logistic model above; an earlier comment said 0.21, but
# the code has always used .27).
lda.pred.adj <- ifelse(lda.pred$posterior[, 2] > .27, 1, 0)
qda.pred.adj <- ifelse(qda.pred$posterior[, 2] > .27, 1, 0)

# Confusion matrices at the adjusted cutoff.
list(
  LDA_model = table(lda.pred.adj, data_test$bullied),
  QDA_model = table(qda.pred.adj, data_test$bullied)
)
# Distinct names so the unadjusted accuracies above are not clobbered.
accuracy.lda.adj <- mean(lda.pred.adj == data_test$bullied)
accuracy.lda.adj
accuracy.qda.adj <- mean(qda.pred.adj == data_test$bullied)
accuracy.qda.adj  # fixed: previously printed the unadjusted accuracy.qda

# ROC curves ------------------------------------------------------------------
par(mfrow = c(1, 2))

# LDA (linear discriminant)
pred     <- prediction(lda.pred$posterior[, 2], data_test$bullied)
roc.perf <- performance(pred, measure = "tpr", x.measure = "fpr")
auc      <- performance(pred, measure = "auc")
auc      <- auc@y.values
plot(roc.perf, main = "ROC curve of LDA")
abline(a = 0, b = 1)
text(x = .25, y = .65, paste("AUC = ", round(auc[[1]], 3), sep = ""))

# QDA (quadratic discriminant)
pred     <- prediction(qda.pred$posterior[, 2], data_test$bullied)
roc.perf <- performance(pred, measure = "tpr", x.measure = "fpr")
auc      <- performance(pred, measure = "auc")
auc      <- auc@y.values
plot(roc.perf, main = "ROC curve of QDA")
abline(a = 0, b = 1)
text(x = .25, y = .65, paste("AUC = ", round(auc[[1]], 3), sep = ""))
|
/classification_lda_qda.R
|
no_license
|
zhalisheva/ML
|
R
| false
| false
| 3,379
|
r
|
## Classification of the binary 'bullied' outcome with logistic regression,
## LDA and QDA, trained on data_train and evaluated on data_test.
library(psych)
library(caTools)
library(dplyr)
library(caret)
library(ggplot2)
library(ggthemes)
library(ROCR)
library(car)
library(MASS)
library(gridExtra)
library(data.table)
library(scales)
setwd("~/Documents/PhD2/project")
# Pre-split data sets: full data plus train/test partitions.
newdata=read.csv('newdata.csv', header=TRUE)
data_train=read.csv('data_train.csv', header=TRUE)
data_test=read.csv('data_test.csv', header=TRUE)
# Descriptive statistics by outcome group.
psych::describeBy(newdata, newdata$bullied)
# Logistic regression on anthropometric / household predictors.
glm.fits=glm(bullied~height+weight+frstgr_age+age+hhincome+hhsize+mother_age+skipped_gr,
data=data_train, family=binomial)
summary(glm.fits)
# Predicted probabilities on both splits.
data_train$prediction=predict(glm.fits, newdata = data_train, type = "response" )
data_test$prediction=predict(glm.fits, newdata = data_test , type = "response" )
# Classify at the default 0.5 cutoff; report test-set accuracy.
glm.pred=ifelse(data_test$prediction > 0.5, 1, 0)
table(glm.pred, data_test$bullied)
mean(glm.pred == data_test$bullied)
# finding the cutoff for the imbalanced data
# user-defined different cost for false negative and false positive
source("Additional code.R")  # provides ConfusionMatrixInfo() and ROCInfo()
cm_info=ConfusionMatrixInfo( data = data_test, predict = "prediction",
actual = "bullied", cutoff = 0.5 )
cost_fp=100
cost_fn=200
roc_info=ROCInfo( data = cm_info$data, predict = "predict",
actual = "actual", cost.fp = cost_fp, cost.fn = cost_fn )
grid.draw(roc_info$plot)
# plot the confusion matrix plot with the cost-optimal cutoff value
cm_info=ConfusionMatrixInfo( data = data_test, predict = "prediction",
actual = "bullied", cutoff = roc_info$cutoff )
cm_info$plot
# LDA: same predictors as the logistic model.
lda.fit=lda(bullied~height+weight+frstgr_age+age+hhincome+hhsize+mother_age+skipped_gr,
data=data_train)
lda.fit
lda.pred=predict(lda.fit, newdata=data_test)
lda.class=lda.pred$class
table(lda.class, data_test$bullied)
accuracy.lda=mean(lda.class == data_test$bullied)
accuracy.lda
# QDA: same predictors again.
qda.fit=qda(bullied~height+weight+frstgr_age+age+hhincome+hhsize+mother_age+skipped_gr,
data=data_train)
qda.fit
qda.pred=predict(qda.fit, newdata=data_test)
qda.class=qda.pred$class
table(qda.class, data_test$bullied)
accuracy.qda=mean(qda.class == data_test$bullied)
accuracy.qda
# Re-classify with a lowered cutoff. NOTE(review): the original comment said
# cutoff=0.21 but the code uses .27 -- confirm which value was intended.
lda.pred.adj = ifelse(lda.pred$posterior[, 2] > .27, 1, 0)
qda.pred.adj = ifelse(qda.pred$posterior[, 2] > .27, 1, 0)
# create new confusion matrices at the adjusted cutoff
list(LDA_model = table(lda.pred.adj, data_test$bullied),
QDA_model = table(qda.pred.adj, data_test$bullied))
accuracy.lda=mean(lda.pred.adj == data_test$bullied)  # NOTE(review): silently overwrites the unadjusted accuracy.lda above
accuracy.lda
accuracy.qda.adj=mean(qda.pred.adj == data_test$bullied)
accuracy.qda  # NOTE(review): likely a bug -- prints the unadjusted accuracy.qda instead of accuracy.qda.adj
# ROC curve for the linear discriminant model
par(mfrow = c(1,2))
# Evaluate the model on posterior class-2 probabilities
pred=prediction(lda.pred$posterior[,2], data_test$bullied)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc = performance(pred, measure = "auc")
auc = auc@y.values
# Plot with the chance diagonal and the AUC annotated
plot(roc.perf, main = 'ROC curve of LDA')
abline(a=0, b= 1)
text(x = .25, y = .65 ,paste("AUC = ", round(auc[[1]],3), sep = ""))
# ROC curve for the quadratic (nonlinear) discriminant model
pred=prediction(qda.pred$posterior[,2], data_test$bullied)
roc.perf = performance(pred, measure = "tpr", x.measure = "fpr")
auc = performance(pred, measure = "auc")
auc = auc@y.values
# Plot with the chance diagonal and the AUC annotated
plot(roc.perf, main = 'ROC curve of QDA')
abline(a=0, b= 1)
text(x = .25, y = .65 ,paste("AUC = ", round(auc[[1]],3), sep = ""))
|
#
# Plumber API that serves peptide-class predictions from a pinned Keras model.
# Run with the 'Run API' button in RStudio.
#
# Find out more about building APIs with Plumber here:
#
#    https://www.rplumber.io/
#
library(plumber)
library(config)
library(reticulate)
library(PepTools)

# Read RStudio Connect credentials from the project config file.
con <- config::get(file = here::here("part3/immunotherapy-master/plumber/config.yml"))
# Fail fast on an unconfigured file. NOTE(review): the second clause stops
# when the URL does NOT contain "<" -- if "<" marks an unfilled placeholder
# this looks inverted; kept as in the original pending confirmation of the
# config template's format. (`&` -> `&&`: both operands are scalar here.)
if (!all(c("rsc_url", "rsc_api_key") %in% names(con)) &&
    !grepl("<", con$rsc_url, fixed = TRUE)) {
  stop("Set rsc_url and rsc_api_key in config.yml before continuing.")
}

# Register the Connect pin board using the values validated above.
# Fixed: the original re-read them via bare config::get(), which consults the
# default config location rather than the `file =` path checked above.
pins::board_register_rsconnect(
  server = con$rsc_url,
  key = con$rsc_api_key
)

# Download the pinned model archive, unpack it next to the pin, and load the
# TensorFlow SavedModel it contains.
mod_pinned <- pins::pin_get("peptide_model")
utils::unzip(mod_pinned[1], exdir = fs::path_dir(mod_pinned[1]))
mod <- keras::load_model_tf(file.path(fs::path_dir(mod_pinned[1]), "saved_model"))

#* @apiTitle Immunotherapy

#* Predict peptide class
#* @param peptide Character vector with a single peptide, eg. `LLTDAQRIV` or comma separated, e.g. `LLTDAQRIV, LMAFYLYEV, VMSPITLPT, SLHLTNCFV, RQFTCMIAV`
#* @get /predict
function(peptide){
  # Class labels indexed by the model's integer predictions.
  peptide_classes <- c("NB", "WB", "SB")
  # Split the query string on commas and trim surrounding whitespace.
  peptide <- trimws(strsplit(peptide, ",")[[1]])
  # Encode each peptide and flatten to one row of 9*20 values per peptide,
  # the input shape the model expects.
  x_val <- peptide %>%
    pep_encode() %>%
    array_reshape(dim = c(nrow(.), 9*20))
  # Integer class predictions from the model.
  # NOTE(review): keras::predict_classes() was removed in newer Keras
  # releases; if this breaks, switch to predict() + max.col().
  preds <- mod %>%
    keras::predict_classes(x_val)
  # Return each input peptide alongside its predicted class label.
  tibble::tibble(
    peptide = peptide,
    peptide_classes = peptide_classes[preds]
  )
}
|
/part3/immunotherapy-master/plumber/plumber.R
|
no_license
|
graemeleehickey/Workshop-R-Tensorflow-Scientific-Computing
|
R
| false
| false
| 1,619
|
r
|
#
# This is a Plumber API. You can run the API by clicking
# the 'Run API' button above.
#
# Find out more about building APIs with Plumber here:
#
# https://www.rplumber.io/
#
library(plumber)
library(config)
library(reticulate)
library(PepTools)
# Get pinned model from RStudio Connect; credentials come from the config file.
con <- config::get(file = here::here("part3/immunotherapy-master/plumber/config.yml"))
# Fail fast on an unconfigured file. NOTE(review): the second clause stops
# when the URL does NOT contain "<" -- if "<" marks an unfilled placeholder
# this looks inverted; confirm intent.
if (!all(c("rsc_url", "rsc_api_key") %in% names(con)) &
!grepl("<", con$rsc_url, fixed = TRUE)) {
stop("Set rsc_url and rsc_api_key in config.yml before continuing.")
}
# Register the Connect pin board. NOTE(review): these config::get() calls omit
# `file =` and so read the default config location, which may differ from the
# file validated above -- confirm they resolve to the same values.
pins::board_register_rsconnect(
server = config::get("rsc_url"),
key = config::get("rsc_api_key")
)
# Download the pinned model archive, unpack it, and load the TF SavedModel.
mod_pinned <- pins::pin_get("peptide_model")
utils::unzip(mod_pinned[1], exdir = fs::path_dir(mod_pinned[1]))
mod <- keras::load_model_tf(file.path(fs::path_dir(mod_pinned[1]), "saved_model"))
#* @apiTitle Immunotherapy
#* Predict peptide class
#* @param peptide Character vector with a single peptide, eg. `LLTDAQRIV` or comma separated, e.g. `LLTDAQRIV, LMAFYLYEV, VMSPITLPT, SLHLTNCFV, RQFTCMIAV`
#* @get /predict
function(peptide){
# Class labels indexed by the model's integer predictions
peptide_classes <- c("NB", "WB", "SB")
# split on commas and remove white space
peptide <- trimws(strsplit(peptide, ",")[[1]])
# encode and flatten: one row of 9*20 values per peptide
x_val <- peptide %>%
pep_encode() %>%
array_reshape(dim = c(nrow(.), 9*20))
# Get integer class predictions from the model.
# NOTE(review): keras::predict_classes() was removed in newer Keras releases.
preds <- mod %>%
keras::predict_classes(x_val)
# Return original peptides alongside their predicted class labels
tibble::tibble(
peptide = peptide,
peptide_classes = peptide_classes[preds]
)
}
|
# Read a phylogenetic tree, remove its root, and write the unrooted tree out.
library(ape)

tree_rooted <- read.tree("4245_0.txt")   # input phylogeny (Newick format)
tree_unrooted <- unroot(tree_rooted)     # collapse the root node
write.tree(tree_unrooted, file = "4245_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/4245_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
# Read a phylogenetic tree, remove its root, and write the unrooted tree out.
library(ape)
testtree <- read.tree("4245_0.txt")  # input phylogeny (Newick format)
unrooted_tr <- unroot(testtree)      # collapse the root node
write.tree(unrooted_tr, file="4245_0_unrooted.txt")
|
## t-test practice on three data sets: barley yields (paired t-test across
## years), mtcars fuel economy (independent t-test by transmission), and
## bottle fill volumes (one-sample t-test against the nominal 500 ml).
barley<-read.csv("C:\\Users\\nick\\Documents\\Newcastle\\Fourth Year\\BIO8068 Management and Visualisation of Data\\test_BIO8068_1\\Day2 barley.csv")
library(ggplot2)
library(car)            # leveneTest()
library(RVAideMemoire)  # byf.shapiro()
library(mosaic)         # formula interface for mean()

## barley: yield between years ------------------------------------------------
head(barley)   # look at the top few rows
str(barley)    # check the characteristics of the variables
barley$year <- as.factor(barley$year)  # year is a grouping factor, not continuous
is.factor(barley$year)                 # confirm the conversion worked
ggplot(barley, aes(x = year, y = Yield)) + xlab("Year") + ylab("Yield kg/ha") +
  geom_boxplot()

# Assumption checks: equal variances and per-group normality.
leveneTest(Yield ~ year, data = barley)
byf.shapiro(Yield ~ year, data = barley)
hist(barley$Yield)

# Paired t-test across years (assumes observations are matched between years).
t.test(Yield ~ year, paired = TRUE, data = barley)
mean(Yield ~ year, data = barley)
# For comparison: the independent t-test on the same data.
t.test(Yield ~ year, data = barley)

## barley: yield by location --------------------------------------------------
mean(Yield ~ loc, data = barley)
barley$loc <- as.factor(barley$loc)
is.factor(barley$loc)
leveneTest(Yield ~ loc, data = barley)
# Fixed: byf.mshapiro() is the multivariate version and does not suit a single
# response; byf.shapiro() matches the univariate check used for year above.
byf.shapiro(Yield ~ loc, data = barley)

## cars: mpg by transmission --------------------------------------------------
mtcars         # show the whole data set
str(mtcars)    # variable characteristics
head(mtcars)   # top 6 rows
mtcars$am <- as.factor(mtcars$am)  # transmission (0 = automatic, 1 = manual)
is.factor(mtcars$am)  # fixed: previously re-checked barley$year by mistake
ggplot(mtcars, aes(x = am, y = mpg)) + xlab("transmission") + ylab("mpg") +
  geom_boxplot()
leveneTest(mpg ~ am, data = mtcars)
byf.shapiro(mpg ~ am, data = mtcars)
hist(mtcars$mpg)
t.test(mpg ~ am, data = mtcars)

## bottles: one-sample t-test against the nominal 500 ml fill -----------------
bottles <- read.csv("Day2 bottles.csv")
str(bottles)    # variable characteristics
head(bottles)   # top 6 rows
shapiro.test(bottles$Volume)  # normality check for the one-sample t-test
hist(bottles$Volume)
t.test(bottles$Volume, mu = 500)
|
/Day2 t-test practice.R
|
no_license
|
CSellwood1/test_BIO8068
|
R
| false
| false
| 1,631
|
r
|
##barley
# Paired comparison of barley yields measured in two years at the same sites.
# NOTE(review): absolute Windows path -- this only runs on the original
# author's machine; consider a project-relative path.
barley<-read.csv("C:\\Users\\nick\\Documents\\Newcastle\\Fourth Year\\BIO8068 Management and Visualisation of Data\\test_BIO8068_1\\Day2 barley.csv")
library(ggplot2)
library(car)            # leveneTest()
library(RVAideMemoire)  # byf.shapiro(), byf.mshapiro()
library(mosaic)         # formula interface for mean()
head(barley)#looking at top few rows
str(barley)#so you can check the characteristics of the variables
barley$year<-as.factor(barley$year)#converts continuous to a factor
is.factor(barley$year)#checks if year is now a factor
ggplot(barley, aes(x=year, y=Yield)) + xlab("Year") + ylab("Yield kg/ha") +
  geom_boxplot()
# Assumption checks: equal variances across years, per-group normality
leveneTest(Yield~year, data=barley)
byf.shapiro(Yield~year, data=barley)
hist(barley$Yield)
# Paired t-test (same sites measured in both years).
# NOTE(review): `paired=` with the formula interface was deprecated and
# later removed in recent R; may need the two-vector form or Pair().
t.test(Yield~year, paired=TRUE, data=barley)
mean(Yield~year, data=barley)
#if you were to run an independent t-test
t.test(Yield~year, data=barley)
###
mean(Yield~loc, data=barley)
barley$loc<-as.factor(barley$loc)
is.factor(barley$loc)
leveneTest(Yield~loc, data=barley)
byf.mshapiro(Yield~loc, data=barley)
##cars
# Independent-samples comparison: mpg by transmission type in mtcars.
mtcars#will show you the whole file
str(mtcars)#will show you the variable characteristics
head(mtcars)#will show you the top 6 rows
mtcars$am<-as.factor(mtcars$am)#converts am(transmission) to factor
is.factor(mtcars$am)#check it worked (fix: originally re-checked barley$year)
ggplot(mtcars, aes(x=am, y=mpg)) + xlab("transmission") + ylab("mpg") +
  geom_boxplot()
leveneTest(mpg~am, data=mtcars)
byf.shapiro(mpg~am, data=mtcars)
hist(mtcars$mpg)
t.test(mpg~am, data=mtcars)
##bottles
# One-sample test: does the mean fill volume differ from 500?
bottles<-read.csv("Day2 bottles.csv")
str(bottles)#will show you the variable characteristics
head(bottles)#will show you the top 6 rows
shapiro.test(bottles$Volume)
hist(bottles$Volume)
t.test(bottles$Volume, mu=500)
|
# Count gains and losses of the symbiosis trait across the mtree posterior
# and on the 95% consensus tree, writing the summaries to CSV.
# Load packages
library(data.table)
library(phytools)
library(phangorn)
library(here)  # fix: here() is used for every path below but was never attached
# Set working directory
setwd(here("analysis_rerun/results/mtree/gains_losses"))
# Source helper functions (provide getChanges() and bootMed() used below)
source(here("R/aic-rate-summary.R"))
source(here("R/gains-losses.R"))
# Read in tree and traits------------------------------------------------------
mtree <- read.nexus(here("data/updated_trees_traits/mtree_traits",
                         "mtree.trees"))
traits <- fread(here("data/updated_trees_traits/mtree_traits",
                     "mtree_traits_B_as_Z.csv"),
                header = FALSE, col.names = c("taxa", "state"))
# Format traits and tip labels-------------------------------------------------
# Drop taxa without data from tree ("-" marks a missing state)
mtree <- lapply(mtree, drop.tip, tip = traits[state == "-", taxa])
class(mtree) <- "multiPhylo"
# Remove taxa with missing data from traits
traits <- traits[state != "-"]
# Read in corHMM runs----------------------------------------------------------
# Read in asr summarized across all 1000 trees
fanc2 <- readRDS(here("analysis_rerun/mtree_asr",
                      "mtree-nodeframes.rds"))
# Read in 1000 individual asrs
anc2 <- readRDS(here("analysis_rerun/mtree_asr",
                     "mtree-asr.rds"))
# Calculate consensus state probs at internal nodes and tips-------------------
# Make consensus tree
ctree <- consensus(mtree, p = 0.95)
# Drop outgroup
ctree <- drop.tip(ctree, c("Discosoma", "Rhodactis", "Ricordea_florida"))
# Get state probs at internal nodes
# (A*/Z* are the two observed states under two hidden rate categories S/F
# -- presumably; TODO confirm against the corHMM model specification.)
setnames(fanc2, c("AS", "ZS", "AF", "ZF", "ctree.nodes"))
fanc2 <- na.omit(fanc2)
fanc2 <- fanc2[, .(AS = mean(AS), ZS = mean(ZS), AF = mean(AF), ZF = mean(ZF)),
               by = ctree.nodes]
# Reorder nodes after dropping nas
setkey(fanc2, ctree.nodes)
# Remove nodes that were dropped from tree
fanc2 <- fanc2[-which(ctree.nodes %in% c(579, 858, 859)),]
# Drop ctree nodes
fanc2[, ctree.nodes := NULL]
### Get tip probs
tip.probs <- lapply(anc2, "[[", "lik.tip.states")
tip.probs <- lapply(tip.probs, data.table)
tip.index <- lapply(mtree, "[[", "tip.label")
tip.index <- lapply(tip.index, data.table)
lapply(tip.index, setnames, "species")
tip.probs <- rbindlist(mapply(cbind, tip.probs, tip.index, SIMPLIFY = FALSE))
setnames(tip.probs, c("AS", "ZS", "AF", "ZF", "species"))
# Average the tip state probabilities over all posterior trees
tip.probs <- tip.probs[, .(AS = mean(AS), ZS = mean(ZS),
                           AF = mean(AF), ZF = mean(ZF)),
                       by = species]
# Drop tips that were dropped from tree
tip.probs <- tip.probs[-which(species %in%
                                c("Discosoma", "Rhodactis", "Ricordea_florida"))]
tip.probs
### 100 tree posterior~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Check that the same nodes need to be dropped for all mtrees
# # 579, 580, 581 for all
# unique(unlist(lapply(mtree, function(x) Ancestors(x, which(x$tip.label %in%
# c("Discosoma", "Rhodactis", "Ricordea_florida")), "all"))))
# # This gives same answer bc 1st 3 tips are always the outgroup
# unique(unlist(lapply(mtree, Ancestors, 1:3, "all")))
# # Again, see that 1st 3 tip labels are always the outgroup
# unique(unlist(lapply(mtree, function(x) x$tip.label[1:3])))
# # So based on the above I can just drop the 1st three entries from every
# # matrix of tip probs because 1st 3 entries are always the outgroup.
# # I can drop the 1st three entries for nodeprobs because the descendents
# # of the outgroup are always the 1st 3 nodes.
tips <- lapply(anc2, "[[", "lik.tip.states")
nodes <- lapply(anc2, "[[", "lik.anc.states")
tips <- lapply(tips, function(x) x[-c(1:3),])
nodes <- lapply(nodes, function(x) x[-c(1:3),])
# Count number of changes on consensus asr-------------------------------------
### All 4 states
# Assign each tip/node its maximum-probability state, then count transitions
tip.vec <- apply(tip.probs[,2:5], 1, which.max)
node.vec <- apply(fanc2, 1, which.max)
cat <- c(tip.vec, node.vec)
cc <- getChanges(ctree, cat, rate.cat = 2)
# NOTE(review): melt()'s formals are variable.name/value.name; the partial
# names used here rely on partial matching -- verify they resolve
# (data.table::melt also has value.factor, which could make "value" ambiguous).
cc <- melt(cc, variable = "type", value = "ctree")
### Binary gains and losses
# Collapse the hidden rate categories: sum A* columns and Z* columns
az.fanc2 <- copy(fanc2)
az.tip <- copy(tip.probs)
az.fanc2[, asum := rowSums(.SD), .SDcols = grep("A", names(az.fanc2))]
az.fanc2[, zsum := rowSums(.SD), .SDcols = grep("Z", names(az.fanc2))]
az.tip[, asum := rowSums(.SD), .SDcols = grep("A", names(az.tip))]
az.tip[, zsum := rowSums(.SD), .SDcols = grep("Z", names(az.tip))]
az.node.vec <- apply(az.fanc2[, .(asum, zsum)], 1, which.max)
az.tip.vec <- apply(az.tip[, .(asum, zsum)], 1, which.max)
az.cat <- c(az.tip.vec, az.node.vec)
az.cc <- getChanges(ctree, az.cat, rate.cat = 1)
az.cc <- melt(az.cc, variable = "type", value = "ctree")
# Count number of changes across posterior-------------------------------------
### All 4 states
ftips <- lapply(anc2, "[[", "lik.tip.states")
fnodes <- lapply(anc2, "[[", "lik.anc.states")
ftips <- lapply(ftips, function(x) apply(x, 1, which.max))
fnodes <- lapply(fnodes, function(x) apply(x, 1, which.max))
fcat <- mapply(function(x,y) c(x,y), ftips, fnodes, SIMPLIFY = FALSE)
fchanges <- rbindlist(mapply(getChanges, mtree, fcat, rate.cat = 2,
                             SIMPLIFY = FALSE))
fchanges
cmelt <- melt(fchanges, variable.name = "type", value = "n")
csumm <- cmelt[, .(mean = mean(n), median = median(n)), by = type]
csumm
# Get confidence intervals around median number of changes
cq <- apply(fchanges[,1:ncol(fchanges)], 2, bootMed, n = 100000)
cq <- data.table(t(cq), keep.rownames = T)
setnames(cq, c("type", "0.025", "median", "0.975"))
setkey(cq, type)
setkey(cc, type)
cq <- cq[cc]
setcolorder(cq, c("type", "ctree", "0.025", "median", "0.975"))
cq
write.csv(cq, file = "all_changes.csv")
### Binary gains and losses
az.ftips <- lapply(anc2, "[[", "lik.tip.states")
az.fnodes <- lapply(anc2, "[[", "lik.anc.states")
az.ftips <- lapply(az.ftips, data.table)
az.fnodes <- lapply(az.fnodes, data.table)
lapply(az.ftips, setnames, c("AS", "ZS", "AF", "ZF"))
lapply(az.fnodes, setnames, c("AS", "ZS", "AF", "ZF"))
lapply(az.ftips, function(x)
  x[, asum := rowSums(.SD), .SDcols = grep("A", names(x))])
lapply(az.ftips, function(x)
  x[, zsum := rowSums(.SD), .SDcols = grep("Z", names(x))])
lapply(az.fnodes, function(x)
  x[, asum := rowSums(.SD), .SDcols = grep("A", names(x))])
lapply(az.fnodes, function(x)
  x[, zsum := rowSums(.SD), .SDcols = grep("Z", names(x))])
az.tvec <- lapply(az.ftips, function(x) apply(x[,.(asum, zsum)], 1, which.max))
az.nvec <- lapply(az.fnodes, function(x) apply(x[,.(asum, zsum)],1, which.max))
az.fcat <- mapply(function(x,y) c(x,y), az.tvec, az.nvec, SIMPLIFY = FALSE)
az.fchanges <- rbindlist(mapply(getChanges, mtree, az.fcat, rate.cat = 1,
                                SIMPLIFY = FALSE))
az.cmelt <- melt(az.fchanges, variable.name = "type", value = "n")
az.csumm <- az.cmelt[, .(mean = mean(n), median = median(n)), by = type]
az.csumm
# Get confidence intervals around median number of changes
az.cq <- apply(az.fchanges[,1:ncol(az.fchanges)], 2, bootMed, n = 100000)
az.cq <- data.table(t(az.cq), keep.rownames = T)
setnames(az.cq, c("type", "0.025", "median", "0.975"))
setkey(az.cq, type)
setkey(az.cc, type)
az.cq <- az.cq[az.cc]
setcolorder(az.cq, c("type", "ctree", "0.025", "median", "0.975"))
az.cq
az.cq <- az.cq[-(1:2),]
write.csv(az.cq, file = "binary_changes.csv")
|
/analysis_rerun/results/mtree/gains_losses/mtree-gains-losses.R
|
no_license
|
jagault/evolution-photosymbiosis
|
R
| false
| false
| 7,265
|
r
|
# Count gains and losses of the symbiosis trait across the mtree posterior
# and on the 95% consensus tree, writing the summaries to CSV.
# Load packages
library(data.table)
library(phytools)
library(phangorn)
library(here)  # fix: here() is used for every path below but was never attached
# Set working directory
setwd(here("analysis_rerun/results/mtree/gains_losses"))
# Source helper functions (provide getChanges() and bootMed() used below)
source(here("R/aic-rate-summary.R"))
source(here("R/gains-losses.R"))
# Read in tree and traits------------------------------------------------------
mtree <- read.nexus(here("data/updated_trees_traits/mtree_traits",
                         "mtree.trees"))
traits <- fread(here("data/updated_trees_traits/mtree_traits",
                     "mtree_traits_B_as_Z.csv"),
                header = FALSE, col.names = c("taxa", "state"))
# Format traits and tip labels-------------------------------------------------
# Drop taxa without data from tree ("-" marks a missing state)
mtree <- lapply(mtree, drop.tip, tip = traits[state == "-", taxa])
class(mtree) <- "multiPhylo"
# Remove taxa with missing data from traits
traits <- traits[state != "-"]
# Read in corHMM runs----------------------------------------------------------
# Read in asr summarized across all 1000 trees
fanc2 <- readRDS(here("analysis_rerun/mtree_asr",
                      "mtree-nodeframes.rds"))
# Read in 1000 individual asrs
anc2 <- readRDS(here("analysis_rerun/mtree_asr",
                     "mtree-asr.rds"))
# Calculate consensus state probs at internal nodes and tips-------------------
# Make consensus tree
ctree <- consensus(mtree, p = 0.95)
# Drop outgroup
ctree <- drop.tip(ctree, c("Discosoma", "Rhodactis", "Ricordea_florida"))
# Get state probs at internal nodes
# (A*/Z* are the two observed states under two hidden rate categories S/F
# -- presumably; TODO confirm against the corHMM model specification.)
setnames(fanc2, c("AS", "ZS", "AF", "ZF", "ctree.nodes"))
fanc2 <- na.omit(fanc2)
fanc2 <- fanc2[, .(AS = mean(AS), ZS = mean(ZS), AF = mean(AF), ZF = mean(ZF)),
               by = ctree.nodes]
# Reorder nodes after dropping nas
setkey(fanc2, ctree.nodes)
# Remove nodes that were dropped from tree
fanc2 <- fanc2[-which(ctree.nodes %in% c(579, 858, 859)),]
# Drop ctree nodes
fanc2[, ctree.nodes := NULL]
### Get tip probs
tip.probs <- lapply(anc2, "[[", "lik.tip.states")
tip.probs <- lapply(tip.probs, data.table)
tip.index <- lapply(mtree, "[[", "tip.label")
tip.index <- lapply(tip.index, data.table)
lapply(tip.index, setnames, "species")
tip.probs <- rbindlist(mapply(cbind, tip.probs, tip.index, SIMPLIFY = FALSE))
setnames(tip.probs, c("AS", "ZS", "AF", "ZF", "species"))
# Average the tip state probabilities over all posterior trees
tip.probs <- tip.probs[, .(AS = mean(AS), ZS = mean(ZS),
                           AF = mean(AF), ZF = mean(ZF)),
                       by = species]
# Drop tips that were dropped from tree
tip.probs <- tip.probs[-which(species %in%
                                c("Discosoma", "Rhodactis", "Ricordea_florida"))]
tip.probs
### 100 tree posterior~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Check that the same nodes need to be dropped for all mtrees
# # 579, 580, 581 for all
# unique(unlist(lapply(mtree, function(x) Ancestors(x, which(x$tip.label %in%
# c("Discosoma", "Rhodactis", "Ricordea_florida")), "all"))))
# # This gives same answer bc 1st 3 tips are always the outgroup
# unique(unlist(lapply(mtree, Ancestors, 1:3, "all")))
# # Again, see that 1st 3 tip labels are always the outgroup
# unique(unlist(lapply(mtree, function(x) x$tip.label[1:3])))
# # So based on the above I can just drop the 1st three entries from every
# # matrix of tip probs because 1st 3 entries are always the outgroup.
# # I can drop the 1st three entries for nodeprobs because the descendents
# # of the outgroup are always the 1st 3 nodes.
tips <- lapply(anc2, "[[", "lik.tip.states")
nodes <- lapply(anc2, "[[", "lik.anc.states")
tips <- lapply(tips, function(x) x[-c(1:3),])
nodes <- lapply(nodes, function(x) x[-c(1:3),])
# Count number of changes on consensus asr-------------------------------------
### All 4 states
# Assign each tip/node its maximum-probability state, then count transitions
tip.vec <- apply(tip.probs[,2:5], 1, which.max)
node.vec <- apply(fanc2, 1, which.max)
cat <- c(tip.vec, node.vec)
cc <- getChanges(ctree, cat, rate.cat = 2)
# NOTE(review): melt()'s formals are variable.name/value.name; the partial
# names used here rely on partial matching -- verify they resolve
# (data.table::melt also has value.factor, which could make "value" ambiguous).
cc <- melt(cc, variable = "type", value = "ctree")
### Binary gains and losses
# Collapse the hidden rate categories: sum A* columns and Z* columns
az.fanc2 <- copy(fanc2)
az.tip <- copy(tip.probs)
az.fanc2[, asum := rowSums(.SD), .SDcols = grep("A", names(az.fanc2))]
az.fanc2[, zsum := rowSums(.SD), .SDcols = grep("Z", names(az.fanc2))]
az.tip[, asum := rowSums(.SD), .SDcols = grep("A", names(az.tip))]
az.tip[, zsum := rowSums(.SD), .SDcols = grep("Z", names(az.tip))]
az.node.vec <- apply(az.fanc2[, .(asum, zsum)], 1, which.max)
az.tip.vec <- apply(az.tip[, .(asum, zsum)], 1, which.max)
az.cat <- c(az.tip.vec, az.node.vec)
az.cc <- getChanges(ctree, az.cat, rate.cat = 1)
az.cc <- melt(az.cc, variable = "type", value = "ctree")
# Count number of changes across posterior-------------------------------------
### All 4 states
ftips <- lapply(anc2, "[[", "lik.tip.states")
fnodes <- lapply(anc2, "[[", "lik.anc.states")
ftips <- lapply(ftips, function(x) apply(x, 1, which.max))
fnodes <- lapply(fnodes, function(x) apply(x, 1, which.max))
fcat <- mapply(function(x,y) c(x,y), ftips, fnodes, SIMPLIFY = FALSE)
fchanges <- rbindlist(mapply(getChanges, mtree, fcat, rate.cat = 2,
                             SIMPLIFY = FALSE))
fchanges
cmelt <- melt(fchanges, variable.name = "type", value = "n")
csumm <- cmelt[, .(mean = mean(n), median = median(n)), by = type]
csumm
# Get confidence intervals around median number of changes
cq <- apply(fchanges[,1:ncol(fchanges)], 2, bootMed, n = 100000)
cq <- data.table(t(cq), keep.rownames = T)
setnames(cq, c("type", "0.025", "median", "0.975"))
setkey(cq, type)
setkey(cc, type)
cq <- cq[cc]
setcolorder(cq, c("type", "ctree", "0.025", "median", "0.975"))
cq
write.csv(cq, file = "all_changes.csv")
### Binary gains and losses
az.ftips <- lapply(anc2, "[[", "lik.tip.states")
az.fnodes <- lapply(anc2, "[[", "lik.anc.states")
az.ftips <- lapply(az.ftips, data.table)
az.fnodes <- lapply(az.fnodes, data.table)
lapply(az.ftips, setnames, c("AS", "ZS", "AF", "ZF"))
lapply(az.fnodes, setnames, c("AS", "ZS", "AF", "ZF"))
lapply(az.ftips, function(x)
  x[, asum := rowSums(.SD), .SDcols = grep("A", names(x))])
lapply(az.ftips, function(x)
  x[, zsum := rowSums(.SD), .SDcols = grep("Z", names(x))])
lapply(az.fnodes, function(x)
  x[, asum := rowSums(.SD), .SDcols = grep("A", names(x))])
lapply(az.fnodes, function(x)
  x[, zsum := rowSums(.SD), .SDcols = grep("Z", names(x))])
az.tvec <- lapply(az.ftips, function(x) apply(x[,.(asum, zsum)], 1, which.max))
az.nvec <- lapply(az.fnodes, function(x) apply(x[,.(asum, zsum)],1, which.max))
az.fcat <- mapply(function(x,y) c(x,y), az.tvec, az.nvec, SIMPLIFY = FALSE)
az.fchanges <- rbindlist(mapply(getChanges, mtree, az.fcat, rate.cat = 1,
                                SIMPLIFY = FALSE))
az.cmelt <- melt(az.fchanges, variable.name = "type", value = "n")
az.csumm <- az.cmelt[, .(mean = mean(n), median = median(n)), by = type]
az.csumm
# Get confidence intervals around median number of changes
az.cq <- apply(az.fchanges[,1:ncol(az.fchanges)], 2, bootMed, n = 100000)
az.cq <- data.table(t(az.cq), keep.rownames = T)
setnames(az.cq, c("type", "0.025", "median", "0.975"))
setkey(az.cq, type)
setkey(az.cc, type)
az.cq <- az.cq[az.cc]
setcolorder(az.cq, c("type", "ctree", "0.025", "median", "0.975"))
az.cq
az.cq <- az.cq[-(1:2),]
write.csv(az.cq, file = "binary_changes.csv")
|
# build matrix for data
# 7 March 2020
library(reshape2)
library(TTR)
library(dplyr)
library(DataCombine)
# Simulate 48 months of infection-count data for 12 hospital units.
#
# Counts carry lag-1 autocorrelation (weight `smoothing` on the previous
# month), and each month's hospital-onset (HO) count is drawn as a random
# subset of the total count.  Prints the unit names and the head of the
# result, then returns one long data frame of 12 x 48 rows.
create_data_frame <- function() {
  smoothing <- 0.5   # weight on the previous month's count
  max_draw <- 3      # sample(max_draw, 1) - 1 yields counts in 0..2
  n_months <- 48
  unit_names <- paste(letters[1:12])
  writeLines('\nUnits:')
  cat(unit_names)
  writeLines('\n')
  result <- data.frame()
  infections <- integer(n_months)
  ho_counts <- integer(n_months)
  stay_lengths <- integer(n_months)
  cleanings <- integer(n_months)
  for (unit in seq_along(unit_names)) {
    for (month in seq_len(n_months)) {
      raw_draw <- sample(max_draw, 1) - 1
      if (month == 1) {
        infections[month] <- raw_draw
      } else {
        # Exponential smoothing against last month's count, rounded to int.
        infections[month] <- round(infections[month - 1] * smoothing +
                                     raw_draw * (1 - smoothing))
      }
      # HO cases are a random subset of this month's infections.
      ho_counts[month] <- sample(infections[month] + 1, 1) - 1
      stay_lengths[month] <- sample(10, 1) - 1
      cleanings[month] <- sample(5, 1) - 1
    }
    unit_frame <- data.frame(
      month_cum = c(1:n_months),
      Infection_Count = infections,
      HoCount = ho_counts,
      NotHoCount = infections - ho_counts,
      UnitName = unit_names[unit],
      HHCom = runif(n_months, min = 0.8, max = 0.95),
      LengthOfStay = stay_lengths,
      AvgCleaning = cleanings,
      other = "other",
      stringsAsFactors = FALSE
    )
    result <- rbind(result, unit_frame)
  }
  print(head(result))
  # Last expression is returned
  result
}
# Simulate the unit/month panel analysed by everything below.
For_Evaluation <- create_data_frame()
# Simple moving average with a growing window at the start of the series.
#
# For positions i < k the value is the mean of the first i observations
# (so there are no leading NAs); from position k onward it is the mean of
# the trailing window of length k.
#
# Fixes the original loop `for (i in (1:k-1))`, which parses as
# `(1:k) - 1` = 0:(k-1): the i = 0 iteration was a silent no-op
# (`temp[0] <- ...` assigns nothing).  Also drops the TTR::runMean
# dependency by computing the rolling mean from cumulative sums.
#
# @param array Numeric vector to smooth.
# @param k     Window length (positive integer).
# @return Numeric vector the same length as `array`.
moving_average <- function(array, k) {
  n <- length(array)
  cs <- cumsum(array)
  out <- numeric(n)
  # Growing-window means for the first min(k - 1, n) positions.
  head_idx <- seq_len(min(k - 1, n))
  out[head_idx] <- cs[head_idx] / head_idx
  # Full windows of length k from position k onward.
  if (n >= k) {
    tail_idx <- k:n
    out[tail_idx] <- (cs[tail_idx] - c(0, cs)[tail_idx - k + 1]) / k
  }
  out
}
# Monthly means across all units, then a 3-month moving average of the
# infection count for plotting.
by_month <- For_Evaluation %>% group_by(month_cum)
by_month_mean <- by_month %>% summarise(
  Infection_Count = mean(Infection_Count),
  HHCom = mean(HHCom)
)
by_month_mean <- as.data.frame(by_month_mean)
ma_infect = moving_average(by_month_mean$Infection_Count,3)
plot(by_month_mean$month_cum, ma_infect,type="l",xlab = 'Month', ylab = 'infection count',
     main = 'moving average of infection count')
# Binary outcome: did the unit record at least one hospital-onset case?
For_Evaluation$Y = For_Evaluation$HoCount > 0
# One-month lags of the outcome and covariates, computed within each unit
# (DataCombine::slide leaves NA in each unit's first month).
For_Evaluation <- slide(For_Evaluation, Var = "Y", NewVar = "Y_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "NotHoCount", NewVar = "NotHoCount_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "LengthOfStay", NewVar = "LengthOfStay_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "HHCom", NewVar = "HHCom_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "AvgCleaning", NewVar = "AvgCleaning_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
# Logistic regression: probability of at least one hospital-onset case this
# month, predicted from last month's outcome and unit-level covariates.
# Fix: the original call contained a stray empty argument (`, ,`), which was
# silently passed positionally as the missing `weights` argument.
fit <- glm(Y ~ Y_Lag1 + NotHoCount_Lag1 + LengthOfStay_Lag1 + HHCom_Lag1 + AvgCleaning_Lag1,
           data=For_Evaluation,
           family=binomial(link="logit"))
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
|
/create_data.R
|
no_license
|
wangqi12240205/JHH-risk-prediction
|
R
| false
| false
| 3,134
|
r
|
# build matrix for data
# 7 March 2020
# Simulate a unit/month infection panel, plot a smoothed series, and fit a
# logistic model for hospital-onset cases on lagged covariates.
library(reshape2)
library(TTR)
library(dplyr)
library(DataCombine)
# Simulate 48 months of infection counts (with lag-1 autocorrelation) for
# 12 hospital units; returns one long data frame of 12 x 48 rows.
create_data_frame <- function() {
  For_Eval <- data.frame()
  Corr <- 0.5        # weight on the previous month's count
  Max_sample <- 3    # sample(Max_sample, 1) - 1 yields counts in 0..2
  R_sample <- integer(48)
  Hocount_sample <- integer(48)
  LengthOfStay <- integer(48)
  AvgCleaning <- integer(48)
  Units <- paste(letters[1:12])
  writeLines('\nUnits:')
  cat(Units)
  writeLines('\n')
  for (i in c(1:12)) {
    for (j in c(1:48)) {
      R_sample_j = sample(Max_sample,1) - 1
      if (j==1) {
        R_sample[j] = R_sample_j
      } else {
        # Exponential smoothing against last month's count
        R_sample[j] = round(R_sample[j-1] * Corr + R_sample_j * (1-Corr))
      }
      # Hospital-onset cases are a random subset of this month's infections
      Hocount_sample[j] = sample(R_sample[j] + 1, 1) - 1
      LengthOfStay[j] = sample(10,1) - 1
      AvgCleaning[j] = sample(5,1) - 1
    }
    For_Eval_i <- data.frame(
      month_cum = c(1:48),
      Infection_Count = R_sample,
      HoCount = Hocount_sample,
      NotHoCount = R_sample - Hocount_sample,
      UnitName = Units[i],
      HHCom = runif(48,min = 0.8,max = 0.95),
      LengthOfStay = LengthOfStay,
      AvgCleaning = AvgCleaning,
      other = "other",
      stringsAsFactors = FALSE
    )
    For_Eval <- rbind(For_Eval,For_Eval_i)
  }
  print(head(For_Eval))
  # Last line is returned
  For_Eval
}
For_Evaluation <- create_data_frame()
# Moving average with growing partial-window means at the start.
moving_average <- function(array,k){
  temp <- runMean(array,n = k)
  # fix: the original `for (i in (1:k-1))` parses as `(1:k) - 1` = 0:(k-1);
  # the i = 0 iteration was a silent no-op.  seq_len(k - 1) covers 1..k-1.
  for (i in seq_len(k - 1)){
    temp[i] = mean(array[1:i])
  }
  return (temp)
}
# Monthly means across units, smoothed infection-count plot
by_month <- For_Evaluation %>% group_by(month_cum)
by_month_mean <- by_month %>% summarise(
  Infection_Count = mean(Infection_Count),
  HHCom = mean(HHCom)
)
by_month_mean <- as.data.frame(by_month_mean)
ma_infect = moving_average(by_month_mean$Infection_Count,3)
plot(by_month_mean$month_cum, ma_infect,type="l",xlab = 'Month', ylab = 'infection count',
     main = 'moving average of infection count')
# Binary outcome and one-month lags within each unit
For_Evaluation$Y = For_Evaluation$HoCount > 0
For_Evaluation <- slide(For_Evaluation, Var = "Y", NewVar = "Y_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "NotHoCount", NewVar = "NotHoCount_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "LengthOfStay", NewVar = "LengthOfStay_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "HHCom", NewVar = "HHCom_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
For_Evaluation <- slide(For_Evaluation, Var = "AvgCleaning", NewVar = "AvgCleaning_Lag1", GroupVar = 'UnitName', slideBy = -1) # create lag1 variable
# Logistic regression on lagged predictors.
# Fix: removed a stray empty argument (`, ,`) that was silently passed
# positionally as the missing `weights` argument.
fit <- glm(Y ~ Y_Lag1 + NotHoCount_Lag1 + LengthOfStay_Lag1 + HHCom_Lag1 + AvgCleaning_Lag1,
           data=For_Evaluation,
           family=binomial(link="logit"))
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
|
# Mean and standard deviation of two grouped frequency distributions,
# using the assumed-mean (step-deviation) method.
# Class-interval midpoints and the two frequency columns.
x <- seq(7.45, 32.45, 5)
a <- c(2, 9, 29, 54, 11, 5)
b <- c(9, 11, 18, 32, 27, 13)
na <- sum(a)
nb <- sum(b)
# Frequency-weighted means.
mean_a <- sum(x * a) / na
mean_a
mean_b <- sum(x * b) / nb
mean_b
# Deviations from the assumed mean A.
A <- 22.45
d <- x - A
# Distribution a: variance = E[d^2] - E[d]^2 under the frequency weights.
var_a <- sum(a * d^2) / na - (sum(a * d) / na)^2
sd_a <- sqrt(var_a)
sd_a
# Distribution b: same formula with the b frequencies.
var_b <- sum(b * d^2) / nb - (sum(b * d) / nb)^2
sd_b <- sqrt(var_b)
sd_b
|
/Stats-Lab/DA1/q2_script.R
|
no_license
|
theProgrammerDavid/VIT-LABs
|
R
| false
| false
| 368
|
r
|
# Mean and standard deviation of two grouped frequency distributions,
# using the assumed-mean (step-deviation) method.
# x: class midpoints; a, b: the two frequency columns.
x=seq(7.45,32.45,5)
a=c(2,9,29,54,11,5)
b=c(9,11,18,32,27,13)
na=sum(a)
nb=sum(b)
# Frequency-weighted means
mean_a=sum(x*a)/sum(a)
mean_a
mean_b=sum(x*b)/sum(b)
mean_b
# Deviations from the assumed mean A; variance = E[d^2] - E[d]^2
A=22.45
d=x-A
fd=a*d
fdd=a*d*d
var_a= (sum(fdd)/na) - (sum(fd)/na)^2
sd_a=sqrt(var_a)
sd_a
#now for b
fdb=b*d
fddb=fdb*d
var_b = sum(fddb)/nb - (sum(fdb)/nb)^2
sd_b=sqrt(var_b)
sd_b
|
library("ggplot2")
# Raw vote tallies from the wine-tasting stations; one row per
# station/condition (column meanings inferred from the indexing below --
# TODO confirm against the survey CSV's header).
experiment.one <- read.table("samples/survey_1_2.csv", header = TRUE, sep=",")
# Tables for the follow the herd experiment
# 2x2 contingency tables (rows: control/treatment; cols: the two wines).
# Seeded ("added") votes are subtracted from the treatment counts so that
# only genuine participant votes are analysed.
AB.herd <- matrix(c(
  # control A , treatment A added
  experiment.one[c(1,3),3] - experiment.one[c(1,3), 6],
  # control B , treatment B
  experiment.one[c(1,3),4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
AC.herd <- matrix(c(
  # control A , treatment A
  experiment.one[2,3], experiment.one[4,4],
  # control C , treatment C added
  experiment.one[2,4], experiment.one[4,3] - experiment.one[4,6]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "C")))
# Tables for the follow the vip special access experiment
AB.special <- matrix(c(
  # control A , treatment A
  experiment.one[1,3], experiment.one[5,3],
  # control B , treatment B special
  experiment.one[1,4], experiment.one[5,4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
AC.special <- matrix(c(
  # control A , treatment A
  experiment.one[2,3], experiment.one[6,3],
  # control C , treatment C special
  experiment.one[2,4], experiment.one[6,4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "C")))
# Running Analysis on the experiment 1 and 2 results
# the fisher test produces a more accurate value for p than the chisq.test
# feel free to change this to chisq.test though
AB.herd.result <- fisher.test(AB.herd)
AC.herd.result <- fisher.test(AC.herd)
AB.special.result <- fisher.test(AB.special)
AC.special.result <- fisher.test(AC.special)
# if p.value < some alpha, then there is a significant association between
# seeing more votes for one wine and wine preference
# => to some extent people follow the herd when making judgments of taste
alpha <- 0.05
if(AB.herd.result$p.value < alpha){ "some herdiness" }else{ "unlikely to have herdiness"}
if(AC.herd.result$p.value < alpha){ "some herdiness" }else{ "unlikely to have herdiness"}
# having a vip special access deal and wine preference
# => to some extent people are affected by the allusion of special treatment
if(AB.special.result$p.value < alpha){ "vip matters" }else{ "unlikely that vip matters"}
if(AC.special.result$p.value < alpha){ "vip matters" }else{ "unlikely that vip matters"}
# Row-normalise each table to within-condition preference proportions.
dist.AB.herd <- AB.herd / apply(AB.herd, 1, sum)
dist.AC.herd <- AC.herd / apply(AC.herd, 1, sum)
dist.AB.special <- AB.special / apply(AB.special, 1, sum)
dist.AC.special <- AC.special / apply(AC.special, 1, sum)
# still unsure how to show the proportion on this
# (additional votes) / (total real vote sum at the station)
# proportion is calculated from within the experiment
added.to.A.treatment <- experiment.one[3,6] / sum(experiment.one[3,3:5])
added.to.C.treatment <- experiment.one[4,6] / sum(experiment.one[4,3:5])
# These bar charts don't look very good.
barplot(dist.AB.herd, main="Follow the Herd? Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AB.herd), beside=TRUE)
barplot(dist.AC.herd, main="Follow the Herd? Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AC.herd), beside=TRUE)
barplot(dist.AB.special, main="VIP syndrome: Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AB.special), beside=TRUE)
barplot(dist.AC.special, main="VIP syndrome: Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AC.special), beside=TRUE)
# ggplot bar charts (need to add titles and other things legend markers)
#
# Plot a faceted (control vs treatment) bar chart of wine-preference
# proportions and save it to `filename`.
#
# exptable    2x2 matrix (rows: control/treatment; cols: the two wines).
# filename    output path handed to ggsave().
# fillvals    fill colours for the two wines.
# added_votes extra per-bar amounts for the seeded votes (length 4, one per
#             row/wine combination), recycled into the `prop` column.
#
# NOTE(review): the ggplot object is neither printed nor passed to
# ggsave(); ggsave() saves last_plot(), which may be stale when this runs
# non-interactively -- consider capturing the plot and passing it
# explicitly (ggsave(filename, p)).
nice.plot <- function(exptable, filename="barplot.pdf", fillvals=c("#00C0c3", "#fa736f"), added_votes=c(0,0,0,0)) {
  df = data.frame(prop=c(exptable[,1],
                         exptable[,2],
                         added_votes),
                  group=rownames(exptable),
                  wine=rep(rep(colnames(exptable), each=2), 2))
  # do things to change color here or potentially even save to a pdf
  # switch group and wine in this line to change grouping (group by treatment or group by wine)
  ggplot(df, aes(wine, prop, fill=wine, colour=wine)) +
    geom_bar(stat="identity") +
    facet_grid(. ~ group) +
    scale_colour_manual(values=c("#000000","#000000")) +
    scale_fill_manual(values=fillvals) +
    theme_bw()
  ggsave(filename)
}
# pick the wine colors as represented in the bar graph
wine.A <- "#00C0c3"
wine.B <- "#fa736f"
wine.C <- "#e79d31"
# votes added to a
# 0,0.22,0,0.0
nice.plot(dist.AB.herd, "experiment_1_AB.pdf", c(wine.A, wine.B), c(0,added.to.A.treatment,0,0))
# votes added to c
# 0,0.00,0,0.1
nice.plot(dist.AC.herd, "experiment_1_AC.pdf", c(wine.A, wine.C), c(0,0,0,added.to.C.treatment))
nice.plot(dist.AB.special, "experiment_2_AB.pdf", c(wine.A, wine.B), c(0,0,0,0))
nice.plot(dist.AC.special, "experiment_2_AC.pdf", c(wine.A, wine.C), c(0,0,0,0))
######
# ACTUAL ANALYSIS FOR BAR CHARTS SINCE THE STUFF CAME OUT VERY DIFFERENTLY
######
# Observed (not simulated) tallies; extract the raw counts per station.
act.one <- read.table("actual/survey_1_2.csv", header = TRUE, sep=",")
A.merlot.normal <- act.one[1, 3]
C.pinot.special <- act.one[1, 5]
A.merlot.normal.two <- act.one[2, 3]
C.crap.special <- act.one[2, 5]
A.merlot.treatment <- act.one[3, 3]
B.crap.treatment <- act.one[3, 5]
A.merlot.control <- act.one[4, 3]
B.crap.control <- act.one[4, 5]
# experiment 1
AB.herd <- matrix(c(
  A.merlot.control, A.merlot.treatment,
  B.crap.control, B.crap.treatment
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
nice.plot(AB.herd, "actual/experiment_1_AB.pdf", c(wine.B, wine.A))
# experiment 2
exp.special <- data.frame(value=c(
  A.merlot.normal, C.pinot.special,
  A.merlot.normal.two, C.crap.special,
  A.merlot.control, B.crap.control),
  station=rep(c("station 1", "station 2", "station 4"), each=2),
  wine=c("A", "B", "A", "B", "A", "B"))
ggplot(exp.special,
       aes(x=wine,
           y=value,
           fill=factor(wine))) +
  facet_grid(. ~ station) +
  geom_bar(stat="identity") +
  theme_bw() +
  # Fix: opts() and theme_text() were removed from ggplot2 long ago;
  # theme() + element_text() are the supported replacements.
  theme(legend.position = "none", axis.title.x = element_text(vjust = 0))
ggsave("actual/experiment_2_ABC.pdf")
|
/analysis.r
|
no_license
|
sandbox/wine-test
|
R
| false
| false
| 6,904
|
r
|
library("ggplot2")
experiment.one <- read.table("samples/survey_1_2.csv", header = TRUE, sep=",")
# Tables for the follow the herd experiment
AB.herd <- matrix(c(
# control A , treatment A added
experiment.one[c(1,3),3] - experiment.one[c(1,3), 6],
# control B , treatment B
experiment.one[c(1,3),4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
AC.herd <- matrix(c(
# control A , treatment A
experiment.one[2,3], experiment.one[4,4],
# control C , treatment C added
experiment.one[2,4], experiment.one[4,3] - experiment.one[4,6]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "C")))
# Tables for the follow the vip special access experiment
AB.special <- matrix(c(
# control A , treatment A
experiment.one[1,3], experiment.one[5,3],
# control B , treatment B special
experiment.one[1,4], experiment.one[5,4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
AC.special <- matrix(c(
# control A , treatment A
experiment.one[2,3], experiment.one[6,3],
# control C , treatment C special
experiment.one[2,4], experiment.one[6,4]
), nr=2, dimnames=list(c("control", "treatment"), c("A", "C")))
# Running Analysis on the experiment 1 and 2 results
# the fisher test produces a more accurate value for p than the chisq.test
# feel free to change this to chisq.test though
AB.herd.result <- fisher.test(AB.herd)
AC.herd.result <- fisher.test(AC.herd)
AB.special.result <- fisher.test(AB.special)
AC.special.result <- fisher.test(AC.special)
# if p.value < some alpha, then there is a significant association between
# seeing more votes for one wine and wine preference
# => to some extent people follow the herd when making judgments of taste
alpha <- 0.05
if(AB.herd.result$p.value < alpha){ "some herdiness" }else{ "unlikely to have herdiness"}
if(AC.herd.result$p.value < alpha){ "some herdiness" }else{ "unlikely to have herdiness"}
# having a vip special access deal and wine preference
# => to some extent people are affected by the allusion of special treatment
if(AB.special.result$p.value < alpha){ "vip matters" }else{ "unlikely that vip matters"}
if(AC.special.result$p.value < alpha){ "vip matters" }else{ "unlikely that vip matters"}
dist.AB.herd <- AB.herd / apply(AB.herd, 1, sum)
dist.AC.herd <- AC.herd / apply(AC.herd, 1, sum)
dist.AB.special <- AB.special / apply(AB.special, 1, sum)
dist.AC.special <- AC.special / apply(AC.special, 1, sum)
# still unsure how to show the proportion on this
# (additional votes) / (total real vote sum at the station)
# proportion is calculated from within the experiment
added.to.A.treatment <- experiment.one[3,6] / sum(experiment.one[3,3:5])
added.to.C.treatment <- experiment.one[4,6] / sum(experiment.one[4,3:5])
# These bar charts don't look very good.
# Base-graphics versions of the four comparisons; superseded by the
# ggplot output produced by nice.plot() further down.
barplot(dist.AB.herd, main="Follow the Herd? Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AB.herd), beside=TRUE)
barplot(dist.AC.herd, main="Follow the Herd? Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AC.herd), beside=TRUE)
barplot(dist.AB.special, main="VIP syndrome: Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AB.special), beside=TRUE)
barplot(dist.AC.special, main="VIP syndrome: Distribution of Wine Preference",
        xlab="Wine Tasted",
        legend = rownames(AC.special), beside=TRUE)
# ggplot bar charts (need to add titles and other things legend markers)
# Draw the vote-proportion table `exptable` as bar charts faceted by group
# and save the figure to `filename`.
#
# exptable    matrix of proportions: rows = groups, cols = the two wines.
# filename    output path handed to ggsave().
# fillvals    fill colours for the two wines.
# added_votes extra vote proportions stacked on top of the real votes, one
#             entry per (group, wine) cell.
#
# NOTE(review): the data.frame construction assumes a 2x2 table (the
# rep(..., each=2) pattern) — confirm before reusing with other shapes.
nice.plot <- function(exptable, filename="barplot.pdf", fillvals=c("#00C0c3", "#fa736f"), added_votes=c(0,0,0,0)) {
  df = data.frame(prop=c(exptable[,1],
                         exptable[,2],
                         added_votes),
                  group=rownames(exptable),
                  wine=rep(rep(colnames(exptable), each=2), 2))
  # do things to change color here or potentially even save to a pdf
  # switch group and wine in this line to change grouping (group by treatment or group by wine)
  p <- ggplot(df, aes(wine, prop, fill=wine, colour=wine)) +
    geom_bar(stat="identity") +
    facet_grid(. ~ group) +
    scale_colour_manual(values=c("#000000","#000000")) +
    scale_fill_manual(values=fillvals) +
    theme_bw()
  # Pass the plot explicitly: relying on ggsave()'s last_plot() default is
  # fragile inside a function where the plot object is never printed.
  ggsave(filename, plot = p)
}
# pick the wine colors as represented in the bar graph
wine.A <- "#00C0c3"
wine.B <- "#fa736f"
wine.C <- "#e79d31"
# Experiment 1 plots: stack the treatment-side seeded-vote proportion on top.
# votes added to a
# 0,0.22,0,0.0
nice.plot(dist.AB.herd, "experiment_1_AB.pdf", c(wine.A, wine.B), c(0,added.to.A.treatment,0,0))
# votes added to c
# 0,0.00,0,0.1
nice.plot(dist.AC.herd, "experiment_1_AC.pdf", c(wine.A, wine.C), c(0,0,0,added.to.C.treatment))
# Experiment 2 plots: no seeded votes, so the added-votes vector is all zero.
nice.plot(dist.AB.special, "experiment_2_AB.pdf", c(wine.A, wine.B), c(0,0,0,0))
nice.plot(dist.AC.special, "experiment_2_AC.pdf", c(wine.A, wine.C), c(0,0,0,0))
######
# ACTUAL ANALYSIS FOR BAR CHARTS SINCE THE STUFF CAME OUT VERY DIFFERENTLY
######
# Raw vote counts from the real survey.
act.one <- read.table("actual/survey_1_2.csv", header = TRUE, sep=",")
# Columns 3 and 5 hold the vote counts for the two wines at each station row.
# NOTE(review): column meanings inferred from the variable names below —
# confirm against the CSV header.
A.merlot.normal <- act.one[1, 3]
C.pinot.special <- act.one[1, 5]
A.merlot.normal.two <- act.one[2, 3]
C.crap.special <- act.one[2, 5]
A.merlot.treatment <- act.one[3, 3]
B.crap.treatment <- act.one[3, 5]
A.merlot.control <- act.one[4, 3]
B.crap.control <- act.one[4, 5]
# experiment 1
AB.herd <- matrix(c(
  A.merlot.control, A.merlot.treatment,
  B.crap.control, B.crap.treatment
), nr=2, dimnames=list(c("control", "treatment"), c("A", "B")))
nice.plot(AB.herd, "actual/experiment_1_AB.pdf", c(wine.B, wine.A))
# experiment 2
# Per-station comparison of wine A against its competitor.
exp.special <- data.frame(value=c(
                            A.merlot.normal, C.pinot.special,
                            A.merlot.normal.two, C.crap.special,
                            A.merlot.control, B.crap.control),
                          station=rep(c("station 1", "station 2", "station 4"), each=2),
                          wine=c("A", "B", "A", "B", "A", "B"))
ggplot(exp.special,
       aes(x=wine,
           y=value,
           fill=factor(wine))) +
  facet_grid(. ~ station) +
  geom_bar(stat="identity") +
  theme_bw() +
  # opts()/theme_text() were removed from ggplot2 (>= 0.9.2); theme() and
  # element_text() are the supported replacements with the same meaning.
  theme(legend.position = "none", axis.title.x = element_text(vjust = 0))
ggsave("actual/experiment_2_ABC.pdf")
|
# Tiering-simulation plots for YALM: access time and cost versus the
# percentage of "hot" objects, for uniform-size and random-size workloads.
# NOTE(review): absolute /Users/... paths make this script machine-specific.
library(plotly)
d<-read.csv("/Users/darshak/Documents/Projects/YALM/uniformResults.csv")
# lwd/lty are spelled out in full; the original "lw"/"lt" only worked through
# R's partial argument matching.
matplot(d$Hot....Almost., cbind(d$tier1.time, d$tier2.time, d$Sum), pch=c(0,1), col = c("Red", "Blue", "Green"), main="Hot % vs Time", ylab = "Values", xlab = "Hot %", type = "l", lwd = c(1,0.75), lty = c(1,1,1))
legend("topright", lty = c(1, 1), col = c("Red", "Blue", "Green"), legend = c("Cold Time", "Hot time" , "Total Time"), box.lwd = 0, inset=c(-0.2,0))
d2<-read.csv("/Users/darshak/Documents/Projects/YALM/randomSize.csv")
matplot(d2$Hot....Almost., cbind(d2$Tier.1..time.size., d2$Tier.2..time.size., d2$Sum), pch=c(0,1), col = c("Red", "Blue", "Green"), main="Hot % vs Time", ylab = "Values", xlab = "Hot %", type = "l", lwd = c(1,0.75), lty = c(1,1,1))
legend("topright", lty = c(1, 1), col = c("Red", "Blue", "Green"), legend = c("Cold Time", "Hot time" , "Total Time"), box.lwd = 0, inset=c(-0.2,0))
# Interactive plotly versions of the cost/time curves.
plot1<-plot_ly(x = d2$Hot....Almost., y = d2$Total.Cost, name = "Total Cost",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Hot.Cost, name = "Hot Cost", mode = 'lines+markers') %>%
  add_trace(y = d2$Cold.Cost, name = "Cold Cost", mode = 'lines+markers') %>% layout(title = "Total Cost vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Total Cost'))
plot2<-plot_ly(x = d$Hot....Almost., y = d$tier1.time, name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d$tier2.time, name = "Hot Time", mode = 'lines+markers') %>%
  add_trace(y = d$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Uniform Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot3<-plot_ly(x = d2$Hot....Almost., y = d2$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
  add_trace(y = d2$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot4<-plot_ly(x = d2$Total.Cost, y = d2$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
  add_trace(y = d2$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Total Cost (Random Size Objs)", xaxis = list(title = 'Total Cost'), yaxis = list(title = 'Access Time'))
# Histograms of the power-law object-size samples at different exponents.
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw15.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 15")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw10.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 10")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw5.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 5")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw1.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 1")
d3 <- read.csv("/Users/darshak/Documents/Projects/YALM/randomSize\ Powerlaw.csv")
plot3<-plot_ly(x = d3$Hot....Almost., y = d3$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d3$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
  add_trace(y = d3$Sum..time., name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Random Size Objs - PowerLaw)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
# NOTE(review): `d` was re-read above from the power-law sample files (single
# column V1), so d$hot.p/d$Hot_time below are NULL — presumably this was meant
# to use the original uniformResults data; confirm before relying on plot5.
plot5<-plot_ly(x = d$hot.p, y = d$Hot_time/d$Hot_accessed_size, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d$Cold_time/d$Cold_accessed_size, name = "Cold Time", mode = 'lines+markers') %>%layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot9<-plot_ly(x = d3$hot.p, y = d3$Hot_time, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d3$Cold_time, name = "Cold Time", mode = 'lines+markers') %>%layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot9
# A syntactically broken duplicate of the next line (it ended in a dangling
# "%", which aborts source()ing the whole file) was removed here.
plot10<-plot_ly(x = d3$hot.p, y = d3$Hot_time + d3$Cold_time, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))
plot10
# NOTE(review): `hp` and `tp` are not defined anywhere in this script —
# confirm where they were meant to come from.
plot10<-plot_ly(x = hp, y = tp, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>%layout(title = "Time vs Hot % (Random Size Objs - PowerLaw w Mean Policy)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
|
/Other/Graphs.R
|
no_license
|
NeeharikaSompalli/YALM
|
R
| false
| false
| 4,731
|
r
|
library(plotly)
d<-read.csv("/Users/darshak/Documents/Projects/YALM/uniformResults.csv")
matplot(d$Hot....Almost., cbind(d$tier1.time, d$tier2.time, d$Sum), pch=c(0,1), col = c("Red", "Blue", "Green"), main="Hot % vs Time", ylab = "Values", xlab = "Hot %", type = "l", lw = c(1,0.75), lt = c(1,1,1))
legend("topright", lty = c(1, 1), col = c("Red", "Blue", "Green"), legend = c("Cold Time", "Hot time" , "Total Time"), box.lwd = 0, inset=c(-0.2,0))
d2<-read.csv("/Users/darshak/Documents/Projects/YALM/randomSize.csv")
matplot(d2$Hot....Almost., cbind(d2$Tier.1..time.size., d2$Tier.2..time.size., d2$Sum), pch=c(0,1), col = c("Red", "Blue", "Green"), main="Hot % vs Time", ylab = "Values", xlab = "Hot %", type = "l", lw = c(1,0.75), lt = c(1,1,1))
legend("topright", lty = c(1, 1), col = c("Red", "Blue", "Green"), legend = c("Cold Time", "Hot time" , "Total Time"), box.lwd = 0, inset=c(-0.2,0))
plot1<-plot_ly(x = d2$Hot....Almost., y = d2$Total.Cost, name = "Total Cost",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Hot.Cost, name = "Hot Cost", mode = 'lines+markers') %>%
add_trace(y = d2$Cold.Cost, name = "Cold Cost", mode = 'lines+markers') %>% layout(title = "Total Cost vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Total Cost'))
plot2<-plot_ly(x = d$Hot....Almost., y = d$tier1.time, name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d$tier2.time, name = "Hot Time", mode = 'lines+markers') %>%
add_trace(y = d$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Uniform Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot3<-plot_ly(x = d2$Hot....Almost., y = d2$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
add_trace(y = d2$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot4<-plot_ly(x = d2$Total.Cost, y = d2$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d2$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
add_trace(y = d2$Sum, name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Total Cost (Random Size Objs)", xaxis = list(title = 'Total Cost'), yaxis = list(title = 'Access Time'))
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw15.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 15")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw10.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 10")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw5.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 5")
d = read.csv("/Users/darshak/Documents/Projects/YALM/powerlaw1.csv", header=FALSE)
hist(d$V1, breaks = 1000, main = "A = 1")
d3 <- read.csv("/Users/darshak/Documents/Projects/YALM/randomSize\ Powerlaw.csv")
plot3<-plot_ly(x = d3$Hot....Almost., y = d3$Tier.1..time.size., name = "Cold Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d3$Tier.2..time.size., name = "Hot Time", mode = 'lines+markers') %>%
add_trace(y = d3$Sum..time., name = "Total Time", mode = 'lines+markers') %>% layout(title = "Time vs Hot % (Random Size Objs - PowerLaw)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot5<-plot_ly(x = d$hot.p, y = d$Hot_time/d$Hot_accessed_size, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d$Cold_time/d$Cold_accessed_size, name = "Cold Time", mode = 'lines+markers') %>%layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot9<-plot_ly(x = d3$hot.p, y = d3$Hot_time, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>% add_trace(y = d3$Cold_time, name = "Cold Time", mode = 'lines+markers') %>%layout(title = "Time vs Hot % (Random Size Objs)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
plot9
plot10<-plot_ly(x = d3$hot.p, y = d3$Hot_time + d3$Cold_time, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%
plot10<-plot_ly(x = d3$hot.p, y = d3$Hot_time + d3$Cold_time, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))
plot10
plot10<-plot_ly(x = hp, y = tp, name = "Hot Time",type = "scatter", mode = c("markers", "lines"))%>%layout(title = "Time vs Hot % (Random Size Objs - PowerLaw w Mean Policy)", xaxis = list(title = 'Hot %'), yaxis = list(title = 'Access Time'))
|
# Read a Newick tree, strip its root, and write the unrooted tree back out.
library(ape)
phylo_tree <- read.tree("6434_0.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file = "6434_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/6434_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
library(ape)
testtree <- read.tree("6434_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6434_0_unrooted.txt")
|
# Exponential-smoothing and ARIMA walkthrough on three classic series:
# annual rainfall (level only), skirt diameters (level + trend) and monthly
# souvenir sales (level + trend + seasonality).
# NOTE(review): the hard-coded setwd() makes this script machine-specific.
setwd("C:/Govinda/PU2376/project/Desktop/Desktop/Data Science/Edu Pristine/Yogesh Sir Batch/timeseriesandclustering")
rain=read.csv("rain.csv")
# converting to a time series data format
raints=ts(rain,start=c(1817))
plot(raints)
########################################################################
#                 Single Exponential Smoothening                       #
########################################################################
# beta=F, gamma=F: estimate the level only (no trend, no seasonality).
rainforecast=HoltWinters(raints,beta=F,gamma=F)
rainforecast
plot(rainforecast)
names(rainforecast)
rainforecast$fitted
# Fixed, heavily smoothed level (alpha = 0.02) for comparison.
r2=HoltWinters(raints,alpha=0.02,beta=F,gamma=F)
#Making future forecasts
library(forecast)
# forecast.HoltWinters() was removed from the forecast package; the generic
# forecast() dispatches to the same HoltWinters method.
r2forecast=forecast(r2,h=10)
plot(r2forecast)
# To figure out the randomness of the model
hist(r2forecast$residuals)
acf(r2forecast$residuals,lag.max=20)
qqnorm(r2forecast$residuals)
########################################################################
#                 Double Exponential Smoothening                       #
########################################################################
skirts=read.csv("skirts.csv")
skirts=ts(skirts,start=c(1866))
plot(skirts)
class(skirts)
#as you can see this time series has trend and a simple "level" forecast will not be enough.
# gamma=F: estimate level and trend, but no seasonal component.
skirtforecast=HoltWinters(skirts,gamma=F)
plot(skirtforecast)
skirtforecast
#Making future forecast
skirtfuture=forecast(skirtforecast,h=19)
plot(skirtfuture)
skirts
#To figure out the randomness of the model
qqnorm(skirtfuture$residual)
########################################################################
#                 Triple Exponential Smoothening                       #
########################################################################
souvenir=read.csv("souvenir.csv")
souvenirts <- ts(souvenir, frequency=12, start=c(1987,1))
plot(souvenirts)
#Removing the multiplicative trend
souvenirts=log(souvenirts)
plot(souvenirts)
souvenirforecast=HoltWinters(souvenirts)
souvenirforecast
plot(souvenirforecast)
souvenirfuture=forecast(souvenirforecast,h=48)
plot(souvenirfuture)
#To figure out the randomness of the model
acf(souvenirfuture$residuals,lag.max=30)
qqnorm(souvenirfuture$residuals)
plot(souvenirfuture$residuals)
########################################################################
#                 ARIMA                                                #
########################################################################
#ARIMA models have 3 parameters and is generally written as ARIMA(p,d,q)
library(forecast)
souvenir=read.csv("souvenir.csv")
souvenirts <- ts(souvenir, frequency=12, start=c(1987,1))
plot(souvenirts)
# Inspect differenced series to judge d and D visually.
plot(diff(souvenirts,1))
plot(diff(souvenirts,12))
plot(diff(diff(souvenirts,1),12))
auto.arima(souvenirts)
#ARIMA(2,0,0)(0,1,1)[12]
#p=2 ,d=0, q=0 | P=0 , D=1 , Q=1 | m=12
arimafit=arima(souvenirts,order=c(1,1,1),seasonal=c(0,1,1))
# forecast.Arima() was removed from the forecast package; use forecast().
arimafuture=forecast(arimafit,h=48)
plot(arimafuture)
acf(arimafuture$residuals,lag.max=20)
hist(arimafuture$residuals)
qqnorm(arimafuture$residuals)
|
/R/9. Time Series/TSA.R
|
no_license
|
abhijitvp/DS
|
R
| false
| false
| 3,065
|
r
|
setwd("C:/Govinda/PU2376/project/Desktop/Desktop/Data Science/Edu Pristine/Yogesh Sir Batch/timeseriesandclustering")
rain=read.csv("rain.csv")
# converting to a time series data format
raints=ts(rain,start=c(1817))
plot(raints)
########################################################################
# Single Exponential Smoothening #
########################################################################
rainforecast=HoltWinters(raints,beta=F,gamma=F)
rainforecast
plot(rainforecast)
names(rainforecast)
rainforecast$fitted
r2=HoltWinters(raints,alpha=0.02,beta=F,gamma=F)
#Making future forecasts
library(forecast)
r2forecast=forecast.HoltWinters(r2,h=10)
plot(r2forecast)
# To figure out the randomness of the model
hist(r2forecast$residuals)
acf(r2forecast$residuals,lag.max=20)
qqnorm(r2forecast$residuals)
########################################################################
# Double Exponential Smoothening #
########################################################################
skirts=read.csv("skirts.csv")
skirts=ts(skirts,start=c(1866))
plot(skirts)
class(skirts)
#as you can see this time series has trend and a simple "level" forecast will not be enough.
skirtforecast=HoltWinters(skirts,gamma=F)
plot(skirtforecast)
skirtforecast
#Making future forecast
skirtfuture=forecast.HoltWinters(skirtforecast,h=19)
plot(skirtfuture)
skirts
#To figure out the randomness of the model
qqnorm(skirtfuture$residual)
########################################################################
# Triple Exponential Smoothening #
########################################################################
souvenir=read.csv("souvenir.csv")
souvenirts <- ts(souvenir, frequency=12, start=c(1987,1))
plot(souvenirts)
#Removing the multiplicative trend
souvenirts=log(souvenirts)
plot(souvenirts)
souvenirforecast=HoltWinters(souvenirts)
souvenirforecast
plot(souvenirforecast)
souvenirfuture=forecast.HoltWinters(souvenirforecast,h=48)
plot(souvenirfuture)
#To figure out the randomness of the model
acf(souvenirfuture$residuals,lag.max=30)
qqnorm(souvenirfuture$residuals)
plot(souvenirfuture$residuals)
########################################################################
# ARIMA #
########################################################################
#ARIMA models have 3 parameters and is generally written as ARIMA(p,d,q)
library(forecast)
souvenir=read.csv("souvenir.csv")
souvenirts <- ts(souvenir, frequency=12, start=c(1987,1))
plot(souvenirts)
plot(diff(souvenirts,1))
plot(diff(souvenirts,12))
plot(diff(diff(souvenirts,1),12))
auto.arima(souvenirts)
#ARIMA(2,0,0)(0,1,1)[12]
#p=2 ,d=0, q=0 | P=0 , D=1 , Q=1 | m=12
arimafit=arima(souvenirts,order=c(1,1,1),seasonal=c(0,1,1))
arimafuture=forecast.Arima(arimafit,h=48)
plot(arimafuture)
acf(arimafuture$residuals,lag.max=20)
hist(arimafuture$residuals)
qqnorm(arimafuture$residuals)
|
# Neighbourhood 1: every permutation reachable by swapping one pair of
# adjacent elements. Returns a matrix with one neighbour per row.
n1=function(perm){
  l = length(perm)
  res = matrix(nrow = l - 1, ncol = l)
  for (i in seq_len(l - 1)) {
    swapped = perm
    swapped[c(i, i + 1)] = swapped[c(i + 1, i)]
    res[i, ] = swapped
  }
  res
}
# Neighbourhood 2: all rotations of the permutation; row i moves the first
# i elements to the back.
n2=function(perm){
  l = length(perm)
  res = matrix(nrow = l - 1, ncol = l)
  for (i in seq_len(l - 1)) {
    res[i, ] = c(perm[-seq_len(i)], perm[seq_len(i)])
  }
  res
}
# Neighbourhood 3: every permutation reachable by swapping any two (not
# necessarily adjacent) elements; l*(l-1)/2 neighbours, one per row.
n3=function(perm){
  l = length(perm)
  res = matrix(nrow = l * (l - 1) / 2, ncol = l)
  k = 1
  for (i in seq_len(l - 1)) {
    for (j in (i + 1):l) {
      swapped = perm
      swapped[c(i, j)] = swapped[c(j, i)]
      res[k, ] = swapped
      k = k + 1
    }
  }
  res
}
# Neighbourhood 4: every permutation reachable by reversing one contiguous
# segment perm[i..j]; l*(l-1)/2 neighbours, one per row.
n4=function(perm){
  l = length(perm)
  res = matrix(nrow = l * (l - 1) / 2, ncol = l)
  k = 1
  for (i in seq_len(l - 1)) {
    for (j in (i + 1):l) {
      candidate = perm
      candidate[i:j] = candidate[j:i]  # j:i counts down, i.e. the reversal
      res[k, ] = candidate
      k = k + 1
    }
  }
  res
}
# Compose two neighbourhood generators: apply `nb` to every neighbour
# produced by `na` and stack all resulting rows. Returns a function with the
# same (perm -> matrix) signature as the n* generators.
combine=function(na,nb){
  function(perm){
    first = na(perm)
    pieces = lapply(seq_len(nrow(first)), function(i) nb(first[i, ]))
    # seed with a 0-row matrix so an empty first neighbourhood still yields
    # a matrix of the right width
    do.call(rbind, c(list(matrix(ncol = length(perm), nrow = 0)), pieces))
  }
}
# Variable-neighbourhood best-improvement local search (maximisation).
#
# start       initial permutation (numeric vector).
# obj         objective to maximise; maps a permutation to a number.
# neighs      list of neighbourhood generators; each maps a permutation to a
#             matrix with one candidate neighbour per row.
# printsteps  print the incumbent at the start of every outer iteration.
# printneighs print, at the end, how many times each neighbourhood supplied
#             the improving move.
#
# Neighbourhoods are scanned in order; the first one containing an improving
# move contributes its best move and the scan restarts from neighbourhood 1.
# The search stops when no neighbourhood improves the incumbent.
vnsearch=function(start,obj,neighs,printsteps=F,printneighs=F){
  incumbent = start
  if (printneighs)
    improvements = rep(0, length(neighs))
  repeat {
    current = incumbent
    if (printsteps)
      print(current)
    best_val = obj(current)
    winner = 0
    for (n in seq_along(neighs)) {
      candidates = neighs[[n]](current)
      for (i in seq_len(nrow(candidates))) {
        v = obj(candidates[i, ])
        if (v > best_val) {
          best_val = v
          incumbent = candidates[i, ]
          winner = n
        }
      }
      if (winner != 0) {
        if (printneighs)
          improvements[n] = improvements[n] + 1
        break
      }
    }
    if (winner == 0)
      break
  }
  if (printneighs)
    print(improvements)
  incumbent
}
|
/vnsearch.R
|
no_license
|
mtsch/rzhp-seminarska
|
R
| false
| false
| 1,637
|
r
|
n1=function(perm){ #swap two neighbour elements
l=length(perm)
res=matrix(nrow=l-1,ncol=l)
for(i in 1:(l-1)){
tmp=perm
tmp[i:(i+1)]=rev(tmp[i:(i+1)])
#print(tmp)
res[i,]=tmp
}
res
}
n2=function(perm){#swap two parts of permutation
l=length(perm)
res=matrix(nrow=l-1,ncol=l)
for(i in 1:(l-1)){
res[i,]=c(perm[(i+1):length(perm)],perm[1:i])
}
res
}
n3=function(perm){#swap any two elements
l=length(perm)
res=matrix(nrow=l*(l-1)/2,ncol=l)
k=1
for(i in 1:(l-1)){
for(j in (i+1):l){
#print(c(i,j,k))
tmp=perm
t=tmp[i]
tmp[i]=tmp[j]
tmp[j]=t
res[k,]=tmp
k=k+1
}
}
res
}
n4=function(perm){#reverse part of permutation
l=length(perm)
res=matrix(nrow=l*(l-1)/2,ncol=l)
k=1
for(i in 1:(l-1)){
for(j in (i+1):l){
tmp=perm
tmp[i:j]=rev(tmp[i:j])
res[k,]=tmp
k=k+1
}
}
res
}
combine=function(na,nb){#combine two neighborhoods
function(perm){
l=length(perm)
res=matrix(ncol=l,nrow=0)
part=na(perm)
for(i in 1:nrow(part)){
res=rbind(res,nb(part[i,]))
}
res
}
}
vnsearch=function(start,obj,neighs,printsteps=F,printneighs=F){
best=start
searching=T
if(printneighs)
nUsed=rep(0,length(neighs))
while(searching){
searching=F
start=best
if(printsteps)
print(start)
besti=-1
bestobj=obj(start)
for(n in 1:length(neighs)){
neigh=neighs[[n]](start)
for(i in 1:nrow(neigh)){
o=obj(neigh[i,])
if(o>bestobj){
bestobj=o
best=neigh[i,]
besti=i
}
}
if(besti!=-1){
searching=T
if(printneighs)
nUsed[n]=nUsed[n]+1
break
}
}
}
if(printneighs)
print(nUsed)
best
}
|
# Pick the single class name to show in user-facing messages.
# When an object carries several classes, prefer the first well-known
# container/vector type in priority order; otherwise fall back to the
# object's first class.
mark_type <- function(cls){
  stopifnot(is.character(cls))
  if (length(cls) == 1) return(cls)
  known = c('tbl_df', 'data.frame', 'list', 'matrix', 'factor',
            'logical', 'integer', 'numeric', 'character')
  hits = known[known %in% cls]
  if (length(hits) > 0) return(hits[1])
  cls[1]
}
#' Provide human-readable comparison of two objects
#'
#' `compare` is similar to [base::all.equal()], but shows
#' you examples of where the failures occured.
#' Support data types: integer, numeric, logical, character, factor, list, matrix, data.frame
#'
#' @export
#' @param x,y Objects to compare
#' @param tolerance Numerical tolerance: any differences smaller than this
#' value will be ignored. Default, 1e-5.
#' @param test.names If x and y are lists, whether to test the names of elements. Default, TRUE.
#' @param test.rowname If x and y are data frames, whether to test the row names. Default, FALSE.
#' @param test.colname If x and y are data frames, whether to test the column names. Default, TRUE.
#' @param ... Additional arguments used to control specifics of comparison
#' @description The parameters `test.rowname` and `test.colname` only work for data frames,
#' while the `test.names` parameter only works for lists.
compare <- function(x, y, ...) {
  # Identical objects need no further inspection.
  if (identical(x, y)) return(comparison())
  # Reject answers containing NA early; wrapped in try() because anyNA()
  # may fail on exotic objects.
  try(
    if (anyNA(x)){
      msg = 'Your answer contains missing values NA, please check again.'
      return(comparison(FALSE, msg))
    },
    silent=TRUE
  )
  x_class = class(x); y_class = class(y)
  if ( is.numeric(x) && is.numeric(y) ){
    # Both numeric-ish: dispatch on the expected answer so an integer key
    # selects the stricter integer comparison.
    UseMethod("compare", y)
  }else if (!identical(x_class, y_class) && !inherits(x, y_class)){
    # class() can be a vector (e.g. a tibble has three classes); the old
    # `x_class != y_class` inside `&&` errors on length > 1 in modern R,
    # so compare with identical() instead.
    msg = sprintf('We expect your answer returns type "%s", but it returns "%s" instead.', mark_type(y_class), mark_type(x_class))
    if (is.null(x)){
      msg = paste(msg,
                  '\nDid you forget to return something in your function definition?',
                  sep = '\n'
      )
    }
    return(comparison(FALSE, msg))
  }
  UseMethod("compare", x)
}
# Build a comparison result: a classed list with a scalar `equal` flag and a
# human-readable `message` (multiple message lines are joined by newlines).
comparison <- function(equal = TRUE, message = "Equal") {
  stopifnot(is.logical(equal), length(equal) == 1)
  stopifnot(is.character(message))
  out <- list(equal = equal, message = paste(message, collapse = "\n"))
  class(out) <- "comparison"
  out
}
# Convenience wrapper: an unequal comparison with an sprintf-formatted message.
difference <- function(..., fmt = "%s") {
  comparison(FALSE, sprintf(fmt, ...))
}
# Convenience wrapper: the "everything matched" result.
no_difference <- function() {
  comparison()
}
#' @export
# Print method for comparison results: "Equal" for a match, otherwise the
# accumulated failure message.
print.comparison <- function(x, ...) {
  if (!x$equal) {
    cat(x$message)
    return(invisible(NULL))
  }
  cat("Equal\n")
}
# Render an object's printed representation as a single newline-joined string.
# NOTE(review): relies on capture_output_lines(), which is not defined in
# this file (it is a testthat internal) — confirm it is in scope where this
# package is loaded.
print_out <- function(x, ...) {
  lines <- capture_output_lines(x, ..., print = TRUE)
  paste0(lines, collapse = "\n")
}
#' @export
# Check that x and y have the same length.
# Returns a failing `comparison` object on mismatch; on success it returns a
# plain list(equal = TRUE) (callers only inspect the $equal field).
compare_length <- function(x, y){
  x_length = length(x); y_length = length(y)
  if (x_length != y_length){
    msg = sprintf('The length of your answer is %s, which is different from the length of the correct answer, %s.', x_length, y_length)
    return(comparison(FALSE, msg))
  }else{
    return(list(equal=TRUE))
  }
}
## TODO
# 1. time object?
# 2. ggplot object?
# 3. shiny object?
# Compare methods ---------------------------------------------------------------
#' @export
#' @rdname compare
# Fallback for types without a dedicated method: delegate to all.equal()
# and truncate long difference reports to `max_diffs` lines.
compare.default <- function(x, y, ..., max_diffs = 9){
  report <- all.equal(x, y, ...)
  if (length(report) > max_diffs) {
    report <- c(report[seq_len(max_diffs)], "...")
  }
  comparison(identical(report, TRUE), as.character(report))
}
#' @export
# Compare integer vectors (and, via the alias below, logical vectors).
# The generic dispatches on `y` when both sides are numeric, so this also
# runs when the expected answer is integer but the submission is double.
compare.integer <- function(x, y, ..., tolerance=1e-15){
  # test length
  x_length = length(x); y_length = length(y)
  length_res = compare_length(x, y)
  if (length_res$equal != TRUE) return(length_res)
  # test values; the near-zero tolerance absorbs double representations of
  # whole numbers
  if (all(abs(x - y) < tolerance) || all(x == y)){
    return(comparison())
  }
  if (x_length == 1){
    msg = sprintf('Your answer is %s, which is not equal to the correct answer, %s.', x, y)
    return(comparison(FALSE, msg))
  }else{
    # report only the first mismatching position
    index = which(x != y)[1]
    msg = sprintf('Element number %s of your vector is %s, which is not equal to the correct answer, %s.', index, x[index], y[index])
    return(comparison(FALSE, msg))
  }
}
#' @export
# Logical vectors reuse the integer comparison (TRUE/FALSE behave as 1/0).
compare.logical <- compare.integer
#' @export
# Compare character vectors: length first, then exact string equality.
compare.character <- function(x, y, ...){
  # test length
  x_length = length(x); y_length = length(y)
  length_res = compare_length(x, y)
  if (length_res$equal != TRUE) return(length_res)
  # test values; coercing both sides with as.character() first
  if (identical(as.character(x), as.character(y))) return(comparison())
  if (x_length == 1){
    msg = sprintf('Your answer is "%s", which is not equal to the correct answer "%s"', x, y)
    return(comparison(FALSE, msg))
  }else{
    # report only the first mismatching position
    index = which(x != y)[1]
    msg = sprintf('Element number %s of your vector is "%s", which is not equal to the correct answer "%s"', index, x[index], y[index])
    return(comparison(FALSE, msg))
  }
}
#' @export
# Compare numeric vectors with an absolute tolerance (default 1e-5).
compare.numeric <- function(x, y, ..., tolerance = 1e-5){
  # test length
  x_length = length(x); y_length = length(y)
  length_res = compare_length(x, y)
  if (length_res$equal != TRUE) return(length_res)
  # test values: within tolerance, or exactly equal (covers Inf == Inf,
  # where the subtraction would give NaN)
  compare_result = abs(x - y) <= tolerance | x == y
  if (all(compare_result)) return(comparison())
  if (x_length == 1){
    msg = sprintf('Your answer is %s, which is not equal to the correct answer %s', x, y)
  }else{
    # report only the first position outside tolerance
    index = which(!compare_result)[1]
    msg = sprintf('Element number %s of your vector is %s, which is not equal to the correct answer %s', index, x[index], y[index])
  }
  msg = paste(msg, '\nThe maximum tolerance is ', tolerance, sep='')
  return(comparison(FALSE, msg))
}
#' @export
# Compare factors: length, orderedness, level vectors (order-sensitive),
# then the observed labels element by element.
compare.factor <- function(x, y, ...){
  # test length
  x_length = length(x); y_length = length(y)
  length_res = compare_length(x, y)
  if (length_res$equal != TRUE) return(length_res)
  # test sorted factor: an ordered expected answer requires an ordered
  # submission (the converse is allowed)
  if (is.ordered(y) && !is.ordered(x)){
    msg = 'The answer is an ordered factor, your factor is unordered.\nUse the `as.ordered` function to convert you answer to an ordered factor.'
    return(comparison(FALSE, msg))
  }
  # test levels (both the set and the order of levels must match)
  x_levels = levels(x); y_levels = levels(y)
  if ( !identical(x_levels, y_levels) ){
    msg = sprintf('The levels of your factor are: [%s].\nWhile the levels of the correct answer are: [%s]',
                  paste0(x_levels, collapse = ', '),
                  paste0(y_levels, collapse = ', '))
    return(comparison(FALSE, msg))
  }
  # test values: compare the printed labels, reporting the first mismatch
  compare_res = as.character(x) == as.character(y)
  if ( !all(compare_res) ){
    index = which(!compare_res)[1]
    msg = sprintf('Element number %s of your factor is %s, which is not equal to the correct answer %s',
                  index, as.character(x[index]), as.character(y[index]))
    return(comparison(FALSE, msg))
  }
  return(comparison())
}
#' @export
# Compare two lists: length, then (optionally) element names, then each
# pair of elements recursively via compare().
compare.list <- function(x, y, ..., test.names = TRUE){
  # test length
  x_length = length(x); y_length = length(y)
  length_res = compare_length(x, y)
  if (length_res$equal != TRUE) return(length_res)
  # test names
  # identical() replaces any(names(x) != names(y)): names() is NULL for an
  # unnamed list and NULL != <character> yields logical(0), so the old check
  # silently passed when exactly one of the two lists was named (and `if`
  # errored when a name was NA).
  if ( test.names && !identical(names(x), names(y)) ){
    msg=sprintf('The names of your list are [%s], which is not equal to the correct answer [%s]',
                paste0(names(x), collapse = ', '),
                paste0(names(y), collapse = ', '))
    return(comparison(FALSE, msg))
  }
  # test values: recurse into each element, reporting the first mismatch
  for (i in seq_along(x)){
    res = compare(x[[i]], y[[i]], ...)
    if (!res$equal) {
      msg = sprintf('The type of element number %d in your list is `%s`.\nIn testing element number %d:\n%s',
                    i, class(x[[i]]), i, res$message)
      return(comparison(FALSE, msg))
    }
  }
  return(comparison())
}
#' @export
# Compare matrices: dimensions, element type, then elementwise values
# (numeric matrices use an absolute tolerance).
compare.matrix <- function(x, y, ..., tolerance = 1e-5){
  # test dimension
  x_dim=dim(x); y_dim = dim(y)
  if (any(x_dim != y_dim)){
    msg = sprintf('The dimension of your matrix are (%s), which are not equal to the dimension of the correct answer: (%s).',
                  paste0(x_dim, collapse = ','),
                  paste0(y_dim, collapse = ',')
    )
    return(comparison(FALSE, msg))
  }
  # test values
  if ( is.numeric(x) && is.numeric(y) ){
    # numeric: equal within tolerance or exactly equal (covers Inf == Inf)
    compare_res = abs(x - y) <= tolerance | x == y
  }else if (class(x[1,1]) != class(y[1,1])){
    # non-numeric: the element type (probed via [1,1]) must match first
    # NOTE(review): this probe errors on a 0x0 matrix — confirm acceptable
    msg = sprintf('The type of the data in your matrix is `%s`, when it should be `%s`',
                  class(x[1,1]), class(y[1,1])
    )
    return(comparison(FALSE, msg))
  }else{
    compare_res = x == y
  }
  if (any(!compare_res)){
    # arr.ind = TRUE gives the (row, col) position of the first mismatch
    index = which(!compare_res, arr.ind=T)[1, ]
    msg = 'The value in %sth row, %sth column of your matrix is %s, which is not equal to the correct answer, %s.'
    msg = sprintf(msg, index[1], index[2], x[index[1], index[2]], y[index[1], index[2]])
    return(comparison(FALSE, msg))
  }
  return(comparison())
}
#' @export
# Compare data frames: dimensions, column names (optionally), then each
# expected column via compare() with the numeric tolerance.
compare.data.frame <- function(x, y, ..., tolerance = 1e-5, test.rowname=FALSE, test.colname=TRUE){
  # test dimension
  x_dim=dim(x); y_dim = dim(y)
  if (!all(x_dim == y_dim)){
    msg = sprintf('The dimensions of your data.frame are (%s), which are not equal to the dimension of the correct answer: (%s).',
                  paste0(x_dim, collapse = ','),
                  paste0(y_dim, collapse = ',')
    )
    return(comparison(FALSE, msg))
  }
  # test column names: only requires every expected column to be present;
  # column order is not enforced
  if (test.colname){
    col_diff = setdiff(colnames(y), colnames(x))
    if ( length(col_diff) > 0){
      msg = sprintf('The column names of your data.frame are [%s].\nThe column names should be: [%s].\nYour columns do not contain: [%s].',
                    paste(colnames(x), collapse = ','),
                    paste(colnames(y), collapse = ','),
                    paste(col_diff, collapse = ',')
      )
      return(comparison(FALSE, msg))
    }
  }
  # test row names
  # NOTE(review): test.rowname is accepted but the check is not implemented.
  # if (test.rowname){
  #   col_diff = setdiff(rownames(y), rownames(x))
  # }
  # test values: recurse into each expected column, reporting the first
  # failing column with the nested comparison's message
  for (col in colnames(y)){
    res = compare(x[[col]], y[[col]], tolerance=tolerance, ...)
    if (!res$equal) {
      msg = sprintf('Testing the column `%s` in your data frame:\n%s',
                    col, res$message)
      return(comparison(FALSE, msg))
    }
  }
  return(comparison())
}
#' @export
# Tibbles: strip the tbl_df class and reuse the data.frame comparison.
compare.tbl_df <- function(x, y, ...){
  compare.data.frame(as.data.frame(x), as.data.frame(y), ...)
}
# Common helpers ---------------------------------------------------------------
# Pairwise predicates plus the matching `difference` message builders.
same_length <- function(x, y) length(x) == length(y)
diff_length <- function(x, y) difference(fmt = "Lengths differ: %i vs. %i", length(x), length(y))
same_type <- function(x, y) identical(typeof(x), typeof(y))
diff_type <- function(x, y) difference(fmt = "Types not compatible: %s vs. %s", typeof(x), typeof(y))
# Classes only matter when at least one operand is an S3/S4 object.
same_class <- function(x, y) {
  if (!is.object(x) && !is.object(y))
    return(TRUE)
  identical(class(x), class(y))
}
# NOTE(review): klass() is not defined in this file — confirm it is provided
# elsewhere in the package.
diff_class <- function(x, y) {
  difference(fmt = "Classes differ: %s vs. %s", klass(x), klass(y))
}
same_attr <- function(x, y) {
  is.null(attr.all.equal(x, y))
}
diff_attr <- function(x, y) {
  # fancy quotes would garble attr.all.equal()'s quoting in the message;
  # on.exit restores the option even if attr.all.equal() errors
  old <- options(useFancyQuotes = FALSE)
  on.exit(options(old), add = TRUE)
  out <- attr.all.equal(x, y)
  difference(out)
}
# Elementwise equality where NA == NA counts as TRUE (unlike `==`, which
# propagates NA).
vector_equal <- function(x, y) {
  both_na <- is.na(x) & is.na(y)
  both_na | (!is.na(x) & !is.na(y) & x == y)
}
# Elementwise near-equality for numeric vectors, again treating a pair of
# NAs as equal. Default tolerance matches all.equal()'s sqrt(eps).
vector_equal_tol <- function(x, y, tolerance = .Machine$double.eps ^ 0.5) {
  both_na <- is.na(x) & is.na(y)
  both_na | (!is.na(x) & !is.na(y) & abs(x - y) < tolerance)
}
|
/R/compare.R
|
no_license
|
vishalbelsare/autotest
|
R
| false
| false
| 11,318
|
r
|
mark_type <- function(cls){
stopifnot(is.character(cls))
if (length(cls) == 1) return(cls)
support_types = c('tbl_df', 'data.frame', 'list', 'matrix', 'factor',
'logical', 'integer', 'numeric', 'character')
for (i in support_types){
if (i %in% cls) return(i)
}
return(cls[1])
}
#' Provide human-readable comparison of two objects
#'
#' `compare` is similar to [base::all.equal()], but shows
#' you examples of where the failures occured.
#' Support data types: integer, numeric, logical, character, factor, list, matrix, data.frame
#'
#' @export
#' @param x,y Objects to compare
#' @param tolerance Numerical tolerance: any differences smaller than this
#' value will be ignored. Default, 1e-5.
#' @param test.names If x and y are lists, whether to test the names of elements. Default, TRUE.
#' @param test.rowname If x and y are data frames, whether to test the row names. Default, FALSE.
#' @param test.colname If x and y are data frames, whether to test the column names. Default, TRUE.
#' @param ... Additional arguments used to control specifics of comparison
#' @description The parameters `test.rowname` and `test.colname` only works for data frames. While
#' the `test.names` parameter only works for list.
compare <- function(x, y, ...) {
x_class = class(x); y_class = class(y)
if (identical(x, y)) return(comparison())
try(
if (anyNA(x)){
msg = 'Your answer conatins missing values NA, please check again.'
return(comparison(FALSE, msg))
},
silent=TRUE
)
if ( is.numeric(x) && is.numeric(y) ){
UseMethod("compare", y)
}else if (x_class != y_class && !inherits(x, y_class)){
msg = sprintf('We expect your answer returns type "%s", but it returns "%s" instead.', mark_type(y_class), mark_type(x_class))
if (all(is.null(x))){
msg = paste(msg,
'\nDid you forget to return something in your function definition?',
sep = '\n'
)
}
return(comparison(FALSE, msg))
}
UseMethod("compare", x)
}
# Construct a "comparison" result object.
#
# `equal` must be a single logical flag; `message` is a character vector that
# gets collapsed into one newline-separated string.  Returns a list with
# elements `equal` and `message`, classed "comparison".
comparison <- function(equal = TRUE, message = "Equal") {
  stopifnot(is.logical(equal), length(equal) == 1)
  stopifnot(is.character(message))
  out <- list(
    equal = equal,
    message = paste(message, collapse = "\n")
  )
  class(out) <- "comparison"
  out
}
# Build a failing comparison whose message is sprintf(fmt, ...).
difference <- function(..., fmt = "%s") {
  msg <- sprintf(fmt, ...)
  comparison(FALSE, msg)
}
# A successful ("no difference") comparison result.
no_difference <- function() {
  comparison(equal = TRUE)
}
#' @export
# Print method for "comparison" objects: "Equal" (with newline) on success,
# otherwise the stored failure message (no trailing newline, as built).
print.comparison <- function(x, ...) {
  if (x$equal) {
    cat("Equal\n")
    return()
  }
  cat(x$message)
}
# Capture the printed representation of `x` as a single string.
# NOTE(review): `capture_output_lines` is not defined in this file --
# presumably a testthat internal; confirm it is in scope where this runs.
print_out <- function(x, ...) {
  paste0(capture_output_lines(x, ..., print = TRUE), collapse = "\n")
}
#' Compare the lengths of two objects
#'
#' Returns a successful \code{comparison} when \code{x} and \code{y} have the
#' same length, and a failing comparison with a human-readable message
#' otherwise.  Used by the vector \code{compare} methods as a fast first check.
#'
#' @param x,y Objects whose lengths are compared.
#' @return A "comparison" object; inspect its \code{$equal} element.
#' @export
compare_length <- function(x, y){
  x_length <- length(x); y_length <- length(y)
  if (x_length != y_length) {
    msg <- sprintf('The length of your answer is %s, which is different from the length of the correct answer, %s.', x_length, y_length)
    return(comparison(FALSE, msg))
  }
  # Fixed inconsistency: previously returned a bare `list(equal = TRUE)` on
  # success instead of a "comparison" object.  Callers that only inspect
  # `$equal` are unaffected.
  comparison()
}
## TODO
# 1. time object?
# 2. ggplot object?
# 3. shiny object?
# Compare methods ---------------------------------------------------------------
#' @export
#' @rdname compare
# Fallback method: delegate to all.equal() and truncate long difference
# reports to `max_diffs` entries followed by "...".
compare.default <- function(x, y, ..., max_diffs = 9){
  diffs <- all.equal(x, y, ...)
  if (length(diffs) > max_diffs) {
    diffs <- c(diffs[seq_len(max_diffs)], "...")
  }
  comparison(identical(diffs, TRUE), as.character(diffs))
}
#' @export
# Integer method: equal lengths, then elementwise equality within a very
# tight tolerance.  On failure, points at the first differing element.
compare.integer <- function(x, y, ..., tolerance=1e-15){
  # Fail fast on length mismatch.
  len_check <- compare_length(x, y)
  if (len_check$equal != TRUE) {
    return(len_check)
  }
  # Agreement within tolerance (or exact equality) means success.
  if (all(abs(x - y) < tolerance) || all(x == y)) {
    return(comparison())
  }
  if (length(x) == 1) {
    msg <- sprintf('Your answer is %s, which is not equal to the correct answer, %s.', x, y)
    return(comparison(FALSE, msg))
  }
  bad <- which(x != y)[1]
  msg <- sprintf('Element number %s of your vector is %s, which is not equal to the correct answer, %s.',
                 bad, x[bad], y[bad])
  comparison(FALSE, msg)
}
#' @export
# Logical vectors reuse the integer method: `abs(x - y)` and `x == y` there
# work on logicals through implicit 0/1 coercion.
compare.logical <- compare.integer
#' @export
# Character method: equal lengths, then exact string equality.  On failure,
# reports the first mismatching element (quoted).
compare.character <- function(x, y, ...){
  len_check <- compare_length(x, y)
  if (len_check$equal != TRUE) {
    return(len_check)
  }
  if (identical(as.character(x), as.character(y))) {
    return(comparison())
  }
  if (length(x) == 1) {
    msg <- sprintf('Your answer is "%s", which is not equal to the correct answer "%s"', x, y)
  } else {
    bad <- which(x != y)[1]
    msg <- sprintf('Element number %s of your vector is "%s", which is not equal to the correct answer "%s"',
                   bad, x[bad], y[bad])
  }
  comparison(FALSE, msg)
}
#' @export
# Numeric method: equal lengths, then elementwise agreement within
# `tolerance` (exact equality also accepted, which covers Inf == Inf).
compare.numeric <- function(x, y, ..., tolerance = 1e-5){
  len_check <- compare_length(x, y)
  if (len_check$equal != TRUE) {
    return(len_check)
  }
  close_enough <- abs(x - y) <= tolerance | x == y
  if (all(close_enough)) {
    return(comparison())
  }
  if (length(x) == 1) {
    msg <- sprintf('Your answer is %s, which is not equal to the correct answer %s', x, y)
  } else {
    bad <- which(!close_enough)[1]
    msg <- sprintf('Element number %s of your vector is %s, which is not equal to the correct answer %s',
                   bad, x[bad], y[bad])
  }
  # Append the tolerance so the student knows how close "close" must be.
  comparison(FALSE, paste(msg, '\nThe maximum tolerance is ', tolerance, sep=''))
}
#' @export
# Factor method: checks length, orderedness, levels, then elementwise
# values (compared as character).
compare.factor <- function(x, y, ...){
  len_check <- compare_length(x, y)
  if (len_check$equal != TRUE) {
    return(len_check)
  }
  # An ordered expected answer must be matched by an ordered factor.
  if (is.ordered(y) && !is.ordered(x)) {
    return(comparison(FALSE, 'The answer is an ordered factor, your factor is unordered.\nUse the `as.ordered` function to convert you answer to an ordered factor.'))
  }
  # Levels must match exactly (content and order).
  if (!identical(levels(x), levels(y))) {
    msg <- sprintf('The levels of your factor are: [%s].\nWhile the levels of the correct answer are: [%s]',
                   paste0(levels(x), collapse = ', '),
                   paste0(levels(y), collapse = ', '))
    return(comparison(FALSE, msg))
  }
  # Elementwise values, compared on the label scale.
  mismatch <- as.character(x) != as.character(y)
  if (any(mismatch)) {
    i <- which(mismatch)[1]
    msg <- sprintf('Element number %s of your factor is %s, which is not equal to the correct answer %s',
                   i, as.character(x[i]), as.character(y[i]))
    return(comparison(FALSE, msg))
  }
  comparison()
}
#' @export
# List method: checks length, optionally element names, then recursively
# compares each element with compare().
compare.list <- function(x, y, ..., test.names = TRUE){
  # Fail fast on length mismatch.
  len_check <- compare_length(x, y)
  if (len_check$equal != TRUE) {
    return(len_check)
  }
  # Fixed: the previous `any(names(x) != names(y))` evaluated to logical(0)
  # (hence FALSE) whenever either side was unnamed, so a named-vs-unnamed
  # mismatch was silently missed.  identical() catches that case too.
  if (test.names && !identical(names(x), names(y))) {
    msg <- sprintf('The names of your list are [%s], which is not equal to the correct answer [%s]',
                   paste0(names(x), collapse = ', '),
                   paste0(names(y), collapse = ', '))
    return(comparison(FALSE, msg))
  }
  # Recursively compare the elements, reporting the first failure.
  for (i in seq_along(x)) {
    res <- compare(x[[i]], y[[i]], ...)
    if (!res$equal) {
      msg <- sprintf('The type of element number %d in your list is `%s`.\nIn testing element number %d:\n%s',
                     i, class(x[[i]]), i, res$message)
      return(comparison(FALSE, msg))
    }
  }
  comparison()
}
#' @export
# Matrix method: checks dimensions, then element type, then elementwise
# values (numeric matrices use `tolerance`).  Reports the first bad cell.
compare.matrix <- function(x, y, ..., tolerance = 1e-5){
  dx <- dim(x); dy <- dim(y)
  if (any(dx != dy)) {
    msg <- sprintf('The dimension of your matrix are (%s), which are not equal to the dimension of the correct answer: (%s).',
                   paste0(dx, collapse = ','),
                   paste0(dy, collapse = ','))
    return(comparison(FALSE, msg))
  }
  if (is.numeric(x) && is.numeric(y)) {
    ok <- abs(x - y) <= tolerance | x == y
  } else if (class(x[1, 1]) != class(y[1, 1])) {
    msg <- sprintf('The type of the data in your matrix is `%s`, when it should be `%s`',
                   class(x[1, 1]), class(y[1, 1]))
    return(comparison(FALSE, msg))
  } else {
    ok <- x == y
  }
  if (any(!ok)) {
    # First failing cell, as (row, column).
    idx <- which(!ok, arr.ind = TRUE)[1, ]
    msg <- sprintf('The value in %sth row, %sth column of your matrix is %s, which is not equal to the correct answer, %s.',
                   idx[1], idx[2], x[idx[1], idx[2]], y[idx[1], idx[2]])
    return(comparison(FALSE, msg))
  }
  comparison()
}
#' @export
# Data-frame method: checks dimensions, optionally column names, then
# compares column by column (dispatching on each column's type).
compare.data.frame <- function(x, y, ..., tolerance = 1e-5, test.rowname=FALSE, test.colname=TRUE){
  dx <- dim(x); dy <- dim(y)
  if (!all(dx == dy)) {
    msg <- sprintf('The dimensions of your data.frame are (%s), which are not equal to the dimension of the correct answer: (%s).',
                   paste0(dx, collapse = ','),
                   paste0(dy, collapse = ','))
    return(comparison(FALSE, msg))
  }
  # Every expected column must be present (extra columns are tolerated
  # only if the dimensions still matched above).
  if (test.colname) {
    missing_cols <- setdiff(colnames(y), colnames(x))
    if (length(missing_cols) > 0) {
      msg <- sprintf('The column names of your data.frame are [%s].\nThe column names should be: [%s].\nYour columns do not contain: [%s].',
                     paste(colnames(x), collapse = ','),
                     paste(colnames(y), collapse = ','),
                     paste(missing_cols, collapse = ','))
      return(comparison(FALSE, msg))
    }
  }
  # `test.rowname` is accepted for interface compatibility; row-name
  # checking is not implemented (matching the original behaviour).
  for (col in colnames(y)) {
    res <- compare(x[[col]], y[[col]], tolerance = tolerance, ...)
    if (!res$equal) {
      msg <- sprintf('Testing the column `%s` in your data frame:\n%s',
                     col, res$message)
      return(comparison(FALSE, msg))
    }
  }
  comparison()
}
#' @export
# Tibbles are compared as plain data frames.
compare.tbl_df <- function(x, y, ...){
  compare.data.frame(as.data.frame(x), as.data.frame(y), ...)
}
# Common helpers ---------------------------------------------------------------
# Predicate / difference-message pairs used when building comparison results.
# TRUE when x and y have the same length.
same_length <- function(x, y) length(x) == length(y)
# Failing comparison describing a length mismatch.
diff_length <- function(x, y) difference(fmt = "Lengths differ: %i vs. %i", length(x), length(y))
# TRUE when x and y share the same storage type (typeof).
same_type <- function(x, y) identical(typeof(x), typeof(y))
# Failing comparison describing a storage-type mismatch.
diff_type <- function(x, y) difference(fmt = "Types not compatible: %s vs. %s", typeof(x), typeof(y))
# TRUE when both are plain (non-object) values, or their classes match exactly.
same_class <- function(x, y) {
  if (!is.object(x) && !is.object(y))
    return(TRUE)
  identical(class(x), class(y))
}
# Failing comparison describing a class mismatch.
# NOTE(review): `klass()` is not defined in this file -- presumably a helper
# from testthat; confirm it is available where this runs.
diff_class <- function(x, y) {
  difference(fmt = "Classes differ: %s vs. %s", klass(x), klass(y))
}
# TRUE when attr.all.equal() finds no attribute differences.
same_attr <- function(x, y) {
  is.null(attr.all.equal(x, y))
}
# Failing comparison listing attribute differences; fancy quotes are
# disabled for the duration and restored via on.exit().
diff_attr <- function(x, y) {
  old <- options(useFancyQuotes = FALSE)
  on.exit(options(old), add = TRUE)
  out <- attr.all.equal(x, y)
  difference(out)
}
# Elementwise equality that treats NA == NA as agreement.
vector_equal <- function(x, y) {
  both_na <- is.na(x) & is.na(y)
  neither_na <- !is.na(x) & !is.na(y)
  both_na | (neither_na & x == y)
}
# As vector_equal(), but numeric agreement within `tolerance`.
vector_equal_tol <- function(x, y, tolerance = .Machine$double.eps ^ 0.5) {
  both_na <- is.na(x) & is.na(y)
  neither_na <- !is.na(x) & !is.na(y)
  both_na | (neither_na & abs(x - y) < tolerance)
}
|
# Load libraries: data.table for fast data manipulation; lubridate for dates
# (lubridate does not appear to be used below -- TODO confirm it is needed).
library(data.table)
library(lubridate)
# Read the King County house-sales data set straight from S3.
HousingData = fread("https://s3.amazonaws.com/reubenworkshopdata/Sessions/Session+1/kc_house_data.csv")
# Quick inspection of the raw data.
head(HousingData)
str(HousingData)
summary(HousingData)
# Keep the modelling columns: round bedroom/bathroom counts up, coerce
# zipcode to numeric, drop id/date columns.
HousingData = HousingData[,.(
  price,
  bedrooms = ceiling(bedrooms),
  bathrooms = ceiling(bathrooms),
  sqft_living,
  sqft_lot,
  floors,
  waterfront,
  view,
  condition,
  grade,
  sqft_above,
  sqft_basement,
  yr_built,
  yr_renovated,
  zipcode = as.numeric(zipcode),
  lat,
  long,
  sqft_living15,
  sqft_lot15)]
# NOTE(review): which(is.na()) on a whole data.table gives flattened element
# positions used here as row indices, and the result is discarded -- this
# line probably does not do what was intended; verify.
HousingData[which(is.na(HousingData))]
# NOTE(review): this repeats the column-selection step above verbatim;
# redundant but harmless.
HousingData = HousingData[,.(
  price,
  bedrooms = ceiling(bedrooms),
  bathrooms = ceiling(bathrooms),
  sqft_living,
  sqft_lot,
  floors,
  waterfront,
  view,
  condition,
  grade,
  sqft_above,
  sqft_basement,
  yr_built,
  yr_renovated,
  zipcode = as.numeric(zipcode),
  lat,
  long,
  sqft_living15,
  sqft_lot15)]
# Deliberately introduce missing values in the first five rows --
# presumably a missing-data exercise; TODO confirm intent.
HousingData$bedrooms[1:5] = NA
HousingData$bathrooms[1:5] = NA
# One-hot encode the `floors` levels (baseline: 1 floor), then drop the
# original column.
HousingData$`floor1.5` = (HousingData$floors == "1.5")*1
HousingData$`floor2` = (HousingData$floors == "2")*1
HousingData$`floor2.5` = (HousingData$floors == "2.5")*1
HousingData$`floor3` = (HousingData$floors == "3")*1
HousingData$`floor3.5` = (HousingData$floors == "3.5")*1
HousingData$floors= NULL
# 80/20 train/test split by row index.
# NOTE(review): no set.seed(), so the split is not reproducible -- confirm
# that is acceptable.
rows = 1:nrow(HousingData)
#Make a vector called 'train_rows' by randomly sampling 80% of the elements of 'rows'
train_rows = sample(x = rows,size= .8*nrow(HousingData))
#Make a vector called 'test_ rows' from the elements of 'rows' that are not in 'train_rows'
test_rows = rows[!rows %in% train_rows]
# Min-max scale a numeric vector to [0, 1]: (x - min) / (max - min).
#
# Fixes two bugs in the original: (1) misplaced parentheses computed
# `x - (min / range)` instead of `(x - min) / range`; (2) Vectorize() made
# each call operate on a single element, so min(x) and range(x) degenerated
# to the element itself (yielding 0/0 = NaN).  Still takes a numeric vector
# and returns a vector of the same length; NAs pass through as NA.
minmax = function(x){
  (x - min(x, na.rm = TRUE)) / diff(range(x, na.rm = TRUE))
}
# Min-max scale the continuous predictors using the *training* rows' min and
# range only, so no information from the test split leaks into the scaling.
# Fixes: (1) `yr_renovated` previously took its minimum over ALL rows
# (`min(yr_renovated)`), inconsistent with every other column -- it now uses
# `yr_renovated[train_rows]` like the rest; (2) `na.rm = TRUE` added
# throughout, since the NAs injected into bedrooms/bathrooms above would
# otherwise make min()/range() return NA and wipe out whole columns.
HousingData [,`:=`(
  bedrooms=(bedrooms - min(bedrooms[train_rows], na.rm = TRUE))/diff(range(bedrooms[train_rows], na.rm = TRUE)),
  bathrooms=(bathrooms - min(bathrooms[train_rows], na.rm = TRUE))/diff(range(bathrooms[train_rows], na.rm = TRUE)),
  sqft_living=(sqft_living - min(sqft_living[train_rows], na.rm = TRUE))/diff(range(sqft_living[train_rows], na.rm = TRUE)),
  view = (view - min(view[train_rows], na.rm = TRUE))/diff(range(view[train_rows], na.rm = TRUE)),
  condition=(condition - min(condition[train_rows], na.rm = TRUE))/diff(range(condition[train_rows], na.rm = TRUE)),
  grade=(grade - min(grade[train_rows], na.rm = TRUE))/diff(range(grade[train_rows], na.rm = TRUE)),
  yr_built=(yr_built - min(yr_built[train_rows], na.rm = TRUE))/diff(range(yr_built[train_rows], na.rm = TRUE)),
  yr_renovated=(yr_renovated - min(yr_renovated[train_rows], na.rm = TRUE))/diff(range(yr_renovated[train_rows], na.rm = TRUE)),
  lat=(lat - min(lat[train_rows], na.rm = TRUE))/diff(range(lat[train_rows], na.rm = TRUE)),
  long=(long - min(long[train_rows], na.rm = TRUE))/diff(range(long[train_rows], na.rm = TRUE))
)]
# Materialise the train/test splits.
HousingData_train = HousingData[train_rows]
HousingData_test = HousingData[test_rows]
# Predictor columns and response for training.
x = HousingData_train[,.( bedrooms,bathrooms,sqft_living,waterfront,view,condition,grade,yr_built,yr_renovated,lat,long,floor1.5,floor2,floor2.5,floor3,floor3.5
)]
y = HousingData_train$price
# Define the loss function
# Half mean-squared-error for the linear model X %*% beta against y.
loss = function(X, y, beta) {
  sum( (as.matrix(X) %*% beta - y)^2 ) / (2*length(y))
}
# Gradient-descent hyperparameters: learning rate and iteration count.
alpha = 0.1
num_iters = 1000
loss_history = rep(0,num_iters)
# NOTE(review): list(num_iters) creates a 1-element list holding the number
# 1000, not a preallocated list of length 1000 -- probably meant
# vector("list", num_iters); it still works because [[i]] extends the list.
beta_history = list(num_iters)
# One coefficient per predictor plus an intercept, all starting at zero.
beta = rep(0,ncol(x)+1)
# Prepend an intercept column of 1s to form the design matrix.
X = as.matrix(cbind(1,x))
# Batch gradient descent.  NOTE(review): beta[j] is updated using the
# already-updated earlier coefficients within the same iteration
# (Gauss-Seidel style) rather than a simultaneous update -- confirm intended.
for (i in 1:num_iters) {
  beta[1] = beta[1] - alpha * (1/length(y)) * sum(((X%*%beta)- y))
  for(j in 2:length(beta)){
    beta[j] = beta[j] - alpha * (1/length(y)) * sum(((X%*%beta)- y)*X[,j])
  }
  loss_history[i] = loss(X, y, beta)
  beta_history[[i]] = beta
}
# Plot the training-loss curve.  NOTE(review): values are divided by 1e6 but
# the axis label says "100000s" -- the label and divisor disagree; verify.
plot(loss_history/1000000,type = "l",ylab = "Loss (100000s)",xlab = "Iterations")
# Rebuild the design matrix for the held-out test rows.
X = as.matrix(cbind(1,HousingData_test[,.( bedrooms,bathrooms,sqft_living,waterfront,view,condition,grade,yr_built,yr_renovated,lat,long,floor1.5,floor2,floor2.5,floor3,floor3.5
)]))
#Multiply the beta's you calculated in the previous section to X to get the predictions
HousingData_test$price_pred = X%*%beta
#Calculate the median of the absolute difference between the predicted sale prices and the actual sale prices
median(HousingData_test[,.(absdiff=abs(price-price_pred))]$absdiff)
|
/dataR 12_6_2017.R
|
no_license
|
kbwcan/Datascience
|
R
| false
| false
| 4,010
|
r
|
# Load libraries: data.table for fast data manipulation; lubridate for dates
# (lubridate does not appear to be used below -- TODO confirm it is needed).
library(data.table)
library(lubridate)
# Read the King County house-sales data set straight from S3.
HousingData = fread("https://s3.amazonaws.com/reubenworkshopdata/Sessions/Session+1/kc_house_data.csv")
# Quick inspection of the raw data.
head(HousingData)
str(HousingData)
summary(HousingData)
# Keep the modelling columns: round bedroom/bathroom counts up, coerce
# zipcode to numeric, drop id/date columns.
HousingData = HousingData[,.(
  price,
  bedrooms = ceiling(bedrooms),
  bathrooms = ceiling(bathrooms),
  sqft_living,
  sqft_lot,
  floors,
  waterfront,
  view,
  condition,
  grade,
  sqft_above,
  sqft_basement,
  yr_built,
  yr_renovated,
  zipcode = as.numeric(zipcode),
  lat,
  long,
  sqft_living15,
  sqft_lot15)]
# NOTE(review): which(is.na()) on a whole data.table gives flattened element
# positions used here as row indices, and the result is discarded -- this
# line probably does not do what was intended; verify.
HousingData[which(is.na(HousingData))]
# NOTE(review): this repeats the column-selection step above verbatim;
# redundant but harmless.
HousingData = HousingData[,.(
  price,
  bedrooms = ceiling(bedrooms),
  bathrooms = ceiling(bathrooms),
  sqft_living,
  sqft_lot,
  floors,
  waterfront,
  view,
  condition,
  grade,
  sqft_above,
  sqft_basement,
  yr_built,
  yr_renovated,
  zipcode = as.numeric(zipcode),
  lat,
  long,
  sqft_living15,
  sqft_lot15)]
# Deliberately introduce missing values in the first five rows --
# presumably a missing-data exercise; TODO confirm intent.
HousingData$bedrooms[1:5] = NA
HousingData$bathrooms[1:5] = NA
# One-hot encode the `floors` levels (baseline: 1 floor), then drop the
# original column.
HousingData$`floor1.5` = (HousingData$floors == "1.5")*1
HousingData$`floor2` = (HousingData$floors == "2")*1
HousingData$`floor2.5` = (HousingData$floors == "2.5")*1
HousingData$`floor3` = (HousingData$floors == "3")*1
HousingData$`floor3.5` = (HousingData$floors == "3.5")*1
HousingData$floors= NULL
# 80/20 train/test split by row index.
# NOTE(review): no set.seed(), so the split is not reproducible -- confirm
# that is acceptable.
rows = 1:nrow(HousingData)
#Make a vector called 'train_rows' by randomly sampling 80% of the elements of 'rows'
train_rows = sample(x = rows,size= .8*nrow(HousingData))
#Make a vector called 'test_ rows' from the elements of 'rows' that are not in 'train_rows'
test_rows = rows[!rows %in% train_rows]
# Min-max scale a numeric vector to [0, 1]: (x - min) / (max - min).
#
# Fixes two bugs in the original: (1) misplaced parentheses computed
# `x - (min / range)` instead of `(x - min) / range`; (2) Vectorize() made
# each call operate on a single element, so min(x) and range(x) degenerated
# to the element itself (yielding 0/0 = NaN).  Still takes a numeric vector
# and returns a vector of the same length; NAs pass through as NA.
minmax = function(x){
  (x - min(x, na.rm = TRUE)) / diff(range(x, na.rm = TRUE))
}
# Min-max scale the continuous predictors using the *training* rows' min and
# range only, so no information from the test split leaks into the scaling.
# Fixes: (1) `yr_renovated` previously took its minimum over ALL rows
# (`min(yr_renovated)`), inconsistent with every other column -- it now uses
# `yr_renovated[train_rows]` like the rest; (2) `na.rm = TRUE` added
# throughout, since the NAs injected into bedrooms/bathrooms above would
# otherwise make min()/range() return NA and wipe out whole columns.
HousingData [,`:=`(
  bedrooms=(bedrooms - min(bedrooms[train_rows], na.rm = TRUE))/diff(range(bedrooms[train_rows], na.rm = TRUE)),
  bathrooms=(bathrooms - min(bathrooms[train_rows], na.rm = TRUE))/diff(range(bathrooms[train_rows], na.rm = TRUE)),
  sqft_living=(sqft_living - min(sqft_living[train_rows], na.rm = TRUE))/diff(range(sqft_living[train_rows], na.rm = TRUE)),
  view = (view - min(view[train_rows], na.rm = TRUE))/diff(range(view[train_rows], na.rm = TRUE)),
  condition=(condition - min(condition[train_rows], na.rm = TRUE))/diff(range(condition[train_rows], na.rm = TRUE)),
  grade=(grade - min(grade[train_rows], na.rm = TRUE))/diff(range(grade[train_rows], na.rm = TRUE)),
  yr_built=(yr_built - min(yr_built[train_rows], na.rm = TRUE))/diff(range(yr_built[train_rows], na.rm = TRUE)),
  yr_renovated=(yr_renovated - min(yr_renovated[train_rows], na.rm = TRUE))/diff(range(yr_renovated[train_rows], na.rm = TRUE)),
  lat=(lat - min(lat[train_rows], na.rm = TRUE))/diff(range(lat[train_rows], na.rm = TRUE)),
  long=(long - min(long[train_rows], na.rm = TRUE))/diff(range(long[train_rows], na.rm = TRUE))
)]
# Materialise the train/test splits.
HousingData_train = HousingData[train_rows]
HousingData_test = HousingData[test_rows]
# Predictor columns and response for training.
x = HousingData_train[,.( bedrooms,bathrooms,sqft_living,waterfront,view,condition,grade,yr_built,yr_renovated,lat,long,floor1.5,floor2,floor2.5,floor3,floor3.5
)]
y = HousingData_train$price
# Define the loss function
# Half mean-squared-error for the linear model X %*% beta against y.
loss = function(X, y, beta) {
  sum( (as.matrix(X) %*% beta - y)^2 ) / (2*length(y))
}
# Gradient-descent hyperparameters: learning rate and iteration count.
alpha = 0.1
num_iters = 1000
loss_history = rep(0,num_iters)
# NOTE(review): list(num_iters) creates a 1-element list holding the number
# 1000, not a preallocated list of length 1000 -- probably meant
# vector("list", num_iters); it still works because [[i]] extends the list.
beta_history = list(num_iters)
# One coefficient per predictor plus an intercept, all starting at zero.
beta = rep(0,ncol(x)+1)
# Prepend an intercept column of 1s to form the design matrix.
X = as.matrix(cbind(1,x))
# Batch gradient descent.  NOTE(review): beta[j] is updated using the
# already-updated earlier coefficients within the same iteration
# (Gauss-Seidel style) rather than a simultaneous update -- confirm intended.
for (i in 1:num_iters) {
  beta[1] = beta[1] - alpha * (1/length(y)) * sum(((X%*%beta)- y))
  for(j in 2:length(beta)){
    beta[j] = beta[j] - alpha * (1/length(y)) * sum(((X%*%beta)- y)*X[,j])
  }
  loss_history[i] = loss(X, y, beta)
  beta_history[[i]] = beta
}
# Plot the training-loss curve.  NOTE(review): values are divided by 1e6 but
# the axis label says "100000s" -- the label and divisor disagree; verify.
plot(loss_history/1000000,type = "l",ylab = "Loss (100000s)",xlab = "Iterations")
# Rebuild the design matrix for the held-out test rows.
X = as.matrix(cbind(1,HousingData_test[,.( bedrooms,bathrooms,sqft_living,waterfront,view,condition,grade,yr_built,yr_renovated,lat,long,floor1.5,floor2,floor2.5,floor3,floor3.5
)]))
#Multiply the beta's you calculated in the previous section to X to get the predictions
HousingData_test$price_pred = X%*%beta
#Calculate the median of the absolute difference between the predicted sale prices and the actual sale prices
median(HousingData_test[,.(absdiff=abs(price-price_pred))]$absdiff)
|
# Geography ---------------------------------------------------------------
#' Swedish health care regional codes
#'
#' Key-value codes for the Swedish health care regions (1-6).
#'
#' @docType data
#' @keywords datasets
#' @name region
#' @family key_value_data
NULL
#' Swedish county codes
#'
#' Key-value codes for the Swedish counties (lan).
#' Two first digits of the LKF code.
#' Be aware of the spelling ("lan")!
#'
#' @docType data
#' @keywords datasets
#' @name lan
#' @family key_value_data
NULL
#' Swedish district codes
#'
#' Key-value codes for the Swedish districts (introduced 2016-01-01).
#'
#' @docType data
#' @keywords datasets
#' @name distrikt
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/distrikt/}
NULL
#' Geographical health care areas
#'
#' Key-value codes for health care areas.
#'
#' \itemize{
#' \item{Key: the first four digits from the LKF-code}
#' \item{Value: A geographical area (sub area of county/lan) with special
#' interest}
#' }
#'
#' The object currently only applies to the Western health care region
#' \itemize{
#' \item{Storgoteborg}
#' \item{Fyrbodal}
#' \item{Sodra_Alvsborg}
#' \item{Skaraborg}
#' \item{Norra_Halland}
#' }
#' Updates for other regions need to be requested (please do!).
#'
#' @section Kungalv:
#' Kungalv is an independent area by default. There are situations when Kungalv
#' should be dealt with as an
#' independent health care region or as a part of greater Gothenburg.
#' See section "extra_functions" in \link{decode}
#' to handle this.
#'
#' @docType data
#' @keywords datasets
#' @name sjukvardsomrade
#' @seealso \link{extra_functions}
#' @family key_value_data
NULL
#' HSN code (Hälso- och sjukvårdsnamnd)
#'
#' Key-value codes for the HSN:s in VGR valid from 2015.
#' Based on municipality codes (\code{\link{kommun}}).
#' Data from 2014-12-17.
#'
#' Categorization:
#' \describe{
#' \item{Norra}{Lysekil, Munkedal, Orust, Sotenäs, Strömstad, Tanum, Bengtsfors,
#' Dals-Ed, Färgelanda, Mellerud, Åmål, Trollhättan, Uddevalla och Vänersborg}
#' \item{Västra}{Lilla Edet, Ale, Kungälv, Stenungsund, Tjörn, Öckerö, Härryda,
#' Mölndal, Partille, Lerum och Alingsås}
#' \item{Södra}{Herrljunga, Vårgårda, Bollebygd, Borås, Mark, Svenljunga,
#' Tranemo och Ulricehamn}
#' \item{Göteborg}{Göteborg}
#' \item{Östra}{Essunga, Falköping, Grästorp, Götene, Lidköping, Skara, Vara,
#' Gullspång, Hjo, Karlsborg, Mariestad, Skövde, Tibro, Tidaholm och Töreboda}
#' }
#' @docType data
#' @keywords datasets
#' @name hsn
#' @family key_value_data
NULL
#' Swedish municipality codes
#'
#' Key-value codes for the Swedish municipalities.
#' First four digits in the LKF code.
#' Data from 2014-08-12.
#'
#' @docType data
#' @keywords datasets
#' @name kommun
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/lan-och-kommuner/}
NULL
#' Swedish parish codes
#'
#' Key-value codes for the Swedish parishes (forsamlingar).
#' All six digits in the LKF code.
#'
#' @docType data
#' @keywords datasets
#' @name forsamling
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/lan-och-kommuner/}
NULL
#' hemort and hemort2 codes (geographical codes)
#'
#' Both \code{hemort} and \code{hemort2} combines regional codes for lan,
#' kommun and forsamling.
#' \code{hemort} is the official (but rather old) code used within RCC.
#' \code{hemort2} is an updated version combining \code{\link{lan}},
#' \code{\link{kommun}} and \code{\link{forsamling}}
#'
#' @docType data
#' @keywords datasets
#' @name hemort
#' @family key_value_data
NULL
#' @rdname hemort
#' @name hemort2
NULL
# Administration ----------------------------------------------------------
#' Hospital codes
#'
#' Key-value codes for Swedish hospitals and some primary health care units.
#'
#' Data from 'Rockan'. Note that this is an old version of the classification!
#' See the reference link below for the new version.
#' The old version should be used for historical reasons.
#' One difference is for example that the Sahlgrenska university hospital is
#' one hospital
#' in the new version of the classification but the Swedish regional cancer
#' centers
#' still classify it as three different hospitals.
#' Note also that primary health care units did receive their own codes until
#' 1992 (?)
#' but not later!
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus
#' @family key_value_data
NULL
#' Hospital codes used by Socialstyrelsen and the National Patient Register
#'
#' Codes taken from Excel sheets 2014 from link below.
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus_par
#' @family key_value_data
NULL
#' Hospital codes used by INCA
#'
#' Codes taken from INCA's organizational register 2017-02-03.
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus_inca
#' @family key_value_data
NULL
#' Clinic codes
#'
#' Key-value codes for the Swedish clinic types.
#' Data mainly from url below (taken from the PDF).
#' Some extra codes are added from Rockan (290, 291, 292, 446, 921 and 999 for unknown).
#'
#' @docType data
#' @keywords datasets
#' @name klinik
#' @family key_value_data
NULL
#' Pathology department codes
#'
#' Key-value codes for the Swedish "laboratories".
#' Data is combined from two sources.
#' \enumerate{
#' \item The official list from SOFS 2006:15 (see url below).
#' This is the primary source for codes
#' appearing in both sources.
#' \item Extra codes are also added from the old Rockan registry for historical
#' reasons.
#' }
#' @docType data
#' @keywords datasets
#' @name patologiavdelning
#' @family key_value_data
NULL
# Diagnostics -------------------------------------------------------------
#' Klassifikation av vardatgarder (KVA)
#'
#' Key-value codes for KVA (for surgery and medicine).
#'
#' @docType data
#' @keywords datasets
#' @name kva
#' @family key_value_data
#' @source \url{https://www.socialstyrelsen.se/utveckla-verksamhet/e-halsa/klassificering-och-koder/kva/}
NULL
#' M-stadium
#'
#' Key-value codes for M-stadium (sep-03).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name m_rtr
#' @family key_value_data
NULL
#' N-stadium
#'
#' Key-value codes for N-stadium (sep-03).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name n_rtr
#' @family key_value_data
NULL
#' T-stadium
#'
#' Key-value codes for T-stadium (sep 2003).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name t_rtr
#' @family key_value_data
NULL
#' FIGO-stadium
#'
#' Key-value codes for FIGO-stadium (Tumorutbredning enl FIGO, sep 2003).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name figo
#' @family key_value_data
NULL
#' ICD-7 Grov
#'
#' Key-value codes for just the first three digits of the ICD-7 code.
#' This gives broader grouping.
#'
#' @docType data
#' @keywords datasets
#' @name icd7_grov
#' @family key_value_data
NULL
#' ICD-7
#'
#' Key-value codes for ICD-7.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icd7
#' @family key_value_data
NULL
#' ICD-9
#'
#' Key-value codes for ICD-9.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icd9
#' @family key_value_data
NULL
#' ICD-O
#'
#' Key-value codes for ICD-O.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icdo
#' @family key_value_data
NULL
#' ICD-O3
#'
#' Key-value codes for ICD-O3.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icdo3
#' @family key_value_data
NULL
#' ICD-O3 Grov
#'
#' Key-value codes for just the first two digits of the ICDO-3 code.
#' This gives broader grouping.
#' Based on the table of contents from:
#' http://www.socialstyrelsen.se/Lists/Artikelkatalog/Attachments/19446/2014-5-12.pdf
#'
#' @docType data
#' @keywords datasets
#' @name icdo3_grov
#' @family key_value_data
NULL
#' Sida
#'
#' Key-value codes for sida (Sida, 1 = Hoger, 2 = Vanster, 9 = Okant).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name sida
#' @family key_value_data
NULL
#' Grund till TNM (patologisk/klinisk)
#'
#' Key-value codes for TNM-grund (Grund till TNM, 1=patologisk, 2=klinisk).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name tnmgrund
#' @family key_value_data
NULL
#' ICD-10-SE code
#'
#' Key-value codes for ICD-10-SE 2020 (Swedish version).
#' Note that key codes are given without dots, i e C569, not C56.9.
#'
#' @docType data
#' @keywords datasets
#' @name icd10se
#' @family key_value_data
#' @source \url{https://www.socialstyrelsen.se/utveckla-verksamhet/e-halsa/klassificering-och-koder/kodtextfiler/}
NULL
#' ICD-10-CM code
#'
#' Key-value codes for ICD-10-CM 2020.
#' Note that key codes are given without dots, i e C569, not C56.9.
#'
#' @docType data
#' @keywords datasets
#' @name icd10cm
#' @family key_value_data
#' @source \url{ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Publications/ICD10CM/2020/}
NULL
#' ICD-9-CM diagnosis and procedure codes
#'
#' Key-value codes for ICD-9-CM diagnostics (\code{icd9cmd}) and
#' procedure (\code{icd9cmp}) codes (version 32).
#'
#' @docType data
#' @keywords datasets
#' @name icd9cmd
#' @family key_value_data
#' @source \url{https://www.cms.gov/Medicare/Coding/ICD9ProviderDiagnosticCodes/codes}
NULL
#' @rdname icd9cmd
#' @name icd9cmp
NULL
#' Snomed code
#'
#' Key-value codes for Snomed. Data from Rockan 2012-10-05.
#' Note that this variable is the same as \code{snomed} from Rockan.
#' It should not be confused with the later version \code{snomed3}!
#'
#' @docType data
#' @keywords datasets
#' @name snomed
#' @family key_value_data
NULL
#' Snomed 3
#'
#' Key-value codes for Snomed3 (fr 2005).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name snomed3
#' @family key_value_data
NULL
#' PAD (C24) code
#'
#' Key-value codes for PAD (C24) Data from Rockan 2012-09-18.
#'
#' @docType data
#' @keywords datasets
#' @name pad
#' @family key_value_data
NULL
# Misc --------------------------------------------------------------------
#' Gender code (kon)
#'
#' Key-value codes for gender (1 = man (Male) and 2 = Kvinna (female)).
#' Be aware of the spelling ("kon")!
#'
#' @docType data
#' @keywords datasets
#' @name kon
#' @family key_value_data
NULL
#' Anatomical Therapeutic Chemical (ATC) Classification System codes
#'
#' Key-value codes for ATC from the Swedish Medical Products Agency (2020-04-07).
#'
#' @docType data
#' @keywords datasets
#' @name atc
#' @family key_value_data
#' @source \url{https://nsl.mpa.se/}
NULL
#' Help tables from Rockan
#'
#' Key-value codes from old help tables used with Rockan
#' (by Swedish regional cancer centers)
#' @docType data
#' @keywords datasets
#' @name rockan
#' @family key_value_data
#' @aliases avgm ben digr dodca manuell obd status manuell
NULL
|
/R/datasets_keyvalue.R
|
no_license
|
gil906/decoder
|
R
| false
| false
| 11,653
|
r
|
# Geography ---------------------------------------------------------------
#' Swedish health care regional codes
#'
#' Key-value codes for the Swedish health care regions (1-6).
#'
#' @docType data
#' @keywords datasets
#' @name region
#' @family key_value_data
NULL
#' Swedish county codes
#'
#' Key-value codes for the Swedish counties (lan).
#' Two first digits of the LKF code.
#' Be aware of the spelling ("lan")!
#'
#' @docType data
#' @keywords datasets
#' @name lan
#' @family key_value_data
NULL
#' Swedish district codes
#'
#' Key-value codes for the Swedish districts (introduced 2016-01-01).
#'
#' @docType data
#' @keywords datasets
#' @name distrikt
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/distrikt/}
NULL
#' Geographical health care areas
#'
#' Key-value codes for health care areas.
#'
#' \itemize{
#' \item{Key: the first four digits from the LKF-code}
#' \item{Value: A geographical area (sub area of county/lan) with special
#' interest}
#' }
#'
#' The object currently only applies to the Western health care region
#' \itemize{
#' \item{Storgoteborg}
#' \item{Fyrbodal}
#' \item{Sodra_Alvsborg}
#' \item{Skaraborg}
#' \item{Norra_Halland}
#' }
#' Updates for other regions need to be requested (please do!).
#'
#' @section Kungalv:
#' Kungalv is an independent area by default. There are situations when Kungalv
#' should be dealt with as an
#' independent health care region or as a part of greater Gothenburg.
#' See section "extra_functions" in \link{decode}
#' to handle this.
#'
#' @docType data
#' @keywords datasets
#' @name sjukvardsomrade
#' @seealso \link{extra_functions}
#' @family key_value_data
NULL
#' HSN code (Hälso- och sjukvårdsnamnd)
#'
#' Key-value codes for the HSN:s in VGR valid from 2015.
#' Based on municipality codes (\code{\link{kommun}}).
#' Data from 2014-12-17.
#'
#' Categorization:
#' \describe{
#' \item{Norra}{Lysekil, Munkedal, Orust, Sotenäs, Strömstad, Tanum, Bengtsfors,
#' Dals-Ed, Färgelanda, Mellerud, Åmål, Trollhättan, Uddevalla och Vänersborg}
#' \item{Västra}{Lilla Edet, Ale, Kungälv, Stenungsund, Tjörn, Öckerö, Härryda,
#' Mölndal, Partille, Lerum och Alingsås}
#' \item{Södra}{Herrljunga, Vårgårda, Bollebygd, Borås, Mark, Svenljunga,
#' Tranemo och Ulricehamn}
#' \item{Göteborg}{Göteborg}
#' \item{Östra}{Essunga, Falköping, Grästorp, Götene, Lidköping, Skara, Vara,
#' Gullspång, Hjo, Karlsborg, Mariestad, Skövde, Tibro, Tidaholm och Töreboda}
#' }
#' @docType data
#' @keywords datasets
#' @name hsn
#' @family key_value_data
NULL
#' Swedish municipality codes
#'
#' Key-value codes for the Swedish municipalities.
#' First four digits in the LKF code.
#' Data from 2014-08-12.
#'
#' @docType data
#' @keywords datasets
#' @name kommun
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/lan-och-kommuner/}
NULL
#' Swedish parish codes
#'
#' Key-value codes for the Swedish parishes (forsamlingar).
#' All six digits in the LKF code.
#'
#' @docType data
#' @keywords datasets
#' @name forsamling
#' @family key_value_data
#' @source \url{https://www.scb.se/hitta-statistik/regional-statistik-och-kartor/regionala-indelningar/lan-och-kommuner/}
NULL
#' hemort and hemort2 codes (geographical codes)
#'
#' Both \code{hemort} and \code{hemort2} combines regional codes for lan,
#' kommun and forsamling.
#' \code{hemort} is the official (but rather old) code used within RCC.
#' \code{hemort2} is an updated version combining \code{\link{lan}},
#' \code{\link{kommun}} and \code{\link{forsamling}}
#'
#' @docType data
#' @keywords datasets
#' @name hemort
#' @family key_value_data
NULL
#' @rdname hemort
#' @name hemort2
NULL
# Administration ----------------------------------------------------------
#' Hospital codes
#'
#' Key-value codes for Swedish hospitals and some primary health care units.
#'
#' Data from 'Rockan'. Note that this is an old version of the classification!
#' See the reference link below for the new version.
#' The old version should be used for historical reasons.
#' One difference is for example that the Sahlgrenska university hospital is
#' one hospital
#' in the new version of the classification but the Swedish regional cancer
#' centers
#' still classify it as three different hospitals.
#' Note also that primary health care units did receive their own codes until
#' 1992 (?)
#' but not later!
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus
#' @family key_value_data
NULL
#' Hospital codes used by Socialstyrelsen and the National Patient Register
#'
#' Codes taken from Excel sheets 2014 from link below.
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus_par
#' @family key_value_data
NULL
#' Hospital codes used by INCA
#'
#' Codes taken from INCA's organizational register 2017-02-03.
#'
#' @docType data
#' @keywords datasets
#' @name sjukhus_inca
#' @family key_value_data
NULL
#' Clinic codes
#'
#' Key-value codes for the Swedish clinic types.
#' Data mainly from url below (taken from the PDF).
#' Some extra codes are added from Rockan (290, 291, 292, 446, 921 and 999 for unknown).
#'
#' @docType data
#' @keywords datasets
#' @name klinik
#' @family key_value_data
NULL
#' Pathology department codes
#'
#' Key-value codes for the Swedish "laboratories".
#' Data is combined from two sources.
#' \enumerate{
#' \item The official list from SOFS 2006:15 (see url below).
#' This is the primary source for codes
#' appearing in both sources.
#' \item Extra codes are also added from the old Rockan registry for historical
#' reasons.
#' }
#' @docType data
#' @keywords datasets
#' @name patologiavdelning
#' @family key_value_data
NULL
# Diagnostics -------------------------------------------------------------
#' Klassifikation av vardatgarder (KVA)
#'
#' Key-value codes for KVA (for surgery and medicine).
#'
#' @docType data
#' @keywords datasets
#' @name kva
#' @family key_value_data
#' @source \url{https://www.socialstyrelsen.se/utveckla-verksamhet/e-halsa/klassificering-och-koder/kva/}
NULL
#' M-stadium
#'
#' Key-value codes for M-stadium (sep-03).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name m_rtr
#' @family key_value_data
NULL
#' N-stadium
#'
#' Key-value codes for N-stadium (sep-03).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name n_rtr
#' @family key_value_data
NULL
#' T-stadium
#'
#' Key-value codes for T-stadium (sep 2003).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name t_rtr
#' @family key_value_data
NULL
#' FIGO-stadium
#'
#' Key-value codes for FIGO-stadium (Tumorutbredning enl FIGO, sep 2003).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name figo
#' @family key_value_data
NULL
#' ICD-7 Grov
#'
#' Key-value codes for just the first three digits of the ICDO-7 code.
#' This gives broader grouping.
#'
#' @docType data
#' @keywords datasets
#' @name icd7_grov
#' @family key_value_data
NULL
#' ICD-7
#'
#' Key-value codes for ICD-7.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icd7
#' @family key_value_data
NULL
#' ICD-9
#'
#' Key-value codes for ICD-9.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icd9
#' @family key_value_data
NULL
#' ICD-O
#'
#' Key-value codes for ICD-O.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icdo
#' @family key_value_data
NULL
#' ICD-O3
#'
#' Key-value codes for ICD-O3.
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name icdo3
#' @family key_value_data
NULL
#' ICD-O3 Grov
#'
#' Key-value codes for just the first two digits of the ICDO-3 code.
#' This gives broader grouping.
#' Based on the table of contents from:
#' http://www.socialstyrelsen.se/Lists/Artikelkatalog/Attachments/19446/2014-5-12.pdf
#'
#' @docType data
#' @keywords datasets
#' @name icdo3_grov
#' @family key_value_data
NULL
#' Sida
#'
#' Key-value codes for sida (Sida, 1 = Hoger, 2 = Vanster, 9 = Okant).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name sida
#' @family key_value_data
NULL
#' Grund till TNM (patologisk/klinisk)
#'
#' Key-value codes for TNM-grund (Grund till TNM, 1=patologisk, 2=klinisk).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name tnmgrund
#' @family key_value_data
NULL
#' ICD-10-SE code
#'
#' Key-value codes for ICD-10-SE 2020 (Swedish version).
#' Note that key codes are given without dots, i.e., C569, not C56.9.
#'
#' @docType data
#' @keywords datasets
#' @name icd10se
#' @family key_value_data
#' @source \url{https://www.socialstyrelsen.se/utveckla-verksamhet/e-halsa/klassificering-och-koder/kodtextfiler/}
NULL
#' ICD-10-CM code
#'
#' Key-value codes for ICD-10-CM 2020.
#' Note that key codes are given without dots, i.e., C569, not C56.9.
#'
#' @docType data
#' @keywords datasets
#' @name icd10cm
#' @family key_value_data
#' @source \url{ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Publications/ICD10CM/2020/}
NULL
#' ICD-9-CM diagnosis and procedure codes
#'
#' Key-value codes for ICD-9-CM diagnostics (\code{icd9cmd}) and
#' procedure (\code{icd9cmp}) codes (version 32).
#'
#' @docType data
#' @keywords datasets
#' @name icd9cmd
#' @family key_value_data
#' @source \url{https://www.cms.gov/Medicare/Coding/ICD9ProviderDiagnosticCodes/codes}
NULL
#' @rdname icd9cmd
#' @name icd9cmp
NULL
#' Snomed code
#'
#' Key-value codes for Snomed. Data from Rockan 2012-10-05.
#' Note that this variable is the same as \code{snomed} from Rockan.
#' It should not be confused with the later version \code{snomed3}!
#'
#' @docType data
#' @keywords datasets
#' @name snomed
#' @family key_value_data
NULL
#' Snomed 3
#'
#' Key-value codes for Snomed3 (fr 2005).
#' Data from Rockan (Rocen).
#'
#' @docType data
#' @keywords datasets
#' @name snomed3
#' @family key_value_data
NULL
#' PAD (C24) code
#'
#' Key-value codes for PAD (C24) Data from Rockan 2012-09-18.
#'
#' @docType data
#' @keywords datasets
#' @name pad
#' @family key_value_data
NULL
# Misc --------------------------------------------------------------------
#' Gender code (kon)
#'
#' Key-value codes for gender (1 = man (Male) and 2 = Kvinna (female)).
#' Be aware of the spelling ("kon")!
#'
#' @docType data
#' @keywords datasets
#' @name kon
#' @family key_value_data
NULL
#' Anatomical Therapeutic Chemical (ATC) Classification System codes
#'
#' Key-value codes for ATC from the Swedish Medical Products Agency (2020-04-07).
#'
#' @docType data
#' @keywords datasets
#' @name atc
#' @family key_value_data
#' @source \url{https://nsl.mpa.se/}
NULL
#' Help tables from Rockan
#'
#' Key-value codes from old help tables used with Rockan
#' (by Swedish regional cancer centers)
#' @docType data
#' @keywords datasets
#' @name rockan
#' @family key_value_data
#' @aliases avgm ben digr dodca manuell obd status manuell
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FFTreesdata_doc.R
\docType{data}
\name{fertility}
\alias{fertility}
\title{Fertility data}
\format{
A data frame containing 100 rows and 10 columns.
\describe{
\item{season}{Season in which the analysis was performed. (winter, spring, summer, fall)}
\item{age}{Age at the time of analysis}
\item{child.dis}{Childish diseases (i.e., chicken pox, measles, mumps, polio) (yes(1), no(0))}
\item{trauma}{Accident or serious trauma (yes(1), no(0))}
\item{surgery}{Surgical intervention (yes(1), no(0))}
\item{fevers}{High fevers in the last year (less than three months ago(-1), more than three months ago (0), no. (1))}
\item{alcohol}{Frequency of alcohol consumption (several times a day, every day, several times a week, once a week, hardly ever or never)}
\item{smoking}{Smoking habit (never(-1), occasional (0)) daily (1))}
\item{sitting}{Number of hours spent sitting per day}
\item{diagnosis}{\emph{Criterion}: Diagnosis normal (TRUE) vs. altered (FALSE) (88.0\% vs.\ 12.0\%).}
}
}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Fertility}
Original contributors:
David Gil
Lucentia Research Group
Department of Computer Technology
University of Alicante
Jose Luis Girela
Department of Biotechnology
University of Alicante
}
\usage{
fertility
}
\description{
This dataset describes a sample of 100 volunteers providing a semen sample that was analyzed according to the WHO 2010 criteria.
}
\details{
Sperm concentration are related to socio-demographic data, environmental factors, health status, and life habits.
We made the following enhancements to the original data for improved usability:
\itemize{
\item{The criterion was redefined from a factor variable with two levels
(\code{N=Normal}, \code{O=Altered}) into a logical variable (\code{TRUE} vs. \code{FALSE}).}
}
Other than that, the data remains consistent with the original dataset.
}
\seealso{
Other datasets:
\code{\link{blood}},
\code{\link{breastcancer}},
\code{\link{car}},
\code{\link{contraceptive}},
\code{\link{creditapproval}},
\code{\link{forestfires}},
\code{\link{heart.cost}},
\code{\link{heart.test}},
\code{\link{heart.train}},
\code{\link{heartdisease}},
\code{\link{iris.v}},
\code{\link{mushrooms}},
\code{\link{sonar}},
\code{\link{titanic}},
\code{\link{voting}},
\code{\link{wine}}
}
\concept{datasets}
\keyword{datasets}
|
/man/fertility.Rd
|
no_license
|
cran/FFTrees
|
R
| false
| true
| 2,425
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FFTreesdata_doc.R
\docType{data}
\name{fertility}
\alias{fertility}
\title{Fertility data}
\format{
A data frame containing 100 rows and 10 columns.
\describe{
\item{season}{Season in which the analysis was performed. (winter, spring, summer, fall)}
\item{age}{Age at the time of analysis}
\item{child.dis}{Childish diseases (i.e., chicken pox, measles, mumps, polio) (yes(1), no(0))}
\item{trauma}{Accident or serious trauma (yes(1), no(0))}
\item{surgery}{Surgical intervention (yes(1), no(0))}
\item{fevers}{High fevers in the last year (less than three months ago(-1), more than three months ago (0), no. (1))}
\item{alcohol}{Frequency of alcohol consumption (several times a day, every day, several times a week, once a week, hardly ever or never)}
\item{smoking}{Smoking habit (never(-1), occasional (0)) daily (1))}
\item{sitting}{Number of hours spent sitting per day}
\item{diagnosis}{\emph{Criterion}: Diagnosis normal (TRUE) vs. altered (FALSE) (88.0\% vs.\ 12.0\%).}
}
}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Fertility}
Original contributors:
David Gil
Lucentia Research Group
Department of Computer Technology
University of Alicante
Jose Luis Girela
Department of Biotechnology
University of Alicante
}
\usage{
fertility
}
\description{
This dataset describes a sample of 100 volunteers providing a semen sample that was analyzed according to the WHO 2010 criteria.
}
\details{
Sperm concentration are related to socio-demographic data, environmental factors, health status, and life habits.
We made the following enhancements to the original data for improved usability:
\itemize{
\item{The criterion was redefined from a factor variable with two levels
(\code{N=Normal}, \code{O=Altered}) into a logical variable (\code{TRUE} vs. \code{FALSE}).}
}
Other than that, the data remains consistent with the original dataset.
}
\seealso{
Other datasets:
\code{\link{blood}},
\code{\link{breastcancer}},
\code{\link{car}},
\code{\link{contraceptive}},
\code{\link{creditapproval}},
\code{\link{forestfires}},
\code{\link{heart.cost}},
\code{\link{heart.test}},
\code{\link{heart.train}},
\code{\link{heartdisease}},
\code{\link{iris.v}},
\code{\link{mushrooms}},
\code{\link{sonar}},
\code{\link{titanic}},
\code{\link{voting}},
\code{\link{wine}}
}
\concept{datasets}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vsnc.R
\name{vsnc}
\alias{vsnc}
\title{A package that provide data and analysis code in Agri}
\description{
It contains plant breeding, forest breeding, and animal breeding data, among others.
It also has some functions that assist the asreml package.
}
\author{
Dave<Dave@vsni.co.uk>
}
|
/man/vsnc.Rd
|
no_license
|
dengfei2013/learnasreml
|
R
| false
| true
| 345
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vsnc.R
\name{vsnc}
\alias{vsnc}
\title{A package that provide data and analysis code in Agri}
\description{
It contains plant breeding, forest breeding, and animal breeding data, among others.
It also has some functions that assist the asreml package.
}
\author{
Dave<Dave@vsni.co.uk>
}
|
#!/usr/bin/env Rscript
# Facebook post scraper: repeatedly collects post links for a fixed set of
# persons, fetches post details, and writes new posts to a SQL table.
setwd("~/fb/")
source("fb_common.R")
# NOTE(review): credentials are hard-coded below -- move them to environment
# variables or a config file kept out of version control.
remDr <- remoteDriver(remoteServerAddr = "localhost", port = 4446L, browserName = "firefox")
remDr$open(silent = TRUE)
remDr$navigate("http://www.facebook.com")
fb_login(id = "melnykeo94@gmail.com", pass = "new_post[!new_post")

# BUGFIX: in the original, `con` was checked before it was ever created and
# `post` was incremented before it had a value; both errors were silently
# swallowed by the `error = function(e) NULL` handlers, so no iteration did
# useful work and the counter never printed.  Guarded initialization keeps
# backward compatibility if fb_common.R already defines them.
if (!exists("con")) con <- connect_sql()
if (!exists("post")) post <- 0

for (iter in seq_len(50000)) {
  tryCatch({
    if (check_sql(con)) con <- connect_sql()
    jj <- exezekutor(persons[185:369])$result  # 205
    jj <- jj[!is.na(jj)]
    new_post <- unique(jj[!str_detect(jj, "l.facebook.com")])
    con <- connect_sql()
    old <- query(con) %>% pull(post_link)
    # keep only links not already stored
    new_post <- new_post[!new_post %in% old]
    # BUGFIX: the inner loop no longer shadows the outer loop index `i`.
    for (link in new_post) {
      tryCatch({
        request <- get_post_info(link)
        if (!is.na(request$user)) {
          interruptor(FUN = wtable, args = con, time.limit = 6, ALTFUN = wtable)
        }
        post <- post + 1
      }, error = function(e) NULL)  # best-effort: skip posts that fail
    }
    cat(paste0(Sys.time(), ": ", post, " posts added"))
    # drop session cookies between passes
    lapply(c("p", "act", "datr", "lu", "fr", "presence", "csm", "pl", "sb"),
           remDr$deleteCookieNamed)
  }, error = function(e) NULL)  # best-effort: keep scraping on any failure
}
|
/fb_scraper2.R
|
no_license
|
RomanKyrychenko/fb
|
R
| false
| false
| 1,168
|
r
|
#!/usr/bin/env Rscript
# Facebook post scraper: repeatedly collects post links for a fixed set of
# persons, fetches post details, and writes new posts to a SQL table.
setwd("~/fb/")
source("fb_common.R")
# NOTE(review): credentials are hard-coded below -- move them to environment
# variables or a config file kept out of version control.
remDr <- remoteDriver(remoteServerAddr = "localhost", port = 4446L, browserName = "firefox")
remDr$open(silent = TRUE)
remDr$navigate("http://www.facebook.com")
fb_login(id = "melnykeo94@gmail.com", pass = "new_post[!new_post")

# BUGFIX: in the original, `con` was checked before it was ever created and
# `post` was incremented before it had a value; both errors were silently
# swallowed by the `error = function(e) NULL` handlers, so no iteration did
# useful work and the counter never printed.  Guarded initialization keeps
# backward compatibility if fb_common.R already defines them.
if (!exists("con")) con <- connect_sql()
if (!exists("post")) post <- 0

for (iter in seq_len(50000)) {
  tryCatch({
    if (check_sql(con)) con <- connect_sql()
    jj <- exezekutor(persons[185:369])$result  # 205
    jj <- jj[!is.na(jj)]
    new_post <- unique(jj[!str_detect(jj, "l.facebook.com")])
    con <- connect_sql()
    old <- query(con) %>% pull(post_link)
    # keep only links not already stored
    new_post <- new_post[!new_post %in% old]
    # BUGFIX: the inner loop no longer shadows the outer loop index `i`.
    for (link in new_post) {
      tryCatch({
        request <- get_post_info(link)
        if (!is.na(request$user)) {
          interruptor(FUN = wtable, args = con, time.limit = 6, ALTFUN = wtable)
        }
        post <- post + 1
      }, error = function(e) NULL)  # best-effort: skip posts that fail
    }
    cat(paste0(Sys.time(), ": ", post, " posts added"))
    # drop session cookies between passes
    lapply(c("p", "act", "datr", "lu", "fr", "presence", "csm", "pl", "sb"),
           remDr$deleteCookieNamed)
  }, error = function(e) NULL)  # best-effort: keep scraping on any failure
}
|
# SETUP ----
# Install-on-demand instead of unconditionally reinstalling every package on
# every run.  Load order keeps plyr before dplyr so dplyr's verbs mask plyr's,
# as in the original attach sequence.
# NOTE(review): the original installed/loaded "stat", which is not a CRAN
# package; the base package "stats" is attached by default, so it is dropped.
pkgs <- c("plyr", "gdata", "ROCR", "leaps", "glmnet", "pROC",
          "ggplot2", "dplyr", "xlsx")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
# ---------------------------------------------------------------------------------------
# READ IN DATA
# NOTE(review): hard-coded, user-specific absolute path -- consider a relative
# path or a command-line argument so the script runs on other machines.
table_by_user <- read.csv("/Users/apaetsch/Desktop/imp_eng_conv_v1_10k_users_ROLLUP_v1.csv")
###########################################################################################
###########################################################################################
# PREPARATION: SIMPLIFY TABLE AND PUT INTO MATRIX FORMAT
table_by_user_v1 <- table_by_user
# Rename column 19 by position -- assumes the CSV column order is fixed.
# TODO confirm column 19 really is the NULL-touches count.
colnames(table_by_user_v1)[19] <- "NULL_touches"
# Recode the outcome to 0/1 so it can serve as the binomial response below.
table_by_user_v1$converted_to_VISIT <- ifelse(table_by_user_v1$converted_to_VISIT == "FALSE", 0, 1)
# Drop identifiers, the alternative conversion outcomes, all timeframe
# variables, and every percentage version of the touch-point counts,
# keeping only the absolute counts as predictors.
tbu_cleaned <- subset(table_by_user_v1, select = -c(X, ak_user_id,
  total_conversions_VISIT, total_conversions_SALES, total_conversions_NULL,
  converted_to_SALES,
  timeframe_secs, timeframe_mins, timeframe_days,
  channel_display_perc, channel_highimpact_perc, channel_search_perc,
  channel_video_perc, channel_NULL_perc,
  platform_NULL_abs, platform_desktop_perc, platform_mobile_perc,
  platform_social_perc, platform_NULL_perc,
  typeofbuy_NULL_abs, typeofbuy_contentamplification_perc, typeofbuy_endemic_perc,
  typeofbuy_programmatic_perc, typeofbuy_publisherdirect_perc, typeofbuy_search_perc,
  typeofbuy_network_perc, typeofbuy_NULL_perc,
  sitetype_NULL_abs, sitetype_broadcast_perc, sitetype_contentamplification_perc,
  sitetype_discoveryengine_perc, sitetype_endemic_perc, sitetype_internetradio_perc,
  sitetype_lifestyle_perc, sitetype_majormedia_perc, sitetype_network_perc,
  sitetype_portal_perc, sitetype_programmatic_perc, sitetype_sportsleague_perc,
  sitetype_videoportal_perc, sitetype_NULL_perc,
  overalltargeting_NULL_abs, overalltargeting_behavioral_perc,
  overalltargeting_contextual_perc, overalltargeting_geo_perc,
  overalltargeting_lookalike_perc, overalltargeting_predictive_perc,
  overalltargeting_prospecting_perc, overalltargeting_remarketing_perc,
  overalltargeting_retargeting_perc, overalltargeting_NULL_perc,
  funnel_NULL_abs, funnel_lower_perc, funnel_middle_perc,
  funnel_upper_perc, funnel_NULL_perc))
# Put the outcome first, followed by the absolute-count predictors.
tbu_cleaned <- subset(tbu_cleaned, select=c(converted_to_VISIT,total_impressions,
  first_touchpoint:channel_video_abs,
  platform_desktop_abs:funnel_upper_abs,
  NULL_touches))
tbu_final <- tbu_cleaned
# Timestamps -> POSIXct -> numeric epoch seconds, so the models treat the
# first/last touch times as plain numeric predictors.
tbu_final$first_touchpoint <- as.POSIXct(tbu_final$first_touchpoint)
tbu_final$last_touchpoint <- as.POSIXct(tbu_final$last_touchpoint)
tbu_final$first_touchpoint <- as.numeric(tbu_final$first_touchpoint)
tbu_final$last_touchpoint <- as.numeric(tbu_final$last_touchpoint)
# Persist the cleaned table for reuse.
write.csv(tbu_final, file = "/Users/apaetsch/Desktop/imp_eng_conv_v1_10k_users_ROLLUP_v2.csv")
# CREATE TEST AND TRAINING SET
# Reproducible 50/50 random split.
set.seed(123)
sample_size <- floor(0.5 * nrow(tbu_final))
train_ind <- sample(seq_len(nrow(tbu_final)), size = sample_size)
train <- tbu_final[train_ind, ]
test <- tbu_final[-train_ind, ]
####################################################################################################
####################################################################################################
####################################################################################################
###### VERSION 1: LINEAR REGRESSION ################################################################
####################################################################################################
# RUN REGRESSION
# NOTE(review): the full model below is fit and then immediately overwritten
# by the reduced model -- it appears to be kept only for interactive use.
fit.logit <- glm(converted_to_VISIT ~.,family=binomial(link = "logit"), data = train)
# Reduced model with a hand-picked predictor subset.
fit.logit <- glm(converted_to_VISIT ~ total_impressions + last_touchpoint +
                   platform_desktop_abs + overalltargeting_behavioral_abs +
                   overalltargeting_contextual_abs + funnel_lower_abs,
                 family=binomial(link = "logit"), data = train)
# GET MODEL SUMMARY
fit.logit.summary <- fit.logit %>% summary
fit.logit
fit.logit.summary
# get model AIC
fit.logit$aic
# NOTE(review): this sheet is later clobbered at the end of the script, where
# write.xlsx() is called on the same file without append = TRUE.
write.xlsx(fit.logit.summary$coefficients, file = "imp1_script_logreg_RESULTS.xlsx", sheetName = "fit.logit.summary")
#total_impressions + last_touchpoint + platform_desktop_abs + overalltargeting_behavioral_abs +
# overalltargeting_contextual_abs + overalltargeting_geo_abs + overalltargeting_predictive_abs +
# funnel_lower_abs
# get model terms as a single " + "-separated string
fit.logit_coeff <- as.matrix(fit.logit$coefficients)
p0lm <- function(X){paste0(X, collapse = " + ")}
var_logit <- p0lm(rownames(fit.logit_coeff))
# GET PREDICTION [TRAIN]
train.logreg <- train
train.logreg$p_garbage <- predict(fit.logit, type = "response")
# confusion table, accuracy, and true-positive rate at the p = .5 boundary
train.logreg$predicted_conversion <- ifelse(train.logreg$p_garbage > .5, 1, 0)
act_vs_pred.logreg.train <- table(train.logreg$converted_to_VISIT, train.logreg$predicted_conversion)
true_p.logreg.train <- mean(train.logreg$converted_to_VISIT==train.logreg$predicted_conversion)
true_pos_p.logreg.train <- act_vs_pred.logreg.train[2,2]/sum(act_vs_pred.logreg.train[,2])
act_vs_pred.logreg.train
true_p.logreg.train
true_pos_p.logreg.train
# calculate and plot ROC
ROC.garbage.train <- roc(converted_to_VISIT ~ p_garbage, data = train.logreg)
plot(ROC.garbage.train)
ROC.garbage.train
# GET PREDICTION [TEST]
test.logreg <- test
test.logreg$p_garbage <- predict(fit.logit, newdata = test.logreg, type = "response")
# same metrics on the held-out half
test.logreg$predicted_conversion <- ifelse(test.logreg$p_garbage > .5, 1, 0)
act_vs_pred.logreg.test <- table(test.logreg$converted_to_VISIT, test.logreg$predicted_conversion)
true_p.logreg.test <- mean(test.logreg$converted_to_VISIT==test.logreg$predicted_conversion)
true_pos_p.logreg.test <- act_vs_pred.logreg.test[2,2]/sum(act_vs_pred.logreg.test[,2])
act_vs_pred.logreg.test
true_p.logreg.test
true_pos_p.logreg.test
# calculate and plot ROC
ROC.garbage.test <- roc(converted_to_VISIT ~ p_garbage, data = test.logreg)
plot(ROC.garbage.test)
ROC.garbage.test
# how to plot predicted against actual
plot(test.logreg$converted_to_VISIT, test.logreg$predicted_conversion)
####################################################################################################
####################################################################################################
###### VERSION 1: LASSO VS. RIDGE ##################################################################
####################################################################################################
# SETUP FINAL RESULTS TABLES
# One row per elastic-net mixing parameter alpha.
alphas <- c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0)

# Selected-variable table.  BUGFIX: the original called the non-existent
# function `conames()` and swapped the two column labels relative to how the
# columns are filled later (column 2 holds the variable count, column 3 the
# "+"-separated variable names).
var_store <- data.frame(matrix(ncol = 3, nrow = length(alphas)))
colnames(var_store) <- c("alpha", "no_variables", "variables")
var_store$alpha <- alphas

# Final accuracy table: AIC, train/test accuracy, true-positive rate and AUC.
fin_results <- data.frame(matrix(ncol = 8, nrow = length(alphas)))
colnames(fin_results) <- c("alpha", "aic",
                           "true_p.rl.train", "true_pos_p.rl.train",
                           "ROC.elastic.train$auc",
                           "true_p.rl.test", "true_pos_p.rl.test",
                           "ROC.elastic.test$auc")
fin_results$alpha <- alphas

# Accuracy at selected decision boundaries for the elastic-net logit plot.
net_logit_val <- data.frame(matrix(ncol = 5, nrow = length(alphas)))
colnames(net_logit_val) <- c("alpha", "X=0.01", "X=0.02", "X=0.03", "X=0.10")
net_logit_val$alpha <- alphas
# DEFINE X AND Ys
# Model matrix without the intercept column; response as a one-column matrix.
X.val <- as.matrix(model.matrix(converted_to_VISIT ~ ., train)[, -1])
Y.val <- as.matrix(train[, 1])

# DEFINE ALPHA (0 = ridge, 1 = lasso; re-run this section per value)
alp <- 0.0

# DEFINE LAMBDA via 10-fold cross-validation; keep the 1-SE-rule lambda.
fit.elastic.cv <- cv.glmnet(X.val, Y.val, alpha = alp, family = "binomial",
                            nfolds = 10, type.measure = "deviance")
fit.elastic.1se <- glmnet(X.val, Y.val, alpha = alp, family = "binomial",
                          lambda = fit.elastic.cv$lambda.1se)
fit.elastic.1se.beta <- coef(fit.elastic.1se)
# test and see values
plot(fit.elastic.cv)

# FIND VARIABLES WITH NON-ZERO COEFFICIENTS (ridge/lasso elastic selection)
beta.elastic <- fit.elastic.1se.beta[which(fit.elastic.1se.beta != 0), ]
beta.elastic <- as.matrix(beta.elastic)

# SAVE THE CHOSEN VARIABLES
# Join the names with " + " and strip the leading "(Intercept) + ".  Note the
# count below still includes the intercept, as in the original script.
rownames_list <- rownames(beta.elastic)
p0lm <- function(X) paste0(X, collapse = " + ")
raw_text <- p0lm(rownames(beta.elastic))
clean_text <- gsub("\\(Intercept\\) \\+ ", "", raw_text)

# BUGFIX: the original used twelve two-argument ifelse() calls here, which
# error whenever the condition is FALSE (the mandatory `no` argument is
# missing), and one of them wrote to the non-existent column 23
# (var_store[6,23]).  A row lookup on the current alpha is equivalent and safe.
row_idx <- match(alp, c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0))
var_store[row_idx, 2] <- length(rownames_list)
var_store[row_idx, 3] <- clean_text

# DO LOGISTIC REGRESSION WITH VARIABLES CHOSEN ABOVE
# (replaces the original switch() on a stringified alpha)
picked_var <- as.character(var_store[row_idx, 3])
formula_text <- paste("converted_to_VISIT ~ ", picked_var)
fit.elastic <- glm(as.formula(formula_text), family = binomial(link = 'logit'),
                   data = train)
# GET MODEL SUMMARY [TRAIN DATA]
fit.elastic.summary <- fit.elastic %>% summary
fit.elastic
fit.elastic.summary
# get model AIC
fit.elastic$aic

# GET PREDICTION [TRAIN]
train.rl <- train
train.rl$p_elastic <- predict(fit.elastic, type = "response")
# confusion table, accuracy, and true-positive rate at the p = .5 boundary
train.rl$predicted_conversion <- ifelse(train.rl$p_elastic > .5, 1, 0)
act_vs_pred.rl.train <- table(train.rl$converted_to_VISIT, train.rl$predicted_conversion)
true_p.rl.train <- mean(train.rl$converted_to_VISIT == train.rl$predicted_conversion)
true_pos_p.rl.train <- act_vs_pred.rl.train[2, 2] / sum(act_vs_pred.rl.train[, 2])
act_vs_pred.rl.train
true_p.rl.train
true_pos_p.rl.train
# ROC curve / AUC on the training set
ROC.elastic.train <- roc(converted_to_VISIT ~ p_elastic, data = train.rl)
plot(ROC.elastic.train)
ROC.elastic.train

# GET PREDICTION [TEST]
test.rl <- test
test.rl$p_elastic <- predict(fit.elastic, newdata = test.rl, type = "response")
# same metrics on the held-out half
test.rl$predicted_conversion <- ifelse(test.rl$p_elastic > .5, 1, 0)
act_vs_pred.rl.test <- table(test.rl$converted_to_VISIT, test.rl$predicted_conversion)
true_p.rl.test <- mean(test.rl$converted_to_VISIT == test.rl$predicted_conversion)
true_pos_p.rl.test <- act_vs_pred.rl.test[2, 2] / sum(act_vs_pred.rl.test[, 2])
act_vs_pred.rl.test
true_p.rl.test
# BUGFIX: the original printed true_pos_p.logreg.test (a value from the
# earlier plain-logit section) here instead of the elastic-net value.
true_pos_p.rl.test
# ROC curve / AUC on the test set
ROC.elastic.test <- roc(converted_to_VISIT ~ p_elastic, data = test.rl)
plot(ROC.elastic.test)
ROC.elastic.test
ROC.elastic.test$auc

# CUSTOM FUNCTIONS: accuracy rate when classifying at decision boundary i
lapply.uvec <- function(X, fx) lapply(X, fx) %>% unlist %>% as.vector
perc.f.75 <- function(i) {
  sum(as.numeric(as.numeric(test.rl$p_elastic > i)
                 == (test.rl$converted_to_VISIT %>% unique)[2])) / nrow(test.rl)
}
pY.75 <- lapply.uvec(((1:1000) / 1000), perc.f.75)

# PLOT ELASTIC NET LOGIT PLOT: accuracy as a function of the boundary
results.elasticlogit <- data.frame(X = c(((1:1000) / 1000)), p.75 = pY.75)
results.elasticlogit.plot <- ggplot(results.elasticlogit, aes(X)) +
  geom_line(aes(y = p.75, colour = "p.75")) +
  ggtitle("Elastic Net Logit") + ylab("Accuracy Rate at p=X decision boundary")
results.elasticlogit.plot
# INSERT VALUES INTO FINAL RESULTS TABLES
# final results table: accuracy
# BUGFIX: the original used two-argument ifelse() calls for every cell of
# every row, which error whenever alp does not match that row (the mandatory
# `no` argument is missing).  Writing directly into the row matching the
# current alpha is equivalent and safe.
res_row <- match(alp, c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0))
fin_results[res_row, 2] <- fit.elastic$aic
fin_results[res_row, 3] <- true_p.rl.train
fin_results[res_row, 4] <- true_pos_p.rl.train
fin_results[res_row, 5] <- ROC.elastic.train$auc
fin_results[res_row, 6] <- true_p.rl.test
fin_results[res_row, 7] <- true_pos_p.rl.test
fin_results[res_row, 8] <- ROC.elastic.test$auc
fin_results
# final results table: elastic net logit plot
# BUGFIX: same two-argument ifelse() problem as above (errors whenever the
# condition is FALSE); the original also filled row 1 twice.
nlv_row <- match(alp, c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0))
net_logit_val[nlv_row, 2] <- perc.f.75(0.01)
net_logit_val[nlv_row, 3] <- perc.f.75(0.02)
net_logit_val[nlv_row, 4] <- perc.f.75(0.03)
net_logit_val[nlv_row, 5] <- perc.f.75(0.10)
net_logit_val

var_store
fin_results
net_logit_val

# Export results; the first call recreates the workbook, later sheets append.
write.xlsx(fin_results, file = "imp1_script_logreg_RESULTS.xlsx",
           sheetName = "1 - fin_results")
write.xlsx(net_logit_val, file = "imp1_script_logreg_RESULTS.xlsx",
           sheetName = "2 - net_logit_val", append = TRUE)
write.xlsx(var_store, file = "imp1_script_logreg_RESULTS.xlsx",
           sheetName = "3 - var_store", append = TRUE)

# CUSTOM FUNCTION: GET ROWNAMES TO PASTE INTO FUNCTION (after define alpha part)
# rownames(beta.elastic)
# p0lm <- function(X){paste0(X, collapse = " + ")}
# p0lm(rownames(beta.elastic))
##################################################################################################
##################################################################################################
##################################################################################################
# ALTERNATIVE FUNCTIONS... didn't work
# net_logit_insert_vector <- c(alp, perc.f.75(0.01), perc.f.75(0.02), perc.f.75(0.03), perc.f.75(0.10))
# net_logit_val[1,] <- ifelse(alp == 0.0, net_logit_insert_vector)
# net_logit_val[2,] <- ifelse(alp == 0.2, net_logit_insert_vector)
# net_logit_val[3,] <- ifelse(alp == 0.4, net_logit_insert_vector)
# net_logit_val[4,] <- ifelse(alp == 0.6, net_logit_insert_vector)
# net_logit_val[5,] <- ifelse(alp == 0.8, net_logit_insert_vector)
# net_logit_val[6,] <- ifelse(alp == 1.0, net_logit_insert_vector)
# net_logit_val
# fin_results_insert_vector <- c(alp, test.rl$aic, true_p.rl.train, true_pos_p.rl.train,
# ROC.elastic.train$auc, true_p.rl.test, true_pos_p.rl.test,
# ROC.elastic.test$auc)
# fin_results[1,] <- ifelse(alp == 0.0, fin_results_insert_vector)
# fin_results[2,] <- ifelse(alp == 0.2, fin_results_insert_vector)
# fin_results[3,] <- ifelse(alp == 0.4, fin_results_insert_vector)
# fin_results[4,] <- ifelse(alp == 0.6, fin_results_insert_vector)
# fin_results[5,] <- ifelse(alp == 0.8, fin_results_insert_vector)
# fin_results[6,] <- ifelse(alp == 1.0, fin_results_insert_vector)
# fin_results
|
/imp1_script_logreg.R
|
no_license
|
apaetsch/ACCT399-AnnalectAnalyticsProject
|
R
| false
| false
| 19,739
|
r
|
# SETUP ----
# Install-on-demand instead of unconditionally reinstalling every package on
# every run.  Load order keeps plyr before dplyr so dplyr's verbs mask plyr's,
# as in the original attach sequence.
# NOTE(review): the original installed/loaded "stat", which is not a CRAN
# package; the base package "stats" is attached by default, so it is dropped.
pkgs <- c("plyr", "gdata", "ROCR", "leaps", "glmnet", "pROC",
          "ggplot2", "dplyr", "xlsx")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
# ---------------------------------------------------------------------------------------
# READ IN DATA
# NOTE(review): absolute local path — must be adjusted per machine.
table_by_user <- read.csv("/Users/apaetsch/Desktop/imp_eng_conv_v1_10k_users_ROLLUP_v1.csv")
###########################################################################################
###########################################################################################
# PREPARATION: SIMPLIFY TABLE AND PUT INTO MATRIX FORMAT
table_by_user_v1 <- table_by_user
# Column 19 is renamed by position — assumes the CSV layout is fixed; TODO confirm.
colnames(table_by_user_v1)[19] <- "NULL_touches"
# Recode the outcome to 0/1 (anything other than the string "FALSE" counts as a conversion).
table_by_user_v1$converted_to_VISIT <- ifelse(table_by_user_v1$converted_to_VISIT == "FALSE", 0, 1)
# Drop identifiers, leakage columns (other conversion outcomes), and all *_perc
# duplicates of the absolute-count features.
tbu_cleaned <- subset(table_by_user_v1, select = -c(X, ak_user_id,
                                                    total_conversions_VISIT, total_conversions_SALES, total_conversions_NULL,
                                                    converted_to_SALES,
                                                    timeframe_secs, timeframe_mins, timeframe_days,
                                                    channel_display_perc, channel_highimpact_perc, channel_search_perc,
                                                    channel_video_perc, channel_NULL_perc,
                                                    platform_NULL_abs, platform_desktop_perc, platform_mobile_perc,
                                                    platform_social_perc, platform_NULL_perc,
                                                    typeofbuy_NULL_abs, typeofbuy_contentamplification_perc, typeofbuy_endemic_perc,
                                                    typeofbuy_programmatic_perc, typeofbuy_publisherdirect_perc, typeofbuy_search_perc,
                                                    typeofbuy_network_perc, typeofbuy_NULL_perc,
                                                    sitetype_NULL_abs, sitetype_broadcast_perc, sitetype_contentamplification_perc,
                                                    sitetype_discoveryengine_perc, sitetype_endemic_perc, sitetype_internetradio_perc,
                                                    sitetype_lifestyle_perc, sitetype_majormedia_perc, sitetype_network_perc,
                                                    sitetype_portal_perc, sitetype_programmatic_perc, sitetype_sportsleague_perc,
                                                    sitetype_videoportal_perc, sitetype_NULL_perc,
                                                    overalltargeting_NULL_abs, overalltargeting_behavioral_perc,
                                                    overalltargeting_contextual_perc, overalltargeting_geo_perc,
                                                    overalltargeting_lookalike_perc, overalltargeting_predictive_perc,
                                                    overalltargeting_prospecting_perc, overalltargeting_remarketing_perc,
                                                    overalltargeting_retargeting_perc, overalltargeting_NULL_perc,
                                                    funnel_NULL_abs, funnel_lower_perc, funnel_middle_perc,
                                                    funnel_upper_perc, funnel_NULL_perc))
# Reorder: outcome first, then predictors.
tbu_cleaned <- subset(tbu_cleaned, select=c(converted_to_VISIT,total_impressions,
                                            first_touchpoint:channel_video_abs,
                                            platform_desktop_abs:funnel_upper_abs,
                                            NULL_touches))
tbu_final <- tbu_cleaned
# Convert timestamps to POSIXct, then to numeric seconds so glm/glmnet can use them.
tbu_final$first_touchpoint <- as.POSIXct(tbu_final$first_touchpoint)
tbu_final$last_touchpoint <- as.POSIXct(tbu_final$last_touchpoint)
tbu_final$first_touchpoint <- as.numeric(tbu_final$first_touchpoint)
tbu_final$last_touchpoint <- as.numeric(tbu_final$last_touchpoint)
write.csv(tbu_final, file = "/Users/apaetsch/Desktop/imp_eng_conv_v1_10k_users_ROLLUP_v2.csv")
# CREATE TEST AND TRAINING SET
# 50/50 split, reproducible via the fixed seed.
set.seed(123)
sample_size <- floor(0.5 * nrow(tbu_final))
train_ind <- sample(seq_len(nrow(tbu_final)), size = sample_size)
train <- tbu_final[train_ind, ]
test <- tbu_final[-train_ind, ]
####################################################################################################
####################################################################################################
####################################################################################################
###### VERSION 1: LINEAR REGRESSION ################################################################
####################################################################################################
# RUN REGRESSION
# NOTE(review): this full-model fit is immediately overwritten by the reduced
# model below — kept, presumably, for interactive exploration.
fit.logit <- glm(converted_to_VISIT ~.,family=binomial(link = "logit"), data = train)
# Baseline logistic regression on a hand-picked subset of predictors.
fit.logit <- glm(converted_to_VISIT ~ total_impressions + last_touchpoint +
                   platform_desktop_abs + overalltargeting_behavioral_abs +
                   overalltargeting_contextual_abs + funnel_lower_abs,
                 family=binomial(link = "logit"), data = train)
# GET MODEL SUMMARY
fit.logit.summary <- fit.logit %>% summary
fit.logit
fit.logit.summary
# get model AIC
fit.logit$aic
# First sheet: creates/overwrites the results workbook (no append=TRUE here).
write.xlsx(fit.logit.summary$coefficients, file = "imp1_script_logreg_RESULTS.xlsx", sheetName = "fit.logit.summary")
#total_impressions + last_touchpoint + platform_desktop_abs + overalltargeting_behavioral_abs +
#   overalltargeting_contextual_abs + overalltargeting_geo_abs + overalltargeting_predictive_abs +
#   funnel_lower_abs
# get variables as text
fit.logit_coeff <- as.matrix(fit.logit$coefficients)
# Collapse the coefficient names into a single " + "-separated formula string.
p0lm <- function(X){paste0(X, collapse = " + ")}
var_logit <- p0lm(rownames(fit.logit_coeff))
# GET PREDICTON [TRAIN]
train.logreg <- train
train.logreg$p_garbage <- predict(fit.logit, type = "response")
# calculate accuracy (~RMSE)
# Decision boundary at p = 0.5; confusion table, overall accuracy and
# true-positive rate (precision of the predicted-positive column).
train.logreg$predicted_conversion <- ifelse(train.logreg$p_garbage > .5, 1, 0)
act_vs_pred.logreg.train <- table(train.logreg$converted_to_VISIT, train.logreg$predicted_conversion)
true_p.logreg.train <- mean(train.logreg$converted_to_VISIT==train.logreg$predicted_conversion)
true_pos_p.logreg.train <- act_vs_pred.logreg.train[2,2]/sum(act_vs_pred.logreg.train[,2])
act_vs_pred.logreg.train
true_p.logreg.train
true_pos_p.logreg.train
# calculate and plot ROC
ROC.garbage.train <- roc(converted_to_VISIT ~ p_garbage, data = train.logreg)
plot(ROC.garbage.train)
ROC.garbage.train
# GET PREDICTION [TEST]
test.logreg <- test
test.logreg$p_garbage <- predict(fit.logit, newdata = test.logreg, type = "response")
# calculate accuracy (~RMSE)
test.logreg$predicted_conversion <- ifelse(test.logreg$p_garbage > .5, 1, 0)
act_vs_pred.logreg.test <- table(test.logreg$converted_to_VISIT, test.logreg$predicted_conversion)
true_p.logreg.test <- mean(test.logreg$converted_to_VISIT==test.logreg$predicted_conversion)
true_pos_p.logreg.test <- act_vs_pred.logreg.test[2,2]/sum(act_vs_pred.logreg.test[,2])
act_vs_pred.logreg.test
true_p.logreg.test
true_pos_p.logreg.test
# calculate and plot ROC
ROC.garbage.test <- roc(converted_to_VISIT ~ p_garbage, data = test.logreg)
plot(ROC.garbage.test)
ROC.garbage.test
# how to plot predicted against actual
plot(test.logreg$converted_to_VISIT, test.logreg$predicted_conversion)
####################################################################################################
####################################################################################################
###### VERSION 1: LASSO VS. RIDGE ##################################################################
####################################################################################################
# SETUP FINAL RESULTS TABLES
# selected variable table - TEXT
# One row per alpha; column 2 holds the number of selected variables, column 3
# the " + "-separated variable names (this is how the columns are filled and
# read further below).
# FIX(review): the original called the non-existent function conames() (typo
# for colnames, a hard runtime error) and had the two labels swapped relative
# to the columns' actual contents.
var_store <- data.frame(matrix(ncol = 3, nrow = 6))
colnames(var_store) <- c("alpha", "no_variables", "variables")
var_store[, 1] <- c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0)
# final results table: accuracy
# AIC, train/test accuracy, true-positive rate and AUC, one row per alpha.
fin_results <- data.frame(matrix(ncol = 8, nrow = 6))
colnames(fin_results) <- c("alpha", "aic",
                           "true_p.rl.train", "true_pos_p.rl.train",
                           "ROC.elastic.train$auc",
                           "true_p.rl.test", "true_pos_p.rl.test",
                           "ROC.elastic.test$auc")
fin_results[, 1] <- c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0)
# final results table: elastic net logit plot
# Accuracy at fixed decision boundaries p = 0.01 / 0.02 / 0.03 / 0.10.
net_logit_val <- data.frame(matrix(ncol = 5, nrow = 6))
colnames(net_logit_val) <- c("alpha", "X=0.01", "X=0.02", "X=0.03", "X=0.10")
net_logit_val[, 1] <- c(0.0, 0.2, 0.4, 0.6, 0.8, 1.0)
# DEFINE X AND Ys
# Model matrix without the intercept column; outcome as a one-column matrix.
X.val <- as.matrix(model.matrix(converted_to_VISIT ~., train)[,-1])
Y.val <- as.matrix(train[,1])
# DEFINE ALPHA
# alpha is set manually (0 = ridge ... 1 = lasso) and the script is re-run
# once per value; results accumulate in the tables defined above.
alp = 0.0
alp_char <- as.character(alp)
# DEFINE LAMBDA
# 10-fold CV picks lambda; the 1-SE rule (lambda.1se) is used for the final fit.
fit.elastic.cv <- cv.glmnet(X.val, Y.val, alpha=alp, family="binomial",
                            nfolds = 10, type.measure = "deviance")
fit.elastic.1se <- glmnet(X.val, Y.val, alpha=alp, family="binomial",
                          lambda=fit.elastic.cv$lambda.1se)
fit.elastic.1se.beta <- coef(fit.elastic.1se)
# test and see values
plot(fit.elastic.cv)
#plot(fit.elastic.1se)
#fit.elastic.cv
#fit.elastic.1se
# FIND VARIABLES ACCORDING TO RIDGE/LASSO ELASTIC SELECTION
# Keep only coefficients that were not shrunk to exactly zero.
beta.elastic <- fit.elastic.1se.beta[which(fit.elastic.1se.beta !=0),]
beta.elastic <- as.matrix(beta.elastic)
# SAVE THE CHOSEN VALUES IN DF
# clean up text
rownames_list <- rownames(beta.elastic)
# Collapse the selected variable names into one " + "-separated string and
# strip the intercept term so the string can be reused as a formula RHS.
p0lm <- function(X){paste0(X, collapse = " + ")}
raw_text <- p0lm(rownames(beta.elastic))
clean_text <- gsub("\\(Intercept\\) \\+ ", "", raw_text)
# enter into table
# Write the count and the variable string into the row matching the current
# alpha.
# FIX(review): the original filled each row via ifelse(alp == ..., value),
# which errors whenever the condition is FALSE (ifelse() requires a 'no'
# argument), and the alp == 1.0 text was written to the non-existent
# column 23 instead of column 3.
row_i <- match(alp, var_store$alpha)
var_store[row_i, 2] <- length(rownames_list)
var_store[row_i, 3] <- clean_text
# DO LOGISTIC REGRESSION WITH VARIABLES CHOSEN ABOVE
# pull variables as string
# as.character(0.0) == "0" and as.character(1.0) == "1", hence the bare
# "0"/"1" keys in the switch.
picked_var <- as.character(switch(alp_char, "0" = {var_store[1,3]},
                                  "0.2" = {var_store[2,3]},
                                  "0.4" = {var_store[3,3]},
                                  "0.6" = {var_store[4,3]},
                                  "0.8" = {var_store[5,3]},
                                  "1" = {var_store[6,3]}))
# run regression
# Refit an unpenalized logit on the elastic-net-selected predictors so that
# AIC and standard errors are available.
formula_text <- paste("converted_to_VISIT ~ ", picked_var)
fit.elastic <- glm(as.formula(formula_text), family=binomial(link = 'logit'),
                   data = train)
# GET MODEL SUMMARY [TRAIN DATA]
fit.elastic.summary <- fit.elastic %>% summary
fit.elastic
fit.elastic.summary
# get model AIC
fit.elastic$aic
# GET PREDICTON [TRAIN]
# Evaluate the refitted model on the training set: confusion table, accuracy,
# true-positive rate and ROC/AUC at a p = 0.5 decision boundary.
train.rl <- train
train.rl$p_elastic <- predict(fit.elastic, type = "response")
# calculate accuracy (~RMSE)
train.rl$predicted_conversion <- ifelse(train.rl$p_elastic > .5, 1, 0)
act_vs_pred.rl.train <- table(train.rl$converted_to_VISIT, train.rl$predicted_conversion)
true_p.rl.train <- mean(train.rl$converted_to_VISIT==train.rl$predicted_conversion)
true_pos_p.rl.train <- act_vs_pred.rl.train[2,2]/sum(act_vs_pred.rl.train[,2])
act_vs_pred.rl.train
true_p.rl.train
true_pos_p.rl.train
# calculate and plot ROC
ROC.elastic.train <- roc(converted_to_VISIT ~ p_elastic, data = train.rl)
plot(ROC.elastic.train)
ROC.elastic.train
# GET PREDICTON [TEST]
# Same evaluation on the held-out test set.
test.rl <- test
test.rl$p_elastic <- predict(fit.elastic, newdata = test.rl, type = "response")
# calculate accuracy rate
test.rl$predicted_conversion <- ifelse(test.rl$p_elastic > .5, 1, 0)
act_vs_pred.rl.test <- table(test.rl$converted_to_VISIT, test.rl$predicted_conversion)
true_p.rl.test <- mean(test.rl$converted_to_VISIT==test.rl$predicted_conversion)
true_pos_p.rl.test <- act_vs_pred.rl.test[2,2]/sum(act_vs_pred.rl.test[,2])
act_vs_pred.rl.test
true_p.rl.test
# FIX(review): the original printed true_pos_p.logreg.test here — the metric
# of the baseline model — instead of the value computed two lines above.
true_pos_p.rl.test
# calculate and plot ROC
ROC.elastic.test <- roc(converted_to_VISIT ~ p_elastic, data = test.rl)
plot(ROC.elastic.test)
ROC.elastic.test
ROC.elastic.test$auc
# CUSTOM FUNCTION TO PLOT ELAST NET LOGIT PLOT
# lapply.uvec: lapply that returns a plain vector.
# perc.f.75(i): test-set accuracy when classifying positive at boundary p > i.
lapply.uvec <- function(X,fx){lapply(X,fx) %>% unlist %>% as.vector}
perc.f.75 <- function(i){sum(as.numeric(as.numeric(test.rl$p_elastic > i )
                                        == (test.rl$converted_to_VISIT %>% unique)[2]) )/nrow(test.rl)}
pY.75 <- lapply.uvec(((1:1000)/1000),perc.f.75)
# PLOT ELASTIC NET LOGIT PLOT
# Accuracy as a function of the decision boundary, over p = 0.001 ... 1.000.
results.elasticlogit <- data.frame(X=c(((1:1000)/1000)),p.75=pY.75)
results.elasticlogit.plot <- ggplot(results.elasticlogit , aes(X)) +
  geom_line(aes(y = p.75, colour = "p.75")) +
  ggtitle("Elastic Net Logit") + ylab("Accuracy Rate at p=X decision boundary")
results.elasticlogit.plot
# INSERT VALUES INTO FINAL RESULTS TABLES
# final results table: accuracy
# Write all metrics for the current alpha into its matching row in one pass.
# FIX(review): the original used 42 per-cell ifelse(alp == ..., value) calls;
# ifelse() without a 'no' argument raises "argument 'no' is missing" whenever
# the condition is FALSE, so every non-matching line errored on each run.
row_i <- match(alp, fin_results$alpha)
fin_results[row_i, 2] <- fit.elastic$aic
fin_results[row_i, 3] <- true_p.rl.train
fin_results[row_i, 4] <- true_pos_p.rl.train
# as.numeric() strips pROC's "auc" class so a plain number is stored.
fin_results[row_i, 5] <- as.numeric(ROC.elastic.train$auc)
fin_results[row_i, 6] <- true_p.rl.test
fin_results[row_i, 7] <- true_pos_p.rl.test
fin_results[row_i, 8] <- as.numeric(ROC.elastic.test$auc)
fin_results
# final results table: elastic net logit plot
# Accuracy of the elastic net logit at fixed decision boundaries, stored in
# the row matching the current alpha.
# FIX(review): replaces the duplicated per-cell ifelse(alp == ..., value)
# lines (the row-1 block appeared twice), which error whenever the condition
# is FALSE because ifelse() requires a 'no' argument.
row_i <- match(alp, net_logit_val$alpha)
net_logit_val[row_i, 2] <- perc.f.75(0.01)
net_logit_val[row_i, 3] <- perc.f.75(0.02)
net_logit_val[row_i, 4] <- perc.f.75(0.03)
net_logit_val[row_i, 5] <- perc.f.75(0.10)
net_logit_val
# Show all three result tables after the current alpha run.
var_store
fin_results
net_logit_val
# Export the three result tables to the workbook.
# NOTE(review): this first call has no append=TRUE, so it overwrites the
# workbook (including the fit.logit.summary sheet written earlier) —
# presumably intentional; confirm before relying on that sheet.
write.xlsx(fin_results, file="imp1_script_logreg_RESULTS.xlsx", sheetName="1 - fin_results")
write.xlsx(net_logit_val, file="imp1_script_logreg_RESULTS.xlsx", sheetName="2 - net_logit_val", append=TRUE)
write.xlsx(var_store, file="imp1_script_logreg_RESULTS.xlsx", sheetName="3 - var_store", append=TRUE)
# CUSTOM FUNCTION: GET ROWNAMES TO PASTE INTO FUNCTION (after define alpha part)
# rownames(beta.elastic)
# p0lm <- function(X){paste0(X, collapse = " + ")}
# p0lm(rownames(beta.elastic))
##################################################################################################
##################################################################################################
##################################################################################################
# ALTERNATIVE FUNCTIONS... didn't work
# NOTE(review): these fail because ifelse(test, yes) errors without a 'no'
# argument when test is FALSE, and test.rl$aic is NULL (the AIC lives in
# fit.elastic$aic, not in the data frame).
# net_logit_insert_vector <- c(alp, perc.f.75(0.01), perc.f.75(0.02), perc.f.75(0.03), perc.f.75(0.10))
# net_logit_val[1,] <- ifelse(alp == 0.0, net_logit_insert_vector)
# net_logit_val[2,] <- ifelse(alp == 0.2, net_logit_insert_vector)
# net_logit_val[3,] <- ifelse(alp == 0.4, net_logit_insert_vector)
# net_logit_val[4,] <- ifelse(alp == 0.6, net_logit_insert_vector)
# net_logit_val[5,] <- ifelse(alp == 0.8, net_logit_insert_vector)
# net_logit_val[6,] <- ifelse(alp == 1.0, net_logit_insert_vector)
# net_logit_val
# fin_results_insert_vector <- c(alp, test.rl$aic, true_p.rl.train, true_pos_p.rl.train,
#                               ROC.elastic.train$auc, true_p.rl.test, true_pos_p.rl.test,
#                               ROC.elastic.test$auc)
# fin_results[1,] <- ifelse(alp == 0.0, fin_results_insert_vector)
# fin_results[2,] <- ifelse(alp == 0.2, fin_results_insert_vector)
# fin_results[3,] <- ifelse(alp == 0.4, fin_results_insert_vector)
# fin_results[4,] <- ifelse(alp == 0.6, fin_results_insert_vector)
# fin_results[5,] <- ifelse(alp == 0.8, fin_results_insert_vector)
# fin_results[6,] <- ifelse(alp == 1.0, fin_results_insert_vector)
# fin_results
|
# Prepare a few descriptive plots and statistics for the PAA proposal
library(tidyverse)
library(data.table)
library(foreign)
library(survival)
library(broom)
# 0.1 load data
# --------------
# EDAD households and follow up data
# NOTE(review): expects follow.up.RData in the working directory; it provides
# the follow.up object used below.
load('follow.up.RData')
#
# load('EDAD2008Hogares.RData')
#
# # make datatable functions available
# as.data.table(ed.hog) -> ed.hog
as.data.table(follow.up) -> follow.up
# # use the labels
# ed.hog2 <- as.data.table(lapply(ed.hog, function(e) {if (class(e)=='labelled') {as_factor(e)} else {as.integer(e) } }) )
# tolower(names(ed.hog2)) -> names(ed.hog2)
#
# # 1. Extract variables and make a working dataset
# # -----------------------------------------------
#
# # see the two data sets
# glimpse(ed.hog2)
#
glimpse(follow.up)
# Coerce the section identifier to integer so it matches EDAD.mayor's key
# type for the merge further below.
follow.up[,nseccion:= as.integer(as.character(nseccion))]
# extract variables for
# names(ed.hog2)
#
# merge(ed.hog2[,c(4:10,17:22,27),with=F],follow.up, by=c('nseccion','dc','viv','hog','nord'), all=T) -> link
# class(link)
#
# link[,enlazado:=!is.na(estado)]
#
# # 2. Stats on disability in the linked data
# table(link$limit)
# table(link$certmi)
# table(link$dislim)
#
# link %>% dplyr::mutate(tt = ifelse(limit!="No",TRUE,FALSE)) %>% dplyr::count(tt)
# # 1 FALSE 232551
# # 2 TRUE 25636
# link <- link %>% mutate(limbo = ifelse(limit!="No",1,0))
################################################
# Data of interest is hidden in another source #
# ---------------------------------------------#
################################################
# Read the SPSS file of survey respondents aged 65+.
EDAD.mayor = read.spss("TOTAL-EDAD-2008-Mayores.sav", to.data.frame=TRUE)
summary(EDAD.mayor$NumPersonas65)
summary(EDAD.mayor$EDAD) # people 65 plus
# the ones with death information (!BAJA does not mean death!)
# NOTE(review): link.may is only created by the merge further below, so this
# line errors in a fresh session — move it after the merge or drop it.
summary(link.may$anodef)
# There is information on 44 possible disabilities
str(EDAD.mayor$EdadInicioDisca13)
str(EDAD.mayor$EdadInicioDisca44)
# And the year of onset of dependency
str(EDAD.mayor$Edadinicio_cuidado)
# see differences between different entry ages (disabilities 13-19-44)
par(mfrow=c(1,3))
hist(as.numeric(EDAD.mayor$EdadInicioDisca44))
hist(as.numeric(EDAD.mayor$EdadInicioDisca19))
hist(as.numeric(EDAD.mayor$EdadInicioDisca13))
par(mfrow=c(1,1))
# A few checks with variables - to get a feeling for the number of transitions
EDAD.mayor <- data.table(EDAD.mayor)
EDAD.mayor[, .N, .(!is.na(EdadInicioDisca44))]
EDAD.mayor[, .N, .(!is.na(Edadinicio_cuidado))]
EDAD.mayor[, .N, .(as.numeric(EdadInicioDisca44)<as.numeric(Edadinicio_cuidado))]
# ! about 6000 cases we can work with of which 4500 experience a transition from one state to other
# compare to last age
summary(as.numeric(EDAD.mayor$EdadInicioDisca44))
summary(as.numeric(EDAD.mayor$EdadUltimaDisca44)) # could be
# Dependientes13, Dependientes13 Dependientes14 AsistenciaPersonal14
# DemandaCuidados14 EdadInicioDisca44 EdadUltimaDisca44
# select variables
head(names(EDAD.mayor), 1000)
tail(names(EDAD.mayor), 404)
# change variable names for merge process
# Rename EDAD.mayor's key columns to match follow.up's key names.
names(EDAD.mayor)[6:10] <- c("nseccion", "dc", "viv", "hog", "nord")
# Full outer join of the selected EDAD.mayor columns with the follow-up data.
# FIX(review): replaced T/F with TRUE/FALSE (T and F are reassignable) and
# right-assignment (->) with conventional left-assignment.
link.may <- merge(EDAD.mayor[,c(1,4:12,17:29, 53:76, 89:92, 885:898, 960:963, 1321:1329, 1381:1404),with=FALSE],
                  follow.up, by=c('nseccion','dc','viv','hog','nord'), all=TRUE)
class(link.may)
# enlazado = record could be linked to the follow-up (has an estado value).
link.may[,enlazado:=!is.na(estado)]
link.may[,.N,.(enlazado,estado)]
# from the 6568 who are not linked, 2.127 are deaths which occurred in 2017
# (INE quote, translated from Spanish:) "The rest are either emigrations,
# register clean-ups, or other causes. The other 469 (= 6,963 - 6,494) are
# records whose case history we would have to analyze one by one
# (identifiers may have been modified for various reasons over the years)."
# now reduce to the ones which are in the original data (65+)
table(link.may$enlazado)
# Choose the linked ones and the ones with age information
link.may <- link.may %>% filter(!is.na(EDAD)) %>% filter(enlazado) %>% ### 38985 individuals (65 +, followed up)
  # and make an event variable with the sure information we have (death year and month)
  mutate(event=ifelse(!is.na(anodef),1,0))
# !This will censor everybody after 31.12.2016 - even if some information is available
# time to death or censor var mutate(age.d = EDAD + (2008.5 - (a_salida)))
link.may <- data.table(link.may)
link.may[estado=='B',.N ,keyby=.(Con.ANODEF=(!is.na(anodef)), con.CAUSA = (!is.na(causa)))]
## 1783 (11.74%) deaths without cause and date
# Impute a death date for the ones without assigned date but information on state at exit ("B")
summary(link.may$a_salida) # very few! only the ones who left?
# Check the "bajas"
link.may %>% count(estado=='B' & is.na(anodef) & is.na(a_salida)) # 1689 bajas don´t have a year of death or exit from the survey
link.may %>% count(estado=='B' & !is.na(a_salida))
link.may %>% count(estado=='B' & !is.na(a_salida) & is.na(causa)) # 94 have a year of exit but no cause (death, censor)
# See if that is in line with the INE email
link.may %>% count(event==1 & is.na(a_salida)) # exit year does not coincide with death year
link.may %>% count(event==1 & is.na(causa)) # 0 cases = which encourages one to censor at 12/2016
#########################################################################################################################
#########################################################################################################################
### For now this is out commented as have no further information on the bajas in 2017
#########################################################################################################################
#########################################################################################################################
### all possible ways I can think of over the top the head to impute the date from one of the two situations
### ---------------------------------------------------------------------------------------------------------
#
# # If there is neither a death date nor an exit date but the "baja" information, we approximate by the medium time
# link.may[estado=='B' & is.na(anodef) & is.na(a_salida), ':='(anodef=2013, mesdef=1, a_salida=2013, m_salida=1)]
# # If there is a death date this will become the exit date (absorbing state)
# link.may[estado=='B' & !is.na(anodef) & !is.na(causa), ':='(a_salida=anodef, m_salida=mesdef)]
# # If there is a age at death information but no exit year
# link.may[estado=='B' & !is.na(a_salida), ':='(anodef=a_salida, mesdef=m_salida)] # ??? - !is.na(causa)
# link.may %>% count(estado=='B')
# link.may %>% count(estado=='B' & !is.na(a_salida)) # ok! Same number (assuming everyone has an exit year)
# # Assign a censoring date for censored cases
# # for now: last month of 2017 where event happened
# max(link.may$m_salida[link.may$estado=='B' & link.may$a_salida==2017]) # Last month = May
#########################################################################################################################
#########################################################################################################################
# For now the analysis is censored at 31.12.2016
# Censored are all individuals without event before 31.12.2016
link.may[event==0, ':='(a_salida=2016, m_salida=12)]
# for the rest it is the year/month of death information
link.may[event==1, ':='(a_salida=anodef, m_salida=mesdef)]
# Quick check! Looks okay!
summary(link.may$a_salida)
# For earlier problems
# head(link.may[is.na(link.may$a_salida)]) # this person is censored at the end but does not any other time information
# link.may[is.na(a_salida), ':='(a_salida=2017, m_salida=5)] # impute censoring date
# double-check age at different states
######################################
# Entry age in 2008 (EDAD)
# ------------------------
summary(link.may$EDAD)
hist(link.may$EDAD, nclass = 42, main = "", xlab = "Age") # age 65 years seem to be overrepresented (also more 66)
# --- assumption: ??
# creating an exit age variable based on a_salida (2007 as first year for data collection -
# some deaths are early in 2007 and lead to entry age = exit age problem)
### ------------
### AGE AT EXIT
### ------------
# age.ex = age at entry + elapsed time from the assumed first-interview date
# (2006.99, i.e. late 2006/early 2007 — TODO confirm the interview variable).
link.may <- link.may %>%
  # first making the exit age smoother by adding the month and than substracting the first interview date (! find variable)
  mutate(age.ex = EDAD + (a_salida+(m_salida/12) - 2006.99))
hist(link.may$age.ex) # looks ok!
# Look at the entry to exit age relationship - no cases with lower
link.may %>% count(EDAD < age.ex)
# entry in disability (What does the 13 mean?)
# --------------------------------------------
str(link.may$Edadinicio_disca44)
link.may$Edadinicio_disca44 <- as.numeric(link.may$Edadinicio_disca44)
summary(link.may$Edadinicio_disca44)
link.may %>% count(Edadinicio_disca44>=65)
# check the other ages - entry to dependency
# ------------------------------------------
str(link.may$Edadinicio_cuidado)
link.may$Edadinicio_cuidado <- as.numeric(link.may$Edadinicio_cuidado)
summary(link.may$Edadinicio_cuidado)
link.may %>% count(Edadinicio_cuidado>=65)
# # possible end of disability
# link.may$EdadFinDiscal13 <- as.numeric(link.may$EdadFinDiscal13)
# summary(link.may$EdadFinDiscal13) # (Spanish: "No hay nadie!" = nobody there)
# --------------------------------------------
# save(link.may, file='010_mayor.link.RData')
# --------------------------------------------
# rm(ed.hog, ed.hog2, EDAD.mayor, follow.up)
|
/PAA2019CFP.R
|
no_license
|
mvoigt87/EDAD
|
R
| false
| false
| 9,567
|
r
|
# Prepare a few descriptive plots and statistics for the PAA proposal
# (duplicate copy of the PAA script that also appears earlier in this file)
library(tidyverse)
library(data.table)
library(foreign)
library(survival)
library(broom)
# 0.1 load data
# --------------
# EDAD households and follow up data
# NOTE(review): expects follow.up.RData in the working directory.
load('follow.up.RData')
#
# load('EDAD2008Hogares.RData')
#
# # make datatable functions available
# as.data.table(ed.hog) -> ed.hog
as.data.table(follow.up) -> follow.up
# # use the labels
# ed.hog2 <- as.data.table(lapply(ed.hog, function(e) {if (class(e)=='labelled') {as_factor(e)} else {as.integer(e) } }) )
# tolower(names(ed.hog2)) -> names(ed.hog2)
#
# # 1. Extract variables and make a working dataset
# # -----------------------------------------------
#
# # see the two data sets
# glimpse(ed.hog2)
#
glimpse(follow.up)
# Coerce the section identifier to integer to match the merge key type below.
follow.up[,nseccion:= as.integer(as.character(nseccion))]
# extract variables for
# names(ed.hog2)
#
# merge(ed.hog2[,c(4:10,17:22,27),with=F],follow.up, by=c('nseccion','dc','viv','hog','nord'), all=T) -> link
# class(link)
#
# link[,enlazado:=!is.na(estado)]
#
# # 2. Stats on disability in the linked data
# table(link$limit)
# table(link$certmi)
# table(link$dislim)
#
# link %>% dplyr::mutate(tt = ifelse(limit!="No",TRUE,FALSE)) %>% dplyr::count(tt)
# # 1 FALSE 232551
# # 2 TRUE 25636
# link <- link %>% mutate(limbo = ifelse(limit!="No",1,0))
################################################
# Data of interest is hidden in another source #
# ---------------------------------------------#
################################################
# Read the SPSS file of survey respondents aged 65+.
EDAD.mayor = read.spss("TOTAL-EDAD-2008-Mayores.sav", to.data.frame=TRUE)
summary(EDAD.mayor$NumPersonas65)
summary(EDAD.mayor$EDAD) # people 65 plus
# the ones with death information (!BAJA does not mean death!)
# NOTE(review): link.may is only created by the merge further below, so this
# line errors in a fresh session — move it after the merge or drop it.
summary(link.may$anodef)
# There is information on 44 possible disabilities
str(EDAD.mayor$EdadInicioDisca13)
str(EDAD.mayor$EdadInicioDisca44)
# And the year of onset of dependency
str(EDAD.mayor$Edadinicio_cuidado)
# see differences between different entry ages (disabilities 13-19-44)
par(mfrow=c(1,3))
hist(as.numeric(EDAD.mayor$EdadInicioDisca44))
hist(as.numeric(EDAD.mayor$EdadInicioDisca19))
hist(as.numeric(EDAD.mayor$EdadInicioDisca13))
par(mfrow=c(1,1))
# A few checks with variables - to get a feeling for the number of transitions
EDAD.mayor <- data.table(EDAD.mayor)
EDAD.mayor[, .N, .(!is.na(EdadInicioDisca44))]
EDAD.mayor[, .N, .(!is.na(Edadinicio_cuidado))]
EDAD.mayor[, .N, .(as.numeric(EdadInicioDisca44)<as.numeric(Edadinicio_cuidado))]
# ! about 6000 cases we can work with of which 4500 experience a transition from one state to other
# compare to last age
summary(as.numeric(EDAD.mayor$EdadInicioDisca44))
summary(as.numeric(EDAD.mayor$EdadUltimaDisca44)) # could be
# Dependientes13, Dependientes13 Dependientes14 AsistenciaPersonal14
# DemandaCuidados14 EdadInicioDisca44 EdadUltimaDisca44
# select variables
head(names(EDAD.mayor), 1000)
tail(names(EDAD.mayor), 404)
# change variable names for merge process
# Rename EDAD.mayor's key columns to match follow.up's key names.
names(EDAD.mayor)[6:10] <- c("nseccion", "dc", "viv", "hog", "nord")
# Full outer join of the selected EDAD.mayor columns with the follow-up data.
# FIX(review): replaced T/F with TRUE/FALSE (T and F are reassignable) and
# right-assignment (->) with conventional left-assignment.
link.may <- merge(EDAD.mayor[,c(1,4:12,17:29, 53:76, 89:92, 885:898, 960:963, 1321:1329, 1381:1404),with=FALSE],
                  follow.up, by=c('nseccion','dc','viv','hog','nord'), all=TRUE)
class(link.may)
# enlazado = record could be linked to the follow-up (has an estado value).
link.may[,enlazado:=!is.na(estado)]
link.may[,.N,.(enlazado,estado)]
# from the 6568 who are not linked, 2.127 are deaths which occurred in 2017
# (INE quote, translated from Spanish:) "The rest are either emigrations,
# register clean-ups, or other causes. The other 469 (= 6,963 - 6,494) are
# records whose case history we would have to analyze one by one
# (identifiers may have been modified for various reasons over the years)."
# now reduce to the ones which are in the original data (65+)
table(link.may$enlazado)
# Choose the linked ones and the ones with age information
link.may <- link.may %>% filter(!is.na(EDAD)) %>% filter(enlazado) %>% ### 38985 individuals (65 +, followed up)
  # and make an event variable with the sure information we have (death year and month)
  mutate(event=ifelse(!is.na(anodef),1,0))
# !This will censor everybody after 31.12.2016 - even if some information is available
# time to death or censor var mutate(age.d = EDAD + (2008.5 - (a_salida)))
link.may <- data.table(link.may)
link.may[estado=='B',.N ,keyby=.(Con.ANODEF=(!is.na(anodef)), con.CAUSA = (!is.na(causa)))]
## 1783 (11.74%) deaths without cause and date
# Impute a death date for the ones without assigned date but information on state at exit ("B")
summary(link.may$a_salida) # very few! only the ones who left?
# Check the "bajas"
link.may %>% count(estado=='B' & is.na(anodef) & is.na(a_salida)) # 1689 bajas don´t have a year of death or exit from the survey
link.may %>% count(estado=='B' & !is.na(a_salida))
link.may %>% count(estado=='B' & !is.na(a_salida) & is.na(causa)) # 94 have a year of exit but no cause (death, censor)
# See if that is in line with the INE email
link.may %>% count(event==1 & is.na(a_salida)) # exit year does not coincide with death year
link.may %>% count(event==1 & is.na(causa)) # 0 cases = which encourages one to censor at 12/2016
#########################################################################################################################
#########################################################################################################################
### For now this is out commented as have no further information on the bajas in 2017
#########################################################################################################################
#########################################################################################################################
### all possible ways I can think of over the top the head to impute the date from one of the two situations
### ---------------------------------------------------------------------------------------------------------
#
# # If there is neither a death date nor an exit date but the "baja" information, we approximate by the medium time
# link.may[estado=='B' & is.na(anodef) & is.na(a_salida), ':='(anodef=2013, mesdef=1, a_salida=2013, m_salida=1)]
# # If there is a death date this will become the exit date (absorbing state)
# link.may[estado=='B' & !is.na(anodef) & !is.na(causa), ':='(a_salida=anodef, m_salida=mesdef)]
# # If there is a age at death information but no exit year
# link.may[estado=='B' & !is.na(a_salida), ':='(anodef=a_salida, mesdef=m_salida)] # ??? - !is.na(causa)
# link.may %>% count(estado=='B')
# link.may %>% count(estado=='B' & !is.na(a_salida)) # ok! Same number (assuming everyone has an exit year)
# # Assign a censoring date for censored cases
# # for now: last month of 2017 where event happened
# max(link.may$m_salida[link.may$estado=='B' & link.may$a_salida==2017]) # Last month = May
#########################################################################################################################
#########################################################################################################################
# For now the analysis is censored at 31.12.2016
# Censored are all individuals without event before 31.12.2016
link.may[event==0, ':='(a_salida=2016, m_salida=12)]
# for the rest it is the year/month of death information
link.may[event==1, ':='(a_salida=anodef, m_salida=mesdef)]
# Quick check! Looks okay!
summary(link.may$a_salida)
# For earlier problems
# head(link.may[is.na(link.may$a_salida)]) # this person is censored at the end but does not any other time information
# link.may[is.na(a_salida), ':='(a_salida=2017, m_salida=5)] # impute censoring date
# double-check age at different states
######################################
# Entry age in 2008 (EDAD)
# ------------------------
summary(link.may$EDAD)
hist(link.may$EDAD, nclass = 42, main = "", xlab = "Age") # age 65 years seem to be overrepresented (also more 66)
# --- assumption: ??
# creating an exit age variable based on a_salida (2007 as first year for data collection -
# some deaths are early in 2007 and lead to entry age = exit age problem)
### ------------
### AGE AT EXIT
### ------------
link.may <- link.may %>%
# first making the exit age smoother by adding the month and than substracting the first interview date (! find variable)
mutate(age.ex = EDAD + (a_salida+(m_salida/12) - 2006.99))
hist(link.may$age.ex) # looks ok!
# Look at the entry to exit age relationship - no cases with lower
link.may %>% count(EDAD < age.ex)
# entry in disability (What does the 13 mean?)
# --------------------------------------------
str(link.may$Edadinicio_disca44)
link.may$Edadinicio_disca44 <- as.numeric(link.may$Edadinicio_disca44)
summary(link.may$Edadinicio_disca44)
link.may %>% count(Edadinicio_disca44>=65)
# check the other ages - entry to dependency
# ------------------------------------------
str(link.may$Edadinicio_cuidado)
link.may$Edadinicio_cuidado <- as.numeric(link.may$Edadinicio_cuidado)
summary(link.may$Edadinicio_cuidado)
link.may %>% count(Edadinicio_cuidado>=65)
# # possible end of disability
# link.may$EdadFinDiscal13 <- as.numeric(link.may$EdadFinDiscal13)
# summary(link.may$EdadFinDiscal13) # No hay nadie!
# --------------------------------------------
# save(link.may, file='010_mayor.link.RData')
# --------------------------------------------
# rm(ed.hog, ed.hog2, EDAD.mayor, follow.up)
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_aglu_land_input_2_xml
#'
#' Construct XML data structure for \code{land_input_2.xml}.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{land_input_2.xml}. The corresponding file in the
#' original data system was \code{batch_land_input_2.xml.R} (aglu XML).
module_aglu_land_input_2_xml <- function(command, ...) {
  # Level-2 land-allocation tables consumed by this chunk; declared once so
  # DECLARE_INPUTS, the data loading and the precursor list stay in sync.
  input_names <- c("L222.LN2_Logit",
                   "L222.LN2_HistUnmgdAllocation",
                   "L222.LN2_UnmgdAllocation",
                   "L222.LN2_HistMgdAllocation",
                   "L222.LN2_MgdAllocation",
                   "L222.LN2_UnmgdCarbon",
                   "L222.LN2_MgdCarbon")
  if(command == driver.DECLARE_INPUTS) {
    return(input_names)
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c(XML = "land_input_2.xml"))
  } else if(command == driver.MAKE) {
    all_data <- list(...)[[1]]

    # Load required inputs into a named list instead of one binding per table
    tbl <- lapply(stats::setNames(input_names, input_names),
                  function(nm) get_data(all_data, nm))

    # ===================================================
    # Produce outputs: logit tables first, then allocation and carbon data,
    # then node renaming; precursors record the dependency chain.
    land_input_2.xml <- create_xml("land_input_2.xml") %>%
      add_logit_tables_xml(tbl[["L222.LN2_Logit"]], "LN2_Logit") %>%
      add_xml_data(tbl[["L222.LN2_HistUnmgdAllocation"]], "LN2_HistUnmgdAllocation") %>%
      add_xml_data(tbl[["L222.LN2_UnmgdAllocation"]], "LN2_UnmgdAllocation") %>%
      add_xml_data(tbl[["L222.LN2_HistMgdAllocation"]], "LN2_HistMgdAllocation") %>%
      add_xml_data(tbl[["L222.LN2_MgdAllocation"]], "LN2_MgdAllocation") %>%
      add_xml_data(tbl[["L222.LN2_UnmgdCarbon"]], "LN2_UnmgdCarbon") %>%
      add_xml_data(tbl[["L222.LN2_MgdCarbon"]], "LN2_MgdCarbon") %>%
      add_rename_landnode_xml() %>%
      add_precursors("L222.LN2_Logit", "L222.LN2_HistUnmgdAllocation", "L222.LN2_UnmgdAllocation", "L222.LN2_HistMgdAllocation", "L222.LN2_MgdAllocation", "L222.LN2_UnmgdCarbon", "L222.LN2_MgdCarbon")

    return_data(land_input_2.xml)
  } else {
    stop("Unknown command")
  }
}
|
/input/gcamdata/R/zaglu_xml_land_input_2.R
|
permissive
|
JGCRI/gcam-core
|
R
| false
| false
| 2,592
|
r
|
# Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_aglu_land_input_2_xml
#'
#' Construct XML data structure for \code{land_input_2.xml}.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{land_input_2.xml}. The corresponding file in the
#' original data system was \code{batch_land_input_2.xml.R} (aglu XML).
module_aglu_land_input_2_xml <- function(command, ...) {
  if(command == driver.DECLARE_INPUTS) {
    # Level-2 land-allocation tables produced by upstream L222 processing chunks
    return(c("L222.LN2_Logit",
             "L222.LN2_HistUnmgdAllocation",
             "L222.LN2_UnmgdAllocation",
             "L222.LN2_HistMgdAllocation",
             "L222.LN2_MgdAllocation",
             "L222.LN2_UnmgdCarbon",
             "L222.LN2_MgdCarbon"))
  } else if(command == driver.DECLARE_OUTPUTS) {
    return(c(XML = "land_input_2.xml"))
  } else if(command == driver.MAKE) {
    all_data <- list(...)[[1]]
    # Load required inputs
    L222.LN2_Logit <- get_data(all_data, "L222.LN2_Logit")
    L222.LN2_HistUnmgdAllocation <- get_data(all_data, "L222.LN2_HistUnmgdAllocation")
    L222.LN2_UnmgdAllocation <- get_data(all_data, "L222.LN2_UnmgdAllocation")
    L222.LN2_HistMgdAllocation <- get_data(all_data, "L222.LN2_HistMgdAllocation")
    L222.LN2_MgdAllocation <- get_data(all_data, "L222.LN2_MgdAllocation")
    L222.LN2_UnmgdCarbon <- get_data(all_data, "L222.LN2_UnmgdCarbon")
    L222.LN2_MgdCarbon <- get_data(all_data, "L222.LN2_MgdCarbon")
    # ===================================================
    # Produce outputs
    # Assemble the XML: logit tables first, then the allocation and carbon
    # tables, then node renaming; add_precursors records the dependency chain.
    create_xml("land_input_2.xml") %>%
      add_logit_tables_xml(L222.LN2_Logit, "LN2_Logit") %>%
      add_xml_data(L222.LN2_HistUnmgdAllocation, "LN2_HistUnmgdAllocation") %>%
      add_xml_data(L222.LN2_UnmgdAllocation, "LN2_UnmgdAllocation") %>%
      add_xml_data(L222.LN2_HistMgdAllocation, "LN2_HistMgdAllocation") %>%
      add_xml_data(L222.LN2_MgdAllocation, "LN2_MgdAllocation") %>%
      add_xml_data(L222.LN2_UnmgdCarbon, "LN2_UnmgdCarbon") %>%
      add_xml_data(L222.LN2_MgdCarbon, "LN2_MgdCarbon") %>%
      add_rename_landnode_xml() %>%
      add_precursors("L222.LN2_Logit", "L222.LN2_HistUnmgdAllocation", "L222.LN2_UnmgdAllocation", "L222.LN2_HistMgdAllocation", "L222.LN2_MgdAllocation", "L222.LN2_UnmgdCarbon", "L222.LN2_MgdCarbon") ->
      land_input_2.xml
    return_data(land_input_2.xml)
  } else {
    stop("Unknown command")
  }
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Plate_Layout.R
\name{Plate_Layout_count}
\alias{Plate_Layout_count}
\title{Return the number of wells}
\value{
the number of wells
}
\description{
Return the number of wells
}
\seealso{
Other PLATE_LAYOUT: \code{\link{Create_Plate_Layout}};
\code{\link{Plate_Layout_check_names}};
\code{\link{Plate_Layout_colnames}};
\code{\link{Plate_Layout_dimnames}};
\code{\link{Plate_Layout_dim}};
\code{\link{Plate_Layout_get_names}};
\code{\link{Plate_Layout_get_types}};
\code{\link{Plate_Layout_read_file}};
\code{\link{Plate_Layout_read_text}};
\code{\link{Plate_Layout_rownames}};
\code{\link{Plate_Layout_show}};
\code{\link{Plate_Layout_to_tecan}};
\code{\link{Plate_Layout_write_file}};
\code{\link{Plate_Layout}}
}
|
/man/Plate_Layout_count.Rd
|
no_license
|
BigelowLab/plateLayout
|
R
| false
| false
| 827
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Plate_Layout.R
\name{Plate_Layout_count}
\alias{Plate_Layout_count}
\title{Return the number of wells}
\value{
the number of wells
}
\description{
Return the number of wells
}
\seealso{
Other PLATE_LAYOUT: \code{\link{Create_Plate_Layout}};
\code{\link{Plate_Layout_check_names}};
\code{\link{Plate_Layout_colnames}};
\code{\link{Plate_Layout_dimnames}};
\code{\link{Plate_Layout_dim}};
\code{\link{Plate_Layout_get_names}};
\code{\link{Plate_Layout_get_types}};
\code{\link{Plate_Layout_read_file}};
\code{\link{Plate_Layout_read_text}};
\code{\link{Plate_Layout_rownames}};
\code{\link{Plate_Layout_show}};
\code{\link{Plate_Layout_to_tecan}};
\code{\link{Plate_Layout_write_file}};
\code{\link{Plate_Layout}}
}
|
# Create the data
# (8 patients dosed so far; y = 1 marks the single DLT, observed at 10 mg in cohort 5)
data <- Data(x=c(0.1, 0.5, 1.5, 3, 6, 10, 10, 10),
y=c(0, 0, 0, 0, 0, 0, 1, 0),
cohort=c(0, 1, 2, 3, 4, 5, 5, 5),
doseGrid=
c(0.1, 0.5, 1.5, 3, 6,
seq(from=10, to=80, by=2)))
# Initialize the CRM model used to model the data
# (LogisticLogNormal: bivariate normal prior with the given mean/cov, refDose = 56)
model <- LogisticLogNormal(mean=c(-0.85, 1),
cov=
matrix(c(1, -0.5, -0.5, 1),
nrow=2),
refDose=56)
# Set-up some MCMC parameters and generate samples from the posterior
# (100 burn-in iterations, thinning step 2, 2000 retained samples)
options <- McmcOptions(burnin=100,
step=2,
samples=2000)
set.seed(94)
samples <- mcmc(data, model, options)
# Define the rule for dose increments and calculate the maximum dose allowed
# (relative increments of 100% below 20 and 33% from 20 upwards)
myIncrements <- IncrementsRelative(intervals=c(0, 20),
increments=c(1, 0.33))
nextMaxDose <- maxDose(myIncrements,
data=data)
# Define the rule which will be used to select the next best dose
# based on the class 'NextBestNCRM'
# (target toxicity band 20-35%; doses with >25% posterior overdose probability excluded)
myNextBest <- NextBestNCRM(target=c(0.2, 0.35),
overdose=c(0.35, 1),
maxOverdoseProb=0.25)
# Calculate the next best dose
doseRecommendation <- nextBest(myNextBest,
doselimit=nextMaxDose,
samples=samples, model=model, data=data)
# Define the stopping rule such that the study would be stopped if at least 9
# patients were already dosed within 1 +/- 0.2 of the next best dose
myStopping <- StoppingPatientsNearDose(nPatients = 9,
percentage = 0.2)
# Evaluate if to stop the trial
stopTrial(stopping=myStopping,
dose=doseRecommendation$value,
data=data)
|
/examples/Rules-method-stopTrial-StoppingPatientsNearDose.R
|
no_license
|
insightsengineering/crmPack
|
R
| false
| false
| 1,858
|
r
|
# Create the data
# (8 patients dosed so far; y = 1 marks the single DLT, observed at 10 mg in cohort 5)
data <- Data(x=c(0.1, 0.5, 1.5, 3, 6, 10, 10, 10),
y=c(0, 0, 0, 0, 0, 0, 1, 0),
cohort=c(0, 1, 2, 3, 4, 5, 5, 5),
doseGrid=
c(0.1, 0.5, 1.5, 3, 6,
seq(from=10, to=80, by=2)))
# Initialize the CRM model used to model the data
# (LogisticLogNormal: bivariate normal prior with the given mean/cov, refDose = 56)
model <- LogisticLogNormal(mean=c(-0.85, 1),
cov=
matrix(c(1, -0.5, -0.5, 1),
nrow=2),
refDose=56)
# Set-up some MCMC parameters and generate samples from the posterior
# (100 burn-in iterations, thinning step 2, 2000 retained samples)
options <- McmcOptions(burnin=100,
step=2,
samples=2000)
set.seed(94)
samples <- mcmc(data, model, options)
# Define the rule for dose increments and calculate the maximum dose allowed
# (relative increments of 100% below 20 and 33% from 20 upwards)
myIncrements <- IncrementsRelative(intervals=c(0, 20),
increments=c(1, 0.33))
nextMaxDose <- maxDose(myIncrements,
data=data)
# Define the rule which will be used to select the next best dose
# based on the class 'NextBestNCRM'
# (target toxicity band 20-35%; doses with >25% posterior overdose probability excluded)
myNextBest <- NextBestNCRM(target=c(0.2, 0.35),
overdose=c(0.35, 1),
maxOverdoseProb=0.25)
# Calculate the next best dose
doseRecommendation <- nextBest(myNextBest,
doselimit=nextMaxDose,
samples=samples, model=model, data=data)
# Define the stopping rule such that the study would be stopped if at least 9
# patients were already dosed within 1 +/- 0.2 of the next best dose
myStopping <- StoppingPatientsNearDose(nPatients = 9,
percentage = 0.2)
# Evaluate if to stop the trial
stopTrial(stopping=myStopping,
dose=doseRecommendation$value,
data=data)
|
## download data from url address for data file provided in assignment
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "zipfile.zip")
## read data table into R
library(data.table)
# "?" encodes missing values in this dataset; read straight from the zip
data <- read.table(unz("zipfile.zip", "household_power_consumption.txt"), header = TRUE, sep = ";", na.strings = "?")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
## create a subset of the data as defined in the assignment (1-2 Feb 2007)
subsetdata <- data[data$Date %between% c("2007-02-01", "2007-02-02"), ]
## remove zip file; no longer needed
unlink("zipfile.zip")
## create Plot 3
# Combine Date and Time into a single POSIXct timestamp for the x axis
subsetdata$DT <- as.POSIXct(as.character(paste(subsetdata$Date, subsetdata$Time)))
# Set up an empty canvas, then draw one line per sub-metering series.
# (Fix: the original wrapped lines() inside points(..., pch = NA), which only
# drew anything via argument-evaluation side effects; calling lines() directly
# is the intended idiom and produces the same plot.)
plot(x = subsetdata$DT, y = subsetdata$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
lines(x = subsetdata$DT, y = subsetdata$Sub_metering_1, col = "black")
lines(x = subsetdata$DT, y = subsetdata$Sub_metering_2, col = "red")
lines(x = subsetdata$DT, y = subsetdata$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1)
## save as 480x480 PNG (copy the screen device to a PNG device, then close it)
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
/plot3.R
|
no_license
|
dmwieskeds/ExData_Plotting1
|
R
| false
| false
| 1,396
|
r
|
## download data from url address for data file provided in assignment
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "zipfile.zip")
## read data table into R
library(data.table)
# "?" encodes missing values in this dataset; read straight from the zip
data <- read.table(unz("zipfile.zip", "household_power_consumption.txt"), header = TRUE, sep = ";" ,na.strings = "?")
data$Date <- as.Date(data$Date,"%d/%m/%Y")
## create a subset of the data as defined in the assignment (1-2 Feb 2007)
subsetdata <- data[data$Date %between% c("2007-02-01", "2007-02-02"),]
## remove zip file; no longer needed
unlink("zipfile.zip")
## create Plot 3
# Combine Date and Time into a single POSIXct timestamp for the x axis
subsetdata$DT <- as.POSIXct(as.character(paste(subsetdata$Date, subsetdata$Time)))
plot(x=subsetdata$DT,y=subsetdata$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
# NOTE(review): the lines() calls below are evaluated as side effects inside
# points(); points() itself draws nothing because pch = NA. Calling lines()
# directly would be the clearer, equivalent idiom.
points(x=subsetdata$DT,subsetdata$Sub_metering_1, lines(x=subsetdata$DT,y=subsetdata$Sub_metering_1,col = "black"), pch = NA)
points(x=subsetdata$DT,subsetdata$Sub_metering_2, lines(x=subsetdata$DT,y=subsetdata$Sub_metering_2,col = "red"), pch = NA)
points(x=subsetdata$DT,subsetdata$Sub_metering_3, lines(x=subsetdata$DT,y=subsetdata$Sub_metering_3,col = "blue"), pch = NA)
legend("topright", col = c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty = 1)
## save as 480x480 PNG (copy the screen device to a PNG device, then close it)
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed for reproducible resampling splits
set.seed(1)
#:# data
# Fetch the blood-transfusion dataset from OpenML (requires network access)
dataset <- getOMLDataSet(data.name = "blood-transfusion-service-center")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is used as-is
head(dataset$data)
#:# model
# C5.0 classification tree (single boosting trial, minCases = 2), probability output
# (Fixed: `<-` instead of `=` for top-level assignment)
task <- makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn <- makeLearner("classif.C50", par.vals = list(trials = 1L, minCases = 2L), predict.type = "prob")
#:# hash
#:# 46a4d44f60c4a12d1d599236ec041a84
# Reproducibility fingerprint of the task + learner spec (should match the hash above)
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with accuracy, AUC, TNR, TPR, PPV and F1
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# (Fixed: redundant paste0() around a constant string removed)
sink("sessionInfo.txt")
sessionInfo()
sink()
|
/models/openml_blood-transfusion-service-center/classification_Class/46a4d44f60c4a12d1d599236ec041a84/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 729
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed for reproducible resampling splits
set.seed(1)
#:# data
# Fetch the blood-transfusion dataset from OpenML (requires network access)
dataset <- getOMLDataSet(data.name = "blood-transfusion-service-center")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is inspected again as-is
head(dataset$data)
#:# model
# C5.0 classification tree (single boosting trial, minCases = 2), probability output
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.C50", par.vals = list(trials = 1L, minCases = 2L), predict.type = "prob")
#:# hash
#:# 46a4d44f60c4a12d1d599236ec041a84
# Reproducibility fingerprint of the task + learner spec (should match the hash above)
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with accuracy, AUC, TNR, TPR, PPV and F1
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# NOTE(review): paste0() around a single constant string is redundant
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getLikesByMediaCode.R
\name{getLikesByMediaCode}
\alias{getLikesByMediaCode}
\title{Get Likes By Media Code}
\usage{
getLikesByMediaCode(code, n = 10, maxID = "", ...)
}
\arguments{
\item{code}{An Instagram shortcode for a media post}
\item{n}{The number of likes to return}
\item{maxID}{An identifier for a like that indicates where to start searching}
\item{...}{Additional options passed to a shinyAppDir}
}
\value{
n x 7 dataframe - id, username,full_name,profile_pic_url, \cr
is_verified_followed_by_viewer,requested_by_viewer
}
\description{
Gets the first n likes for a media with a given Instagram shortcode
}
\examples{
\dontrun{ getLikesByMediaCode("W0IL2cujb3", 100)}
}
|
/man/getLikesByMediaCode.Rd
|
no_license
|
AFIT-R/instaExtract
|
R
| false
| true
| 769
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getLikesByMediaCode.R
\name{getLikesByMediaCode}
\alias{getLikesByMediaCode}
\title{Get Likes By Media Code}
\usage{
getLikesByMediaCode(code, n = 10, maxID = "", ...)
}
\arguments{
\item{code}{An Instagram shortcode for a media post}
\item{n}{The number of likes to return}
\item{maxID}{An identifier for a like that indicates where to start searching}
\item{...}{Additional options passed to a shinyAppDir}
}
\value{
n x 7 dataframe - id, username,full_name,profile_pic_url, \cr
is_verified_followed_by_viewer,requested_by_viewer
}
\description{
Gets the first n likes for a media with a given Instagram shortcode
}
\examples{
\dontrun{ getLikesByMediaCode("W0IL2cujb3", 100)}
}
|
# plots the world contour lines in maps
# Load the 'World' polygons dataset that ships with the mapping package
data(World)
# Border-only world map: no fill, earth boundary drawn, no outer frame,
# small margin between map components
map_layer <- tm_shape(World) +
tm_borders() +
tm_format("World", earth.boundary = TRUE, frame = FALSE) +
tm_layout(between.margin = 0.1)
|
/5_day/2_multi_testing/R/lib/map_layer.R
|
no_license
|
giscience-fsu/daad_summerschool
|
R
| false
| false
| 198
|
r
|
# plots the world contour lines in maps
# Load the 'World' polygons dataset that ships with the mapping package
data(World)
# Border-only world map: no fill, earth boundary drawn, no outer frame,
# small margin between map components
map_layer <- tm_shape(World) +
tm_borders() +
tm_format("World", earth.boundary = TRUE, frame = FALSE) +
tm_layout(between.margin = 0.1)
|
library(ape)
# Read the Newick tree from file, remove its root, and save the unrooted tree
testtree <- read.tree("6187_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6187_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/6187_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
library(ape)
# Read the Newick tree from file, remove its root, and save the unrooted tree
testtree <- read.tree("6187_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6187_0_unrooted.txt")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.