content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Setup: devtools provides install_github(), used to install the
# development version of textrecipes straight from GitHub.
# NOTE(review): require() returns FALSE instead of erroring when the
# package is missing; library() would fail loudly -- confirm intent.
require("devtools")
install_github("tidymodels/textrecipes")
#We found vacationrentalslbi.com has an extensive list
#of rentals, and doesn’t restrict to the particular
#agency maintaining the listing, so this is the perfect
#place to gather our data. Naturally, we would like to
#be polite, so check that we are allowed to scrape.
#We can see from the R robotstxt library, which indicates
#that paths_allowed is TRUE, that we are good to go with
#our intended link.
# Robots.txt says okay to scrape
robotstxt::paths_allowed('https://www.vacationrentalslbi.com/search/for.rent/sleeps_min.4/')
robotstxt::paths_allowed('http://www.unirio.br/')
# Attach robotstxt so paths_allowed() can be called without the
# namespace prefix in the examples below.
library(robotstxt)
# Google's robots.txt allows the root path...
paths_allowed("http://google.com/")
## [1] TRUE
# ...but disallows the /search path.
paths_allowed("http://google.com/search")
## [1] FALSE
| /robotstxt.R | no_license | DATAUNIRIO/SER_III_WebScraping | R | false | false | 777 | r | require("devtools")
install_github("tidymodels/textrecipes")
#We found vacationrentalslbi.com has an extensive list
#of rentals, and doesn’t restrict to the particular
#agency maintaining the listing, so this is the perfect
#place to gather our data. Naturally, we would like to
#be polite, so check that we are allowed to scrape.
#We can see from the R robotstxt library, which indicates
#that paths_allowed is TRUE, that we are good to go with
#our intended link.
# Robots.txt says okay to scrape
robotstxt::paths_allowed('https://www.vacationrentalslbi.com/search/for.rent/sleeps_min.4/')
robotstxt::paths_allowed('http://www.unirio.br/')
library(robotstxt)
paths_allowed("http://google.com/")
## [1] TRUE
paths_allowed("http://google.com/search")
## [1] FALSE
|
\name{stepp.KM}
\alias{stepp.KM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
The constructor to create the stmodelKM object
}
\description{
This is the constructor function for the stmodelKM object. This object sets up the data with
a stepp model using the Kaplan-Meier method for analysis.\cr
The model explores the treatment-covariate interactions in survival data arising
from two treatment arms of a clinical trial. The treatment effects are measured using survival
functions at a specified time point estimated from the Kaplan-Meier method and the hazard ratio
based on observed-minus-expected estimation. A permutation distribution approach to inference
is implemented, based on permuting the covariate values within each treatment group.
The statistical significance of observed heterogeneity of treatment effects is calculated using
permutation tests:\cr\cr
1) for the maximum difference between each subpopulation effect and the overall population
treatment effect or supremum based test statistic; \cr
2) for the difference between each subpopulation effect and the overall population treatment
effect, which resembles the chi-square statistic.
}
\usage{
stepp.KM(coltrt, survTime, censor, trts, timePoint)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{coltrt}{the treatment variable}
\item{survTime}{the time to event variable}
\item{censor}{the censor variable}
\item{trts}{a vector containing the codes for the 2 treatment arms, 1st and 2nd treatment groups, respectively}
\item{timePoint}{timepoint to estimate survival}
}
\value{
It returns the stmodelKM object.
}
\author{
Wai-Ki Yip
}
\references{
Bonetti M, Gelber RD. Patterns of treatment effects in subsets of patients in clinical trials. Biostatistics 2004; 5(3):465-481.
Bonetti M, Zahrieh D, Cole BF, Gelber RD. A small sample study of the STEPP approach to assessing treatment-covariate interactions in survival data. Statistics in Medicine 2009; 28(8):1255-68.
}
\seealso{
\code{\linkS4class{stwin}}, \code{\linkS4class{stsubpop}}, \code{\linkS4class{stmodelKM}},
\code{\linkS4class{stmodelCI}}, \code{\linkS4class{stmodelGLM}},
\code{\linkS4class{steppes}}, \code{\linkS4class{stmodel}},
\code{\link{stepp.win}}, \code{\link{stepp.subpop}},
\code{\link{stepp.CI}}, \code{\link{stepp.GLM}},
\code{\link{stepp.test}}, \code{\link{estimate}}, \code{\link{generate}}
}
\examples{
#GENERATE TREATMENT VARIABLE:
N <- 1000
Txassign <- sample(c(1,2), N, replace=TRUE, prob=c(1/2, 1/2))
n1 <- length(Txassign[Txassign==1])
n2 <- N - n1
#GENERATE A COVARIATE:
covariate <- rnorm(N, 55, 7)
#GENERATE SURVIVAL AND CENSORING VARIABLES ASSUMING A TREATMENT COVARIATE INTERACTION:
Entry <- sort( runif(N, 0, 5) )
SurvT1 <- .5
beta0 <- -65 / 75
beta1 <- 2 / 75
Surv <- rep(0, N)
lambda1 <- -log(SurvT1) / 4
Surv[Txassign==1] <- rexp(n1, lambda1)
Surv[Txassign==2] <- rexp(n2, (lambda1*(beta0+beta1*covariate[Txassign==2])))
EventTimes <- rep(0, N)
EventTimes <- Entry + Surv
censor <- rep(0, N)
time <- rep(0,N)
for ( i in 1:N )
{
censor[i] <- ifelse( EventTimes[i] <= 7, 1, 0 )
time[i] <- ifelse( EventTimes[i] < 7, Surv[i], 7 - Entry[i] )
}
modKM <- stepp.KM( coltrt=Txassign, survTime=time, censor=censor, trts=c(1,2), timePoint=4)
} | /man/stepp.KM.Rd | no_license | cran/stepp | R | false | false | 3,400 | rd | \name{stepp.KM}
\alias{stepp.KM}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
The constructor to create the stmodelKM object
}
\description{
This is the constructor function for the stmodelKM object. This object sets up the data with
a stepp model using the Kaplan-Meier method for analysis.\cr
The model explores the treatment-covariate interactions in survival data arising
from two treatment arms of a clinical trial. The treatment effects are measured using survival
functions at a specified time point estimated from the Kaplan-Meier method and the hazard ratio
based on observed-minus-expected estimation. A permutation distribution approach to inference
is implemented, based on permuting the covariate values within each treatment group.
The statistical significance of observed heterogeneity of treatment effects is calculated using
permutation tests:\cr\cr
1) for the maximum difference between each subpopulation effect and the overall population
treatment effect or supremum based test statistic; \cr
2) for the difference between each subpopulation effect and the overall population treatment
effect, which resembles the chi-square statistic.
}
\usage{
stepp.KM(coltrt, survTime, censor, trts, timePoint)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{coltrt}{the treatment variable}
\item{survTime}{the time to event variable}
\item{censor}{the censor variable}
\item{trts}{a vector containing the codes for the 2 treatment arms, 1st and 2nd treatment groups, respectively}
\item{timePoint}{timepoint to estimate survival}
}
\value{
It returns the stmodelKM object.
}
\author{
Wai-Ki Yip
}
\references{
Bonetti M, Gelber RD. Patterns of treatment effects in subsets of patients in clinical trials. Biostatistics 2004; 5(3):465-481.
Bonetti M, Zahrieh D, Cole BF, Gelber RD. A small sample study of the STEPP approach to assessing treatment-covariate interactions in survival data. Statistics in Medicine 2009; 28(8):1255-68.
}
\seealso{
\code{\linkS4class{stwin}}, \code{\linkS4class{stsubpop}}, \code{\linkS4class{stmodelKM}},
\code{\linkS4class{stmodelCI}}, \code{\linkS4class{stmodelGLM}},
\code{\linkS4class{steppes}}, \code{\linkS4class{stmodel}},
\code{\link{stepp.win}}, \code{\link{stepp.subpop}},
\code{\link{stepp.CI}}, \code{\link{stepp.GLM}},
\code{\link{stepp.test}}, \code{\link{estimate}}, \code{\link{generate}}
}
\examples{
#GENERATE TREATMENT VARIABLE:
N <- 1000
Txassign <- sample(c(1,2), N, replace=TRUE, prob=c(1/2, 1/2))
n1 <- length(Txassign[Txassign==1])
n2 <- N - n1
#GENERATE A COVARIATE:
covariate <- rnorm(N, 55, 7)
#GENERATE SURVIVAL AND CENSORING VARIABLES ASSUMING A TREATMENT COVARIATE INTERACTION:
Entry <- sort( runif(N, 0, 5) )
SurvT1 <- .5
beta0 <- -65 / 75
beta1 <- 2 / 75
Surv <- rep(0, N)
lambda1 <- -log(SurvT1) / 4
Surv[Txassign==1] <- rexp(n1, lambda1)
Surv[Txassign==2] <- rexp(n2, (lambda1*(beta0+beta1*covariate[Txassign==2])))
EventTimes <- rep(0, N)
EventTimes <- Entry + Surv
censor <- rep(0, N)
time <- rep(0,N)
for ( i in 1:N )
{
censor[i] <- ifelse( EventTimes[i] <= 7, 1, 0 )
time[i] <- ifelse( EventTimes[i] < 7, Surv[i], 7 - Entry[i] )
}
modKM <- stepp.KM( coltrt=Txassign, survTime=time, censor=censor, trts=c(1,2), timePoint=4)
} |
# Required packages for the workshop; any that are missing are
# installed, then the installation is verified.
packages <- c("tidyverse", "here", "rstudioapi", "MASS", "actuar", "lubridate", "readr", "readxl", "haven")
# Subset of `packages` not yet present in the local library.
new_packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new_packages)) install.packages(new_packages)
# Re-check after installation: stop with the names of anything still
# missing (e.g. a failed install), otherwise confirm the setup.
if(sum(!(packages %in% installed.packages()[, "Package"]))) {
  stop(paste('The following required packages are not installed:\n',
             paste(packages[which(!(packages %in% installed.packages()[, "Package"]))], collapse = ', ')));
} else {
  message("Everything is set up correctly. You are ready to go.")
}
new_packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new_packages)) install.packages(new_packages)
if(sum(!(packages %in% installed.packages()[, "Package"]))) {
stop(paste('The following required packages are not installed:\n',
paste(packages[which(!(packages %in% installed.packages()[, "Package"]))], collapse = ', ')));
} else {
message("Everything is set up correctly. You are ready to go.")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eff_summary.R
\name{eff_suggest}
\alias{eff_suggest}
\title{Suggest the input \code{eff.structure} of function \code{SimPRMD} with
selected \code{eff.sd_tran}}
\usage{
eff_suggest(eff.M, eff.sd, eff.sd_trans, n.sim = 30000)
}
\arguments{
\item{eff.M}{The efficacy mean matrix whose (i,j)th element save the
target mean of the efficacy data}
\item{eff.sd}{The target standard deviation matrix for all dose and
cycles. Notice that the generated efficacy may have different standard
deviation matrix due to the correlations across cycles}
\item{eff.sd_trans}{The eff.sd_trans for test. Notice variance of the
generated efficacy data will be effected by \code{eff.sd_trans}.}
\item{n.sim}{The number of simulations for the numerical calculation in
the function. The default is 30,000}
}
\value{
\item{eff.suggest}{The matrix suggested for the input
\code{eff.structure} of function \code{SimPRMD}}
}
\description{
Suggest the input \code{eff.structure} of function SimPRMD with selected
\code{eff.sd_tran} for given efficacy mean matrix and efficacy standard
deviation
}
\examples{
# Provide an target efficacy mean matrix for all dose and cycles
eff.M <- matrix(rep(3:8/10, 6), nrow = 6, ncol = 6)
# Give a target standard deviation matrix for all dose and cycles
# Notice that the generated efficacy may have a different standard deviation
# matrix due to the correlations across cycles
eff.sd <- matrix(0.2, nrow = 6, ncol = 6)
# Select a eff.sd_trans for testing. The efficacy variance are mainly
# controlled by the eff.sd_trans
eff.sd_trans <- 1.5 # or other positive value
eff.structure <- eff_suggest(eff.M = eff.M, eff.sd = eff.sd,
eff.sd_trans = eff.sd_trans)
# check whether the suggested eff.M and the selected sd_trans
# generate the desirable scenario
eff.Sigma <- diag(6)
diag(eff.Sigma[-1,]) = 0.5
diag(eff.Sigma[, -1]) = 0.5
eff.check <- eff_summary(eff.structure = eff.structure,
eff.Sigma = eff.Sigma,
eff.sd_trans = eff.sd_trans,
plot.flag = FALSE)
eff.check$eff.M
eff.check$eff.cor.ls
}
| /man/eff_suggest.Rd | no_license | LuZhangstat/phase1PRMD | R | false | true | 2,197 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eff_summary.R
\name{eff_suggest}
\alias{eff_suggest}
\title{Suggest the input \code{eff.structure} of function \code{SimPRMD} with
selected \code{eff.sd_tran}}
\usage{
eff_suggest(eff.M, eff.sd, eff.sd_trans, n.sim = 30000)
}
\arguments{
\item{eff.M}{The efficacy mean matrix whose (i,j)th element save the
target mean of the efficacy data}
\item{eff.sd}{The target standard deviation matrix for all dose and
cycles. Notice that the generated efficacy may have different standard
deviation matrix due to the correlations across cycles}
\item{eff.sd_trans}{The eff.sd_trans for test. Notice variance of the
generated efficacy data will be effected by \code{eff.sd_trans}.}
\item{n.sim}{The number of simulations for the numerical calculation in
the function. The default is 30,000}
}
\value{
\item{eff.suggest}{The matrix suggested for the input
\code{eff.structure} of function \code{SimPRMD}}
}
\description{
Suggest the input \code{eff.structure} of function SimPRMD with selected
\code{eff.sd_tran} for given efficacy mean matrix and efficacy standard
deviation
}
\examples{
# Provide an target efficacy mean matrix for all dose and cycles
eff.M <- matrix(rep(3:8/10, 6), nrow = 6, ncol = 6)
# Give a target standard deviation matrix for all dose and cycles
# Notice that the generated efficacy may have a different standard deviation
# matrix due to the correlations across cycles
eff.sd <- matrix(0.2, nrow = 6, ncol = 6)
# Select a eff.sd_trans for testing. The efficacy variance are mainly
# controlled by the eff.sd_trans
eff.sd_trans <- 1.5 # or other positive value
eff.structure <- eff_suggest(eff.M = eff.M, eff.sd = eff.sd,
eff.sd_trans = eff.sd_trans)
# check whether the suggested eff.M and the selected sd_trans
# generate the desirable scenario
eff.Sigma <- diag(6)
diag(eff.Sigma[-1,]) = 0.5
diag(eff.Sigma[, -1]) = 0.5
eff.check <- eff_summary(eff.structure = eff.structure,
eff.Sigma = eff.Sigma,
eff.sd_trans = eff.sd_trans,
plot.flag = FALSE)
eff.check$eff.M
eff.check$eff.cor.ls
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/MFCLclasses.r
\docType{class}
\name{MFCLFrq-class}
\alias{MFCLFrq-class}
\title{An S4 class : Representation of a frq input file for MFCL}
\description{
A class comprising an MFCLFrqStats object and an MFCLLenFrq object
}
| /man/MFCLFrq-class.Rd | no_license | lauratboyer/FLR4MFCL | R | false | false | 309 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/MFCLclasses.r
\docType{class}
\name{MFCLFrq-class}
\alias{MFCLFrq-class}
\title{An S4 class : Representation of a frq input file for MFCL}
\description{
A class comprising an MFCLFrqStats object and an MFCLLenFrq object
}
|
# Sometimes data requires more complex storage than simple vectors.
# Data structures - apart from vectors, we have data frames, matrix, list and array.
# Data frames (DF) - one of the most useful features of R and an often
# cited reason for R's ease of use.
# In a data frame, each column is actually a vector, and all columns
# have the same length. Different columns can hold different types of
# data, but within a column every element must be of the same type,
# just like a vector.
# Data frames
# Creating a data frame from vectors (str = structure)
x <- 10:1
y <- -4:5
q <- c("x", "y", "z", "m", "n", "o", "a", "b", "c", "d")
theDF <- data.frame(x, y, q)  # a 10x3 data.frame with x, y, q as variable names
theDF
str(theDF)
q <- as.factor(q)
# Assigning names
theDF <- data.frame(first = x, second = y, Sports = q)
str(theDF)
# Checking the dimensions of the DF
nrow(theDF)
ncol(theDF)
dim(theDF)
names(theDF)
names(theDF)[3]
rownames(theDF)  # indexing
# Head and tail
head(theDF)
head(theDF, n = 10)
tail(theDF)
class(theDF)
# Accessing an individual column using $
theDF$Sports  # gives the column named Sports
# Accessing a specific row and column
theDF[3, 2]          # 3rd row & 2nd column
theDF[3, 2:3]        # 3rd row & 2nd through 3rd column
theDF[c(3, 5), 2]    # rows 3 & 5 from column 2;
# since only one column was selected, it is returned as a vector and
# hence no column name in the output
# Rows 3 & 5 and columns 2 through 3
theDF[c(3, 5), 2:3]
theDF[, 3]       # access all rows for column 3
theDF[, 2:3]
theDF[2, ]       # access all columns for row 2
theDF[2:4, ]
# Access using column names (fixed: the columns are named "first" and
# "Sports"; the original referenced non-existent "First"/"Sport",
# which raises "undefined columns selected")
theDF[, c("first", "Sports")]
theDF[, "Sports"]  # access a specific column
class(theDF[, "Sports"])
theDF["Sports"]    # this returns a one-column data.frame
class(theDF["Sports"])  # data.frame
theDF[["Sports"]]  # access a specific column using double square brackets
class(theDF[["Sports"]])  # factor
# Use drop = FALSE to keep a data.frame with single square brackets
theDF[, "Sports", drop = FALSE]
class(theDF[, "Sports", drop = FALSE])  # data.frame
theDF[, 3, drop = FALSE]
class(theDF[, 3, drop = FALSE])  # data.frame
# This shows how a factor is stored in a data.frame
| /4.2- Data Frames- 04-12-2020.R | no_license | meetgarg28/DSA-ASSIGNMENT-MEET-GARG | R | false | false | 2,143 | r | # Sometimes data requires more complex storage than simple vectors
# Data Structures - Apart from Vectors, we have Data Frames, Matrix, List and Array.
# Data Frames(DF) - Most useful features of R & also cited reason for R's ease of use.
# In dataframe, each column is actually a vector, each of which has same length.
# Each column can hold different type of data.
# Also within each column, each element must be of same type, like vectors.
# Data frames
# Creating a data frame from vectors #STr=structure
x= 10:1
y =-4:5
q = c("x","y","z","m","n","o","a","b","c","d")
theDF = data.frame(x,y,q) # this would create a 10*3 data.frame with x,y,q as variable names
theDF
str(theDF)
q = as.factor(q)
#Assigning Names
theDF= data.frame(first=x,second=y,Sports=q)
str(theDF)
# Checking the dimension of the DF.
nrow(theDF)
ncol(theDF)
dim((theDF))
names(theDF)
names(theDF)[3]
rownames(theDF) #Indexing
#Head and tail
head(theDF)
head(theDF, n=10)
tail(theDF)
class(theDF)
# Accessing Individual column using $
theDF$Sports #gives the coloumn named sports
#Accessing specific row and column
theDF[3,2] #3rd row & 2nd column
theDF[3, 2:3] #3rd row & 2nd through 3rd column
theDF[c(3,5), 2] #row 3&5 from column 2;
#since only one column was selected, it was returned as vector and hence no column name in output
# Rows 3&5 and Columns 2 through 3
theDF[c(3,5), 2:3]
theDF[ ,3] # Access all Rows for column 3
theDF[ , 2:3]
theDF[2,]# Access all columns for Row 2
theDF[2:4,]
theDF[ , c("First", "Sport")]# access using Column Names
theDF[ ,"Sport"]# Access specific Column
class(theDF[ ,"Sport"])
theDF["Sport"]# This returns the one column data.frame
class(theDF["Sport"]) # Data.Frame
theDF[["Sport"]]#To access Specific column using Double Square Brackets
class(theDF[["Sport"]]) # Factor
theDF[ ,"Sport", drop = FALSE]# Use "Drop=FALSE" to get data.fame with single sqaure bracket.
class(theDF[ ,"Sport", drop = FALSE]) # data.frame
theDF[ ,3, drop = FALSE]
class(theDF[ ,3, drop = FALSE]) # data.frame
#Accessinto see how factor is stored in data.frame
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmi_smallfunctions.R
\name{tail.interval}
\alias{tail.interval}
\title{Tail for intervals}
\usage{
\method{tail}{interval}(x, ...)
}
\arguments{
\item{x}{vector, matrix, table, data.frame or interval object}
\item{...}{further arguments passed to \code{tail}.}
}
\description{
Tail function for intervals returning the last elements of an \code{interval} object
}
| /man/tail.interval.Rd | no_license | cran/hmi | R | false | true | 459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmi_smallfunctions.R
\name{tail.interval}
\alias{tail.interval}
\title{Tail for intervals}
\usage{
\method{tail}{interval}(x, ...)
}
\arguments{
\item{x}{vector, matrix, table, data.frame or interval object}
\item{...}{further arguments passed to \code{tail}.}
}
\description{
Tail function for intervals returning the last elements of an \code{interval} object
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions:
##   set     : replace the stored matrix (and invalidate the cache)
##   get     : retrieve the stored matrix
##   setinse : store a computed inverse in the cache
##   getinse : retrieve the cached inverse (NULL until computed)
## The returned list is the expected input to cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  inv_x <- NULL
  set <- function(y) {
    ## "<<-" assigns in the enclosing environment so the stored
    ## matrix and cached inverse persist across calls
    x <<- y
    inv_x <<- NULL
  }
  get <- function() x
  setinse <- function(inse) inv_x <<- inse
  getinse <- function() inv_x
  list(set = set, get = get,
       setinse = setinse,
       getinse = getinse)
}

## Backward-compatible alias: the original definition carried a
## corrupted name ("sapplymakeCacheMatrix"), apparently a paste error.
sapplymakeCacheMatrix <- makeCacheMatrix
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above.
## Return the inverse of the special "matrix" object, computing it
## only on the first call and serving the cached value afterwards.
cacheSolve <- function(x, ...) {
  cached <- x$getinse()
  ## Cache hit: skip the computation entirely.
  if (!is.null(cached)) {
    message("getting cached inverse matrix")
    return(cached)
  }
  ## Cache miss: invert the stored matrix and memoise the result
  ## via the object's setinse accessor.
  fresh <- solve(x$get())
  x$setinse(fresh)
  fresh
}
| /cachematrix.R | no_license | bxjaj/Programming-Assignment-2- | R | false | false | 1,094 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
sapplymakeCacheMatrix <- function(x = matrix()) {
## set the matrix
## get the matrix
## set the inverse
## get the inverse
## this list is used as the input to cacheSolve()
inv_x <- NULL
set <- function(y) {
## use "<<-" to assign a value to an object in an environment
x <<- y
inv_x <<- NULL
}
get <- function() x
setinse<- function(inse) inv_x <<-inse
getinse <- function() inv_x
list(set = set, get = get,
setinse = setinse,
getinse = getinse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above.
cacheSolve <- function(x, ...) {
inv_x <- x$getinse()
if (!is.null(inv_x)) {
## get it from the cache and skips the computation.
message("getting cached inverse matrix")
return(inv_x)
} else {
inv_x <- solve(x$get())
## sets the value of the inverse in the cache via the setinv function.
x$setinse(inv_x)
return(inv_x)
}
}
|
# Benchmark script: times the expression sum(t(colSums(M)) * rowSums(N))
# over 5 iterations and appends the timings to results.out.
library(psych)
library(Matrix)
args <- commandArgs(trailingOnly = TRUE)
# NOTE(review): both M and N are read from args[2]; if two distinct
# input files were intended, the second read should probably use a
# different argument index -- confirm against the pipeline driver.
M<-as.matrix(read.table(file=paste(args[2],"",sep=""), header = FALSE, sep = ","))
N<-as.matrix(read.table(file=paste(args[2],"",sep=""), header = FALSE, sep = ","))
path <- paste('results.out', sep='')
times <- double(5)
# Time 5 runs of the rewritten expression; wall-clock seconds per run.
for (iter in 1:5) {
	start <- Sys.time()
	res = sum(t(colSums(M))*rowSums(N))
	end <- Sys.time()
	print(res)
	times[iter] <- as.numeric(end-start, units="secs")
}
# Append a label row; the header is written only if the file does not
# exist yet (after the first write it always exists, so subsequent
# writeHeader checks evaluate to FALSE).
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table(paste("P.13RW:",args[1],sep=""),
            path,
            append    = TRUE,
            row.names = FALSE,
            col.names = writeHeader,
            sep = ',')
# Append the five per-iteration timings.
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table(times,
            path,
            append    = TRUE,
            row.names = FALSE,
            col.names = writeHeader,
            sep = ',')
# Append a blank separator row between benchmark entries.
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table("\n",
            path,
            append    = TRUE,
            row.names = FALSE,
            col.names = writeHeader,
            sep = ',')
library(Matrix)
args <- commandArgs(trailingOnly = TRUE)
M<-as.matrix(read.table(file=paste(args[2],"",sep=""), header = FALSE, sep = ","))
N<-as.matrix(read.table(file=paste(args[2],"",sep=""), header = FALSE, sep = ","))
path <- paste('results.out', sep='')
times <- double(5)
for (iter in 1:5) {
start <- Sys.time()
res = sum(t(colSums(M))*rowSums(N))
end <- Sys.time()
print(res)
times[iter] <- as.numeric(end-start, units="secs")
}
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table(paste("P.13RW:",args[1],sep=""),
path,
append = TRUE,
row.names = FALSE,
col.names = writeHeader,
sep = ',')
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table(times,
path,
append = TRUE,
row.names = FALSE,
col.names = writeHeader,
sep = ',')
writeHeader <- if (!file.exists(path)) TRUE else FALSE
write.table("\n",
path,
append = TRUE,
row.names = FALSE,
col.names = writeHeader,
sep = ',') |
#' List the names of all visual styles
#'
#' Queries the CyREST API for the visual styles currently defined in
#' Cytoscape and returns their names.
#'
#' @param base.url cyrest base url for communicating with cytoscape
#' @return a character vector of visual style names
#' @export
#' @importFrom httr GET
#' @importFrom jsonlite fromJSON
listStyles <- function(base.url='http://localhost:1234/v1'){
    get.styles.url <- paste(base.url,"styles",sep="/")
    # Explicit namespacing so the function works without httr/jsonlite
    # being attached (the original roxygen imported stringr, which is
    # not used here at all).
    response <- httr::GET(url=get.styles.url)
    # The response payload is a JSON array of style names.
    res.styles <- unname(jsonlite::fromJSON(rawToChar(response$content)))
    return(res.styles)
}
| /for-scripters/R/r2cytoscape/R/listStyles.R | permissive | mpg-age-bioinformatics/cytoscape-automation | R | false | false | 418 | r | #' List the names of all visual styles
#'
#' @param base.url cyrest base url for communicating with cytoscape
#' @return network viewid
#' @export
#' @import stringr
listStyles <- function(base.url='http://localhost:1234/v1'){
get.styles.url <- paste(base.url,"styles",sep="/")
response <- GET(url=get.styles.url)
res.styles <- unname(fromJSON(rawToChar(response$content)))
return(res.styles)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getData.R
\name{search_analytics}
\alias{search_analytics}
\title{Query search traffic keyword data}
\usage{
search_analytics(siteURL, startDate = Sys.Date() - 93,
endDate = Sys.Date() - 3, dimensions = NULL, searchType = c("web",
"video", "image"), dimensionFilterExp = NULL,
aggregationType = c("auto", "byPage", "byProperty"), rowLimit = 1000,
prettyNames = TRUE, walk_data = c("byBatch", "byDate", "none"))
}
\arguments{
\item{siteURL}{The URL of the website you have auth access to.}
\item{startDate}{Start date of requested range, in YYYY-MM-DD.}
\item{endDate}{End date of the requested date range, in YYYY-MM-DD.}
\item{dimensions}{Zero or more dimensions to group results by:
\code{"date", "country", "device", "page" , "query" or "searchAppearance"}}
\item{searchType}{Search type filter, default 'web'.}
\item{dimensionFilterExp}{A character vector of expressions to filter.
e.g. \code{("device==TABLET", "country~~GBR")}}
\item{aggregationType}{How data is aggregated.}
\item{rowLimit}{How many rows to fetch. Ignored if \code{walk_data} is "byDate"}
\item{prettyNames}{If TRUE, converts SO 3166-1 alpha-3 country code to full name and
creates new column called countryName.}
\item{walk_data}{Make multiple API calls. One of \code{("byBatch","byDate","none")}}
}
\value{
A dataframe with columns in order of dimensions plus metrics, with attribute "aggregationType"
}
\description{
Download your Google SEO data.
}
\details{
\strong{startDate}: Start date of the requested date range, in YYYY-MM-DD format,
in PST time (UTC - 8:00). Must be less than or equal to the end date.
This value is included in the range.
\strong{endDate}: End date of the requested date range, in YYYY-MM-DD format,
in PST time (UTC - 8:00). Must be greater than or equal to the start date.
This value is included in the range.
\strong{dimensions}: [Optional] Zero or more dimensions to group results by.
\itemize{
\item 'date'
\item 'country'
\item 'device'
\item 'page'
\item 'query'
\item 'searchAppearance' (can only appear on its own)
}
The grouping dimension values are combined to create a unique key
for each result row. If no dimensions are specified,
all values will be combined into a single row.
There is no limit to the number of dimensions that you can group by apart from \code{searchAppearance} can only be grouped alone.
You cannot group by the same dimension twice.
Example: \code{c('country', 'device')}
\strong{dimensionFilterExp}:
Results are grouped in the order that you supply these dimensions.
dimensionFilterExp expects a character vector of expressions in the form:
("device==TABLET", "country~~GBR", "dimension operator expression")
\itemize{
\item dimension
\itemize{
\item 'country'
\item 'device'
\item 'page'
\item 'query'
\item 'searchAppearance'
}
\item operator
\itemize{
\item '~~' meaning 'contains'
\item '==' meaning 'equals'
\item '!~' meaning 'notContains'
    \item '!=' meaning 'notEquals'
}
\item expression
\itemize{
\item country: an ISO 3166-1 alpha-3 country code.
\item device: 'DESKTOP','MOBILE','TABLET'.
\item page: not checked, a string in page URLs without hostname.
\item query: not checked, a string in keywords.
\item searchAppearance: 'AMP_BLUE_LINK', 'RICHCARD'
}
}
\strong{searchType}: [Optional] The search type to filter for. Acceptable values are:
\itemize{
\item "web": [Default] Web search results
\item "image": Image search results
\item "video": Video search results
}
\strong{aggregationType}: [Optional] How data is aggregated.
\itemize{
\item If aggregated by property, all data for the same property is aggregated;
\item If aggregated by page, all data is aggregated by canonical URI.
\item If you filter or group by page, choose auto; otherwise you can aggregate either by property or by page, depending on how you want your data calculated;
}
See the API documentation to learn how data is calculated differently by site versus by page.
Note: If you group or filter by page, you cannot aggregate by property.
If you specify any value other than auto, the aggregation type in the result will match the requested type, or if you request an invalid type, you will get an error.
The API will never change your aggregation type if the requested type is invalid.
Acceptable values are:
\itemize{
\item "auto": [Default] Let the service decide the appropriate aggregation type.
\item "byPage": Aggregate values by URI.
\item "byProperty": Aggregate values by property.
}
\strong{batchType}: [Optional] Batching data into multiple API calls
\itemize{
\item byBatch Use the API call to batch
\item byData Runs a call over each day in the date range.
\item none No batching
}
}
\examples{
\dontrun{
library(searchConsoleR)
scr_auth()
sc_websites <- list_websites()
default_fetch <- search_analytics("http://www.example.com")
gbr_desktop_queries <-
search_analytics("http://www.example.com",
start = "2016-01-01", end = "2016-03-01",
dimensions = c("query", "page"),
dimensionFilterExp = c("device==DESKTOP", "country==GBR"),
searchType = "web", rowLimit = 100)
batching <-
search_analytics("http://www.example.com",
start = "2016-01-01", end = "2016-03-01",
dimensions = c("query", "page", "date"),
searchType = "web", rowLimit = 100000,
walk_data = "byBatch")
}
}
\seealso{
Guide to Search Analytics: \url{https://support.google.com/webmasters/answer/6155685}
API docs: \url{https://developers.google.com/webmaster-tools/v3/searchanalytics/query}
}
| /man/search_analytics.Rd | no_license | cran/searchConsoleR | R | false | true | 6,081 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getData.R
\name{search_analytics}
\alias{search_analytics}
\title{Query search traffic keyword data}
\usage{
search_analytics(siteURL, startDate = Sys.Date() - 93,
endDate = Sys.Date() - 3, dimensions = NULL, searchType = c("web",
"video", "image"), dimensionFilterExp = NULL,
aggregationType = c("auto", "byPage", "byProperty"), rowLimit = 1000,
prettyNames = TRUE, walk_data = c("byBatch", "byDate", "none"))
}
\arguments{
\item{siteURL}{The URL of the website you have auth access to.}
\item{startDate}{Start date of requested range, in YYYY-MM-DD.}
\item{endDate}{End date of the requested date range, in YYYY-MM-DD.}
\item{dimensions}{Zero or more dimensions to group results by:
\code{"date", "country", "device", "page" , "query" or "searchAppearance"}}
\item{searchType}{Search type filter, default 'web'.}
\item{dimensionFilterExp}{A character vector of expressions to filter.
e.g. \code{("device==TABLET", "country~~GBR")}}
\item{aggregationType}{How data is aggregated.}
\item{rowLimit}{How many rows to fetch. Ignored if \code{walk_data} is "byDate"}
\item{prettyNames}{If TRUE, converts SO 3166-1 alpha-3 country code to full name and
creates new column called countryName.}
\item{walk_data}{Make multiple API calls. One of \code{("byBatch","byDate","none")}}
}
\value{
A dataframe with columns in order of dimensions plus metrics, with attribute "aggregationType"
}
\description{
Download your Google SEO data.
}
\details{
\strong{startDate}: Start date of the requested date range, in YYYY-MM-DD format,
in PST time (UTC - 8:00). Must be less than or equal to the end date.
This value is included in the range.
\strong{endDate}: End date of the requested date range, in YYYY-MM-DD format,
in PST time (UTC - 8:00). Must be greater than or equal to the start date.
This value is included in the range.
\strong{dimensions}: [Optional] Zero or more dimensions to group results by.
\itemize{
\item 'date'
\item 'country'
\item 'device'
\item 'page'
\item 'query'
\item 'searchAppearance' (can only appear on its own)
}
The grouping dimension values are combined to create a unique key
for each result row. If no dimensions are specified,
all values will be combined into a single row.
There is no limit to the number of dimensions that you can group by apart from \code{searchAppearance} can only be grouped alone.
You cannot group by the same dimension twice.
Example: \code{c('country', 'device')}
\strong{dimensionFilterExp}:
Results are grouped in the order that you supply these dimensions.
dimensionFilterExp expects a character vector of expressions in the form:
("device==TABLET", "country~~GBR", "dimension operator expression")
\itemize{
\item dimension
\itemize{
\item 'country'
\item 'device'
\item 'page'
\item 'query'
\item 'searchAppearance'
}
\item operator
\itemize{
\item '~~' meaning 'contains'
\item '==' meaning 'equals'
\item '!~' meaning 'notContains'
\item '!=' meaning 'notEquals
}
\item expression
\itemize{
\item country: an ISO 3166-1 alpha-3 country code.
\item device: 'DESKTOP','MOBILE','TABLET'.
\item page: not checked, a string in page URLs without hostname.
\item query: not checked, a string in keywords.
\item searchAppearance: 'AMP_BLUE_LINK', 'RICHCARD'
}
}
\strong{searchType}: [Optional] The search type to filter for. Acceptable values are:
\itemize{
\item "web": [Default] Web search results
\item "image": Image search results
\item "video": Video search results
}
\strong{aggregationType}: [Optional] How data is aggregated.
\itemize{
\item If aggregated by property, all data for the same property is aggregated;
\item If aggregated by page, all data is aggregated by canonical URI.
\item If you filter or group by page, choose auto; otherwise you can aggregate either by property or by page, depending on how you want your data calculated;
}
See the API documentation to learn how data is calculated differently by site versus by page.
Note: If you group or filter by page, you cannot aggregate by property.
If you specify any value other than auto, the aggregation type in the result will match the requested type, or if you request an invalid type, you will get an error.
The API will never change your aggregation type if the requested type is invalid.
Acceptable values are:
\itemize{
\item "auto": [Default] Let the service decide the appropriate aggregation type.
\item "byPage": Aggregate values by URI.
\item "byProperty": Aggregate values by property.
}
\strong{batchType}: [Optional] Batching data into multiple API calls
\itemize{
\item byBatch Use the API call to batch
\item byDate Runs a call over each day in the date range.
\item none No batching
}
}
\examples{
\dontrun{
library(searchConsoleR)
scr_auth()
sc_websites <- list_websites()
default_fetch <- search_analytics("http://www.example.com")
gbr_desktop_queries <-
search_analytics("http://www.example.com",
start = "2016-01-01", end = "2016-03-01",
dimensions = c("query", "page"),
dimensionFilterExp = c("device==DESKTOP", "country==GBR"),
searchType = "web", rowLimit = 100)
batching <-
search_analytics("http://www.example.com",
start = "2016-01-01", end = "2016-03-01",
dimensions = c("query", "page", "date"),
searchType = "web", rowLimit = 100000,
walk_data = "byBatch")
}
}
\seealso{
Guide to Search Analytics: \url{https://support.google.com/webmasters/answer/6155685}
API docs: \url{https://developers.google.com/webmaster-tools/v3/searchanalytics/query}
}
|
# Text mining of article titles (organizational behavior): read titles from an
# Excel file, segment the Chinese text, build a word-frequency table, and draw
# a word cloud.

# NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
# prefer running this script in a fresh R session instead.
rm(list=ls())
###packages################
# NOTE(review): most of these packages are never used below; the script only
# needs xlsx (input), Rwordseg (segmentation) and wordcloud (output).
library(NLP)
library(tm)
library(rJava)
library(Rwordseg)
library(SnowballC)
library(MASS)
library(RColorBrewer)
library(wordcloud)
library(pcaPP)
library(rainbow)
library(Rcpp)
library(cluster)
library(mvtnorm)
library(hdrcde)
library(locfit)
library(ash)
library(KernSmooth)
library(misc3d)
library(rgl)
library(ks)
library(ggplot2)
library(ggmap)
library(maps)
library(mapdata)
library(sp)
library(maptools)
library(grid)
library(vcd)
library(topicmodels)
library(randomForest)
library(rFerns)
library(ranger)
library(Boruta)
library(lattice)
library(caret)
library(slam)
library(Matrix)
library(foreach)
library(glmnet)
library(xlsx)
library(igraph)
library(wordcloud2)
library(e1071)

# Step 1: read the data -------------------------------------------------------
jour <- read.xlsx("C:/Users/lenovo/Desktop/behavir.xlsx", 1, encoding = "UTF-8")

# Step 2: install domain dictionaries (Sogou .scel format) --------------------
installDict("D:\\library\\words\\心理学.scel", "sougou")
installDict("D:\\library\\words\\项目管理词汇.scel", "sougou")
installDict("D:\\library\\words\\社会学专业词库.scel", "sougou")
listDict()  # list the dictionaries that are installed
installDict(dictpath = 'D:\\library\\words\\社会学专业词库.scel',
            dictname = "社会学专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\项目管理词汇.scel',
            dictname = "项目管理词汇", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学.scel',
            dictname = "心理学", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\社会工作专业词库.scel',
            dictname = "社会工作专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\中国职业名称大全.scel',
            dictname = "中国职业名称大全", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学词库.scel',
            dictname = "心理学词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\教育教学综合词库.scel',
            dictname = "教育教学综合词库", dicttype = "scel", load = TRUE)

# Step 3: word segmentation.  Convert the title column to character first;
# variable names are kept in English so the code stays readable regardless of
# the console encoding.
title <- as.character(jour$标题)
titlewords <- segmentCN(title)  # (a duplicate, discarded segmentCN() call was removed)
#sink("D:\\library\\words\\titlewords.xlsx",append=TRUE,split=TRUE)  # export to a file

# Step 4: word frequencies -> data frame --------------------------------------
term <- lapply(X = titlewords, FUN = strsplit, ' ')
term <- unlist(term)
df <- table(term)                    # frequency table
df1 <- sort(df, decreasing = TRUE)   # sort by frequency, descending
df2 <- as.data.frame(df1)
write.csv(df2, "path")  # TODO(review): "path" is a placeholder output file name
# Drop very short terms (fewer than 3 characters).  The original line indexed
# an undefined object `wordsFreq` and errored out; filter df2 instead, using
# logical indexing so that an empty match does not drop every row
# (df[-which(cond), ] removes ALL rows when nothing matches).
df2 <- df2[nchar(as.character(df2$term)) >= 3, ]
wordcloud(df2$term, df2$Freq, min.freq = 10)
| /Rcurl/2017.11.19组织行为学小论文练手/组织行为.R | no_license | ZuoRX/Rcurl | R | false | false | 2,986 | r | rm(list=ls())
###packages################
library(NLP)
library(tm)
library(rJava)
library(Rwordseg)
library(SnowballC)
library(MASS)
library(RColorBrewer)
library(wordcloud)
library(pcaPP)
library(rainbow)
library(Rcpp)
library(cluster)
library(mvtnorm)
library(hdrcde)
library(locfit)
library(ash)
library(KernSmooth)
library(misc3d)
library(rgl)
library(ks)
library(ggplot2)
library(ggmap)
library(maps)
library(mapdata)
library(sp)
library(maptools)
library(grid)
library(vcd)
library(topicmodels)
library(randomForest)
library(rFerns)
library(ranger)
library(Boruta)
library(lattice)
library(caret)
library(slam)
library(Matrix)
library(foreach)
library(glmnet)
library(xlsx)
library(igraph)
library(wordcloud2)
library(e1071)
#第一步:读取数据##############################################
jour<- read.xlsx("C:/Users/lenovo/Desktop/behavir.xlsx",1,encoding="UTF-8")
#第二步:导入语料库
installDict("D:\\library\\words\\心理学.scel","sougou")
installDict("D:\\library\\words\\项目管理词汇.scel","sougou")
installDict("D:\\library\\words\\社会学专业词库.scel","sougou")
listDict()#查看已安装词典
installDict(dictpath = 'D:\\library\\words\\社会学专业词库.scel',
dictname = "社会学专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\项目管理词汇.scel',
dictname = "项目管理词汇", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学.scel',
dictname = "心理学", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\社会工作专业词库.scel',
dictname = "社会工作专业词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\中国职业名称大全.scel',
dictname = "中国职业名称大全", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\心理学词库.scel',
dictname = "心理学词库", dicttype = "scel", load = TRUE)
installDict(dictpath = 'D:\\library\\words\\教育教学综合词库.scel',
dictname = "教育教学综合词库", dicttype = "scel", load = TRUE)
#第三步:切割分词。在处理中文时,要把字符变量转换可读的,所以尽量用英文写变量名词
title<-as.character(jour$标题)
segmentCN(title)
titlewords=segmentCN(title)
#sink("D:\\library\\words\\titlewords.xlsx",append=TRUE,split=TRUE)#把数据导出到文件
#第四步:计算词频→建立数据框)
term<-lapply(X=titlewords,FUN = strsplit,' ')
term<-unlist(term)
df<-table(term)#建表
df
df1<-sort(df,decreasing = T)#降序排列
df1
seg3<-names(df1)
df2<-as.data.frame(df1)
df2
write.csv(df2,"path")
wordsFreq<-wordsFreq[-which(nchar(wordsFreq[,1])<3),]
wordcloud(df2$term,df2$Freq,min.freq = 10)
|
# Baseline (null) classification model for start_position: fit a model with no
# real explanatory information, then measure its error rate on the training
# data.  This gives a floor against which real models can be compared.

# Use library() rather than require() for hard dependencies: require() merely
# warns (and returns FALSE) when a package is missing, letting the script fail
# later with a confusing error, whereas library() stops immediately.
library(statisticalModeling)
library(rpart)

# Build the null model with rpart(): a constant "explanatory" variable forces
# the tree to always predict the most common class.
Runners$all_the_same <- 1 # null "explanatory" variable
null_model <- rpart(start_position ~ all_the_same, data = Runners)

# Evaluate the null model on the training data itself.
null_model_output <- evaluate_model(null_model, data = Runners, type = "class")

# Error rate: the fraction of runners whose actual start_position differs from
# the model's prediction (NAs excluded).
with(data = null_model_output, mean(start_position != model_output, na.rm = TRUE))
| /Categorical Responce variable.R | no_license | ironhidee/RCode | R | false | false | 439 | r | require(statisticalModeling)
require(rpart)
# Build the null model with rpart()
Runners$all_the_same <- 1 # null "explanatory" variable
null_model <- rpart(start_position ~ all_the_same, data = Runners)
# Evaluate the null model on training data
null_model_output <- evaluate_model(null_model, data = Runners, type = "class")
# Calculate the error rate
with(data = null_model_output, mean(start_position != model_output, na.rm = TRUE))
|
## Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Source data: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
## The dataset has 2,075,259 rows and 9 columns.
## First calculate a rough estimate of how much memory the dataset will
## require before reading it into R, and make sure your computer has enough
## memory (most modern computers should be fine).
data_full <- read.csv("./Data/household_power_consumption.txt", header=TRUE, sep=';',
                      na.strings="?", nrows=2075259, check.names=FALSE,
                      stringsAsFactors=FALSE, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")

## We only use data from 2007-02-01 and 2007-02-02; subset and free the full
## dataset.  (An alternative is to read just those rows instead of reading
## everything and subsetting.)
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)

## Combine Date and Time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)

## Render directly to the PNG device rather than via dev.copy(): copying a
## screen device can rescale the plot, and under Rscript the intermediate
## screen plot would also create an unwanted Rplots.pdf.
png(file="plot1.png", height=480, width=480)
hist(data$Global_active_power, main="Global Active Power",
     xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.off()
##The dataset has 2,075,259 rows and 9 columns.
##First calculate a rough estimate of how much memory the dataset will require in memory before reading into R.
##Make sure your computer has enough memory (most modern computers should be fine)
data_full <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';',na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
##We will only be using data from the dates 2007-02-01 and 2007-02-02.
## One alternative is to read the data from just those dates rather than reading in the entire dataset and subsetting to those dates.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
##You may find it useful to convert the Date and Time variables to Date/Time classes in R using the strptime() and as.Date() functions.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
hist(data$Global_active_power, main="Global Active Power",xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-freeman.R
\docType{data}
\name{law_cowork}
\alias{law_cowork}
\title{Law Firm (Co-work)}
\format{igraph object}
\source{
http://moreno.ss.uci.edu/data#lazega
}
\usage{
law_cowork
}
\description{
This data set comes from a network study of corporate law partnership that was carried out in a Northeastern US corporate law firm, referred to as SG&R, 1988-1991 in New England. It includes (among others) measurements of networks among the 71 attorneys (partners and associates) of this firm, i.e. their strong-coworker network, advice network, friendship network, and indirect control networks. Various members' attributes are also part of the dataset, including seniority, formal status, office in which they work, gender, lawschool attended. The ethnography, organizational and network analyses of this case are available in Lazega (2001).
\strong{Strong coworkers network:}
"Because most firms like yours are also organized very informally, it is difficult to get a clear idea of how the members really work together. Think back over the past year, consider all the lawyers in your Firm. Would you go through this list and check the names of those with whom you have worked with. (By "worked with" I mean that you have spent time together on at least one case, that you have been assigned to the same case, that they read or used your work product or that you have read or used their work product; this includes professional work done within the Firm like Bar association work, administration, etc.)"
\preformatted{
Coding:
The first 36 respondents are the partners in the firm. The attribute variables are:
1. status (1=partner; 2=associate)
2. gender (1=man; 2=woman)
3. office (1=Boston; 2=Hartford; 3=Providence)
4. years with the firm
5. age
6. practice (1=litigation; 2=corporate)
7. law school (1: harvard, yale; 2: ucon; 3: other)
}
}
\references{
Emmanuel Lazega, The Collegial Phenomenon: The Social Mechanisms of Cooperation Among Peers in a Corporate Law Partnership, Oxford University Press (2001).
Tom A.B. Snijders, Philippa E. Pattison, Garry L. Robins, and Mark S. Handcock. New specifications for exponential random graph models. \emph{Sociological Methodology} (2006), 99-153.
}
\seealso{
\link{law_advice},\link{law_friends}
}
\keyword{datasets}
| /man/law_cowork.Rd | permissive | kjhealy/networkdata | R | false | true | 2,351 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-freeman.R
\docType{data}
\name{law_cowork}
\alias{law_cowork}
\title{Law Firm (Co-work)}
\format{igraph object}
\source{
http://moreno.ss.uci.edu/data#lazega
}
\usage{
law_cowork
}
\description{
This data set comes from a network study of corporate law partnership that was carried out in a Northeastern US corporate law firm, referred to as SG&R, 1988-1991 in New England. It includes (among others) measurements of networks among the 71 attorneys (partners and associates) of this firm, i.e. their strong-coworker network, advice network, friendship network, and indirect control networks. Various members' attributes are also part of the dataset, including seniority, formal status, office in which they work, gender, lawschool attended. The ethnography, organizational and network analyses of this case are available in Lazega (2001).
\strong{Strong coworkers network:}
"Because most firms like yours are also organized very informally, it is difficult to get a clear idea of how the members really work together. Think back over the past year, consider all the lawyers in your Firm. Would you go through this list and check the names of those with whom you have worked with. (By "worked with" I mean that you have spent time together on at least one case, that you have been assigned to the same case, that they read or used your work product or that you have read or used their work product; this includes professional work done within the Firm like Bar association work, administration, etc.)"
\preformatted{
Coding:
The first 36 respondents are the partners in the firm. The attribute variables are:
1. status (1=partner; 2=associate)
2. gender (1=man; 2=woman)
3. office (1=Boston; 2=Hartford; 3=Providence)
4. years with the firm
5. age
6. practice (1=litigation; 2=corporate)
7. law school (1: harvard, yale; 2: ucon; 3: other)
}
}
\references{
Emmanuel Lazega, The Collegial Phenomenon: The Social Mechanisms of Cooperation Among Peers in a Corporate Law Partnership, Oxford University Press (2001).
Tom A.B. Snijders, Philippa E. Pattison, Garry L. Robins, and Mark S. Handcock. New specifications for exponential random graph models. \emph{Sociological Methodology} (2006), 99-153.
}
\seealso{
\link{law_advice},\link{law_friends}
}
\keyword{datasets}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/usa.R
\docType{data}
\name{USAChoropleth}
\alias{USAChoropleth}
\title{Normal choropleth that draws Alaska and Hawaii as insets.
In addition to columns named "region" and "value", also requires a column named "state".}
\format{\preformatted{Class 'R6ClassGenerator' <environment: 0x10fd354d0>
- attr(*, "name")= chr "USAChoropleth_generator"
}}
\usage{
USAChoropleth
}
\description{
Normal choropleth that draws Alaska and Hawaii as insets.
In addition to columns named "region" and "value", also requires a column named "state".
}
\keyword{datasets}
| /man/USAChoropleth.Rd | no_license | cardiomoon/choroplethr | R | false | false | 644 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/usa.R
\docType{data}
\name{USAChoropleth}
\alias{USAChoropleth}
\title{Normal choropleth that draws Alaska and Hawaii as insets.
In addition to columns named "region" and "value", also requires a column named "state".}
\format{\preformatted{Class 'R6ClassGenerator' <environment: 0x10fd354d0>
- attr(*, "name")= chr "USAChoropleth_generator"
}}
\usage{
USAChoropleth
}
\description{
Normal choropleth that draws Alaska and Hawaii as insets.
In addition to columns named "region" and "value", also requires a column named "state".
}
\keyword{datasets}
|
#' Conditional count by ordinal tests for association.
#'
#' \code{countbot} tests for independence between an ordered categorical
#' variable, \var{X}, and a count variable, \var{Y}, conditional on other variables,
#' \var{Z}. The basic approach involves fitting an ordinal model of \var{X} on
#' \var{Z}, a Poisson or Negative Binomial model of \var{Y} on \var{Z}, and then determining whether there is any
#' residual information between \var{X} and \var{Y}. This is done by
#' computing residuals for both models, calculating their correlation, and
#' testing the null of no residual correlation. This procedure is analogous to test statistic
#' \code{T2} in \code{cobot}. Two test statistics (correlations) are currently output. The first
#' is the correlation between probability-scale residuals. The second is the correlation between
#' the Pearson residual for the count outcome model and a latent variable residual
#' for the ordinal model (Li C and Shepherd BE, 2012).
#'
#' Formula is specified as \code{\var{X} | \var{Y} ~ \var{Z}}.
#' This indicates that models of \code{\var{X} ~ \var{Z}} and
#' \code{\var{Y} ~ \var{Z}} will be fit. The null hypothesis to be
#' tested is \eqn{H_0 : X}{H0 : X} independent of \var{Y} conditional
#' on \var{Z}. The ordinal variable, \code{\var{X}}, must precede the \code{|} and be a factor variable, and \code{\var{Y}} must be an integer.
#' @references Li C and Shepherd BE (2012)
#' A new residual for ordinal outcomes.
#' \emph{Biometrika}. \bold{99}: 473--480.
#' @references Shepherd BE, Li C, Liu Q (2016)
#' Probability-scale residuals for continuous, discrete, and censored data.
#' \emph{The Canadian Journal of Statistics}. \bold{44}: 463--479.
#'
#' @param formula an object of class \code{\link{Formula}} (or one
#' that can be coerced to that class): a symbolic description of the
#' model to be fitted. The details of model specification are given
#' under \sQuote{Details}.
#'
#' @param data an optional data frame, list or environment (or object
#' coercible by \code{\link{as.data.frame}} to a data frame)
#' containing the variables in the model. If not found in
#' \code{data}, the variables are taken from
#' \code{environment(formula)}, typically the environment from which
#' \code{countbot} is called.
#' @param link.x The link family to be used for the ordinal model of
#' \var{X} on \var{Z}. Defaults to \samp{logit}. Other options are
#' \samp{probit}, \samp{cloglog},\samp{loglog}, and \samp{cauchit}.
#'
#' @param fit.y The error distribution for the count model of \var{Y} on \var{Z}.
#' Defaults to \samp{poisson}. The other option is \samp{negative binomial}.
#' If \samp{negative binomial} is specified, \code{\link[MASS]{glm.nb}} is called to fit the count model.
#' @param subset an optional vector specifying a subset of
#' observations to be used in the fitting process.
#'
#' @param na.action action to take when \code{NA} present in data.
#'
#' @param fisher logical indicating whether to apply fisher transformation to compute confidence intervals and p-values for the correlation.
#'
#' @param conf.int numeric specifying confidence interval coverage.
#'
#' @return object of \samp{cocobot} class.
#' @export
#' @importFrom stats qlogis qnorm qcauchy integrate
#' @examples
#' data(PResidData)
#' countbot(x|c ~z, fit.y="poisson",data=PResidData)
#' countbot(x|c ~z, fit.y="negative binomial",data=PResidData)
countbot <- function(formula, data, link.x=c("logit", "probit","loglog", "cloglog", "cauchit"),
                     fit.y=c("poisson", "negative binomial"),
                     subset, na.action=getOption('na.action'),
                     fisher=TRUE, conf.int=0.95) {
  # Construct the model frames for x ~ z (ordinal) and y ~ z (count) from the
  # two-part formula x | y ~ z.
  F1 <- Formula(formula)
  Fx <- formula(F1, lhs=1)   # x ~ z
  Fy <- formula(F1, lhs=2)   # y ~ z
  # Rebuild the call so model.frame() sees only the arguments it understands.
  mf <- match.call(expand.dots = FALSE)
  m <- match(c("formula", "data", "subset", "weights", "na.action",
               "offset"), names(mf), 0L)
  mf <- mf[c(1L, m)]
  mf$drop.unused.levels <- TRUE
  mf$na.action <- na.action
  # We set xlev to a benign non-value in the call so that it won't get partially matched
  # to any variable in the formula. For instance a variable named 'x' could possibly get
  # bound to xlev, which is not what we want.
  mf$xlev <- integer(0)
  mf[[1L]] <- as.name("model.frame")
  mx <- my <- mf
  # NOTE: we add the opposite variable to each model frame call so that
  # subsetting (and NA handling) occurs consistently for both models.
  # Later we strip the helper column off again.
  mx[["formula"]] <- Fx
  yName <- all.vars(Fy[[2]])[1]
  mx[[yName]] <- Fy[[2]]
  my[["formula"]] <- Fy
  xName <- all.vars(Fx[[2]])[1]
  my[[xName]] <- Fx[[2]]
  mx <- eval(mx, parent.frame())
  mx[[paste('(',yName,')',sep='')]] <- NULL   # strip the helper y column
  my <- eval(my, parent.frame())
  my[[paste('(',xName,')',sep='')]] <- NULL   # strip the helper x column
  data.points <- nrow(mx)
  # x must be a factor (ordinal); coerce with a warning otherwise.
  if (!is.factor(mx[[1]])){
    warning("Coercing ",names(mx)[1]," to factor. Check the ordering of categories.")
    mx[[1]] <- as.factor(mx[[1]])
  }
  # y must be a count variable, never a factor.
  if (is.factor(my[[1]])){
    stop(names(my)[1]," cannot be a factor.")
  }
  # Construct the model matrix z for each model, dropping the intercept column.
  mxz <- model.matrix(attr(mx,'terms'),mx)
  zzint <- match("(Intercept)", colnames(mxz), nomatch = 0L)
  if(zzint > 0L) {
    mxz <- mxz[, -zzint, drop = FALSE]
  }
  myz <- model.matrix(attr(my,'terms'),my)
  zzint <- match("(Intercept)", colnames(myz), nomatch = 0L)
  if(zzint > 0L) {
    myz <- myz[, -zzint, drop = FALSE]
  }
  # Fit the two working models and collect log-likelihood scores/derivatives.
  # ordinal.scores / poisson.scores / nb.scores are package-internal helpers.
  score.xz <- ordinal.scores(mx, mxz,method=link.x[1])
  if (fit.y[1]=="poisson")
    score.yz <- poisson.scores(y=model.response(my), X=myz)
  else if (fit.y[1]=="negative binomial")
    score.yz <- nb.scores(y=model.response(my), X=myz)
  else stop("fit.y has to be 'poisson' or 'negative binomial'")
  npar.xz = dim(score.xz$dl.dtheta)[2]  # number of parameters in the x|z model
  npar.yz = dim(score.yz$dl.dtheta)[2]  # NOTE(review): computed but never used below
  xx = as.integer(model.response(mx))   # x as category index 1..nx
  nx = length(table(xx))                # number of observed categories
  N = length(xx)
  # Probability-scale residual for the ordinal model, built from the fitted
  # cumulative probabilities Gamma: low.x = P(X < x_i | z_i),
  # hi.x = P(X > x_i | z_i), presid = low.x - hi.x.
  low.x = cbind(0, score.xz$Gamma)[cbind(1:N, xx)]
  hi.x = cbind(1-score.xz$Gamma, 0)[cbind(1:N, xx)]
  xz.presid <- low.x - hi.x
  xz.dpresid.dtheta <- score.xz$dlow.dtheta - score.xz$dhi.dtheta
  ## return value
  ans <- list(
    TS=list(),
    fisher=fisher,
    conf.int=conf.int,
    data.points=data.points
  )
  # Test statistic TB: correlation of the two probability-scale residuals.
  tb = corTS(xz.presid, score.yz$presid,
             score.xz$dl.dtheta, score.yz$dl.dtheta,
             score.xz$d2l.dtheta.dtheta, score.yz$d2l.dtheta.dtheta,
             xz.dpresid.dtheta, score.yz$dpresid.dtheta,fisher)
  tb.label = "PResid vs. PResid"
  ans$TS$TB <-
    list(
      ts=tb$TS, var=tb$var.TS, pval=tb$pval.TS,
      label = tb.label
    )
  # Latent-variable residual for the ordinal model: average of the inverse
  # link (latent-error quantile function) over the probability interval
  # (rij_1, rij] assigned to the observed category of each subject.
  rij <- cbind(score.xz$Gamma, 1)[cbind(1:N, xx)]
  rij_1 <- cbind(0,score.xz$Gamma)[cbind(1:N, xx)]
  pij <- rij-rij_1   # fitted probability of the observed category
  # NOTE(review): "loglog" is accepted by link.x but has no entry in this
  # switch, so G.inverse would be NULL and integrate() below would fail for
  # that link -- confirm intended behavior.  qgumbel is presumably defined
  # elsewhere in this package (it is not in base R); verify.
  G.inverse <- switch(link.x[1], logit = qlogis, probit = qnorm,
                      cloglog = qgumbel, cauchit = qcauchy)
  xz.latent.resid <- rep(NA, N)
  inverse_fail <- FALSE
  for (i in 1:N){
    # The integral may diverge (e.g. for the heavy-tailed cauchit link); if
    # any subject fails, give up on this test statistic entirely.
    tmp <- try(integrate(G.inverse, rij_1[i], rij[i])$value/pij[i],silent=TRUE)
    if (inherits(tmp,'try-error')){
      if (link.x[1] != 'cauchit')
        warning("Cannot compute latent variable residual.")
      else
        warning("Cannot compute latent variable residual with link function cauchit.")
      inverse_fail <- TRUE
      break
    } else {
      xz.latent.resid[i] <- tmp
    }
  }
  if (!inverse_fail){
    ### To compute dlatent.dtheta (need dgamma.dtheta and dp0.dtheta from ordinal scores)
    xz.dlatent.dtheta = dpij.dtheta = matrix(, npar.xz, N)
    drij_1.dtheta <- score.xz$dlow.dtheta
    drij.dtheta <- -score.xz$dhi.dtheta
    for(i in 1:N) {
      dpij.dtheta[,i] <- score.xz$dp0.dtheta[i, xx[i],]
      # Boundary categories: one interval endpoint is fixed at 0 or 1, so the
      # corresponding G.inverse term drops out of the derivative.
      if (xx[i] == 1) {
        xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
          G.inverse(rij[i])*drij.dtheta[,i] - 0 )
      } else if(xx[i] == nx){
        xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
          0 - G.inverse(rij_1[i])*drij_1.dtheta[,i] )
      } else
        xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
          G.inverse(rij[i])*drij.dtheta[,i] - G.inverse(rij_1[i])*drij_1.dtheta[,i])
    }
    ### Test statistic TC: latent-variable residual vs. Pearson residual.
    tc <- corTS(xz.latent.resid, score.yz$pearson.resid,
                score.xz$dl.dtheta, score.yz$dl.dtheta,
                score.xz$d2l.dtheta.dtheta, score.yz$d2l.dtheta.dtheta,
                xz.dlatent.dtheta, score.yz$dpearson.resid.dtheta, fisher)
    ans$TS$TC <-
      list(
        ts=tc$TS, var=tc$var.TS, pval=tc$pval.TS,
        label = 'Latent.resid vs. Pearson.resid'
      )
  }
  # Same class as cocobot(), presumably so its print/summary methods apply.
  ans <- structure(ans, class="cocobot")
  # Apply confidence intervals to each computed test statistic.
  for (i in seq_len(length(ans$TS))){
    ts_ci <- getCI(ans$TS[[i]]$ts,ans$TS[[i]]$var,ans$fisher,conf.int)
    ans$TS[[i]]$lower <- ts_ci[,1]
    ans$TS[[i]]$upper <- ts_ci[,2]
  }
  return(ans)
}
#### example
## generate count by ordinal data
## generate.data3 = function(alphax, betax, alphay, betay, eta, N) {
## z = rnorm(N,0,1)
## x = y = numeric(N)
## ## px is an N x length(alphax) matrix.
## ## Each row has the TRUE cummulative probabilities for each subject.
## px = (1 + exp(- outer(alphax, betax*z, "+"))) ^ (-1)
## aa = runif(N)
## for(i in 1:N)
## x[i] = sum(aa[i] > px[,i])
## x = as.numeric(as.factor(x))
## y = rpois(N, exp(outer(alphay, betay*z+eta[x], "+")))
## return(list(x=as.factor(x), y=y, z=z))
## }
## set.seed(13)
## alphax = c(-1, 0, 1, 2)
## betax = 1
## alphay = 1
## betay = -.5
## #eta = rep(0, 5)
## eta = c(1:5)/20
## N = 200
## data <- generate.data3(alphax, betax, alphay, betay, eta, N)
## #### check for cocobot
## cocobot(x|y~z, data=data)
## countbot(x|y~z, data=data, fisher=TRUE)
## countbot(x|y~z, data=data, family="negative binomial")
| /R/countbot.R | no_license | cran/PResiduals | R | false | false | 9,903 | r | #' Conditional count by ordinal tests for association.
#'
#' \code{countbot} tests for independence between an ordered categorical
#' variable, \var{X}, and a count variable, \var{Y}, conditional on other variables,
#' \var{Z}. The basic approach involves fitting an ordinal model of \var{X} on
#' \var{Z}, a Poisson or Negative Binomial model of \var{Y} on \var{Z}, and then determining whether there is any
#' residual information between \var{X} and \var{Y}. This is done by
#' computing residuals for both models, calculating their correlation, and
#' testing the null of no residual correlation. This procedure is analogous to test statistic
#' \code{T2} in \code{cobot}. Two test statistics (correlations) are currently output. The first
#' is the correlation between probability-scale residuals. The second is the correlation between
#' the Pearson residual for the count outcome model and a latent variable residual
#' for the ordinal model (Li C and Shepherd BE, 2012).
#'
#' Formula is specified as \code{\var{X} | \var{Y} ~ \var{Z}}.
#' This indicates that models of \code{\var{X} ~ \var{Z}} and
#' \code{\var{Y} ~ \var{Z}} will be fit. The null hypothesis to be
#' tested is \eqn{H_0 : X}{H0 : X} independent of \var{Y} conditional
#' on \var{Z}. The ordinal variable, \code{\var{X}}, must precede the \code{|} and be a factor variable, and \code{\var{Y}} must be an integer.
#' @references Li C and Shepherd BE (2012)
#' A new residual for ordinal outcomes.
#' \emph{Biometrika}. \bold{99}: 473--480.
#' @references Shepherd BE, Li C, Liu Q (2016)
#' Probability-scale residuals for continuous, discrete, and censored data.
#' \emph{The Canadian Journal of Statistics}. \bold{44}: 463--479.
#'
#' @param formula an object of class \code{\link{Formula}} (or one
#' that can be coerced to that class): a symbolic description of the
#' model to be fitted. The details of model specification are given
#' under \sQuote{Details}.
#'
#' @param data an optional data frame, list or environment (or object
#' coercible by \code{\link{as.data.frame}} to a data frame)
#' containing the variables in the model. If not found in
#' \code{data}, the variables are taken from
#' \code{environment(formula)}, typically the environment from which
#' \code{countbot} is called.
#' @param link.x The link family to be used for the ordinal model of
#' \var{X} on \var{Z}. Defaults to \samp{logit}. Other options are
#' \samp{probit}, \samp{cloglog},\samp{loglog}, and \samp{cauchit}.
#'
#' @param fit.y The error distribution for the count model of \var{Y} on \var{Z}.
#' Defaults to \samp{poisson}. The other option is \samp{negative binomial}.
#' If \samp{negative binomial} is specified, \code{\link[MASS]{glm.nb}} is called to fit the count model.
#' @param subset an optional vector specifying a subset of
#' observations to be used in the fitting process.
#'
#' @param na.action action to take when \code{NA} present in data.
#'
#' @param fisher logical indicating whether to apply fisher transformation to compute confidence intervals and p-values for the correlation.
#'
#' @param conf.int numeric specifying confidence interval coverage.
#'
#' @return object of \samp{cocobot} class.
#' @export
#' @importFrom stats qlogis qnorm qcauchy integrate
#' @examples
#' data(PResidData)
#' countbot(x|c ~z, fit.y="poisson",data=PResidData)
#' countbot(x|c ~z, fit.y="negative binomial",data=PResidData)
countbot <- function(formula, data, link.x=c("logit", "probit","loglog", "cloglog", "cauchit"),
fit.y=c("poisson", "negative binomial"),
subset, na.action=getOption('na.action'),
fisher=TRUE, conf.int=0.95) {
# Construct the model frames for x ~ z and y ~ z
F1 <- Formula(formula)
Fx <- formula(F1, lhs=1)
Fy <- formula(F1, lhs=2)
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "weights", "na.action",
"offset"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf$na.action <- na.action
# We set xlev to a benign non-value in the call so that it won't get partially matched
# to any variable in the formula. For instance a variable named 'x' could possibly get
# bound to xlev, which is not what we want.
mf$xlev <- integer(0)
mf[[1L]] <- as.name("model.frame")
mx <- my <- mf
# NOTE: we add the opposite variable to each model frame call so that
# subsetting occurs correctly. Later we strip them off.
mx[["formula"]] <- Fx
yName <- all.vars(Fy[[2]])[1]
mx[[yName]] <- Fy[[2]]
my[["formula"]] <- Fy
xName <- all.vars(Fx[[2]])[1]
my[[xName]] <- Fx[[2]]
mx <- eval(mx, parent.frame())
mx[[paste('(',yName,')',sep='')]] <- NULL
my <- eval(my, parent.frame())
my[[paste('(',xName,')',sep='')]] <- NULL
data.points <- nrow(mx)
if (!is.factor(mx[[1]])){
warning("Coercing ",names(mx)[1]," to factor. Check the ordering of categories.")
mx[[1]] <- as.factor(mx[[1]])
}
if (is.factor(my[[1]])){
stop(names(my)[1]," cannot be a factor.")
}
# Construct the model matrix z
mxz <- model.matrix(attr(mx,'terms'),mx)
zzint <- match("(Intercept)", colnames(mxz), nomatch = 0L)
if(zzint > 0L) {
mxz <- mxz[, -zzint, drop = FALSE]
}
myz <- model.matrix(attr(my,'terms'),my)
zzint <- match("(Intercept)", colnames(myz), nomatch = 0L)
if(zzint > 0L) {
myz <- myz[, -zzint, drop = FALSE]
}
score.xz <- ordinal.scores(mx, mxz,method=link.x[1])
if (fit.y[1]=="poisson")
score.yz <- poisson.scores(y=model.response(my), X=myz)
else if (fit.y[1]=="negative binomial")
score.yz <- nb.scores(y=model.response(my), X=myz)
else stop("fit.y has to be 'poisson' or 'negative binomial'")
npar.xz = dim(score.xz$dl.dtheta)[2]
npar.yz = dim(score.yz$dl.dtheta)[2]
xx = as.integer(model.response(mx))
nx = length(table(xx))
N = length(xx)
low.x = cbind(0, score.xz$Gamma)[cbind(1:N, xx)]
hi.x = cbind(1-score.xz$Gamma, 0)[cbind(1:N, xx)]
xz.presid <- low.x - hi.x
xz.dpresid.dtheta <- score.xz$dlow.dtheta - score.xz$dhi.dtheta
## return value
ans <- list(
TS=list(),
fisher=fisher,
conf.int=conf.int,
data.points=data.points
)
tb = corTS(xz.presid, score.yz$presid,
score.xz$dl.dtheta, score.yz$dl.dtheta,
score.xz$d2l.dtheta.dtheta, score.yz$d2l.dtheta.dtheta,
xz.dpresid.dtheta, score.yz$dpresid.dtheta,fisher)
tb.label = "PResid vs. PResid"
ans$TS$TB <-
list(
ts=tb$TS, var=tb$var.TS, pval=tb$pval.TS,
label = tb.label
)
rij <- cbind(score.xz$Gamma, 1)[cbind(1:N, xx)]
rij_1 <- cbind(0,score.xz$Gamma)[cbind(1:N, xx)]
pij <- rij-rij_1
G.inverse <- switch(link.x[1], logit = qlogis, probit = qnorm,
cloglog = qgumbel, cauchit = qcauchy)
xz.latent.resid <- rep(NA, N)
inverse_fail <- FALSE
for (i in 1:N){
tmp <- try(integrate(G.inverse, rij_1[i], rij[i])$value/pij[i],silent=TRUE)
if (inherits(tmp,'try-error')){
if (link.x[1] != 'cauchit')
warning("Cannot compute latent variable residual.")
else
warning("Cannot compute latent variable residual with link function cauchit.")
inverse_fail <- TRUE
break
} else {
xz.latent.resid[i] <- tmp
}
}
if (!inverse_fail){
### To compute dlatent.dtheta (need dgamma.dtheta and dp0.dtheta from ordinal scores)
xz.dlatent.dtheta = dpij.dtheta = matrix(, npar.xz, N)
drij_1.dtheta <- score.xz$dlow.dtheta
drij.dtheta <- -score.xz$dhi.dtheta
for(i in 1:N) {
dpij.dtheta[,i] <- score.xz$dp0.dtheta[i, xx[i],]
if (xx[i] == 1) {
xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
G.inverse(rij[i])*drij.dtheta[,i] - 0 )
} else if(xx[i] == nx){
xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
0 - G.inverse(rij_1[i])*drij_1.dtheta[,i] )
} else
xz.dlatent.dtheta[,i] <- -xz.latent.resid[i]/pij[i]*dpij.dtheta[,i] + 1/pij[i]*(
G.inverse(rij[i])*drij.dtheta[,i] - G.inverse(rij_1[i])*drij_1.dtheta[,i])
}
### latent.resid vs pearson resid
tc <- corTS(xz.latent.resid, score.yz$pearson.resid,
score.xz$dl.dtheta, score.yz$dl.dtheta,
score.xz$d2l.dtheta.dtheta, score.yz$d2l.dtheta.dtheta,
xz.dlatent.dtheta, score.yz$dpearson.resid.dtheta, fisher)
ans$TS$TC <-
list(
ts=tc$TS, var=tc$var.TS, pval=tc$pval.TS,
label = 'Latent.resid vs. Pearson.resid'
)
}
ans <- structure(ans, class="cocobot")
# Apply confidence intervals
for (i in seq_len(length(ans$TS))){
ts_ci <- getCI(ans$TS[[i]]$ts,ans$TS[[i]]$var,ans$fisher,conf.int)
ans$TS[[i]]$lower <- ts_ci[,1]
ans$TS[[i]]$upper <- ts_ci[,2]
}
return(ans)
}
#### example
## generate count by ordinal data
## generate.data3 = function(alphax, betax, alphay, betay, eta, N) {
## z = rnorm(N,0,1)
## x = y = numeric(N)
## ## px is an N x length(alphax) matrix.
## ## Each row has the TRUE cummulative probabilities for each subject.
## px = (1 + exp(- outer(alphax, betax*z, "+"))) ^ (-1)
## aa = runif(N)
## for(i in 1:N)
## x[i] = sum(aa[i] > px[,i])
## x = as.numeric(as.factor(x))
## y = rpois(N, exp(outer(alphay, betay*z+eta[x], "+")))
## return(list(x=as.factor(x), y=y, z=z))
## }
## set.seed(13)
## alphax = c(-1, 0, 1, 2)
## betax = 1
## alphay = 1
## betay = -.5
## #eta = rep(0, 5)
## eta = c(1:5)/20
## N = 200
## data <- generate.data3(alphax, betax, alphay, betay, eta, N)
## #### check for cocobot
## cocobot(x|y~z, data=data)
## countbot(x|y~z, data=data, fisher=TRUE)
## countbot(x|y~z, data=data, family="negative binomial")
|
test_that(paste("make_standata returns correct data names ",
"for fixed and random effects"), {
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1|subject), data = inhaler)),
c("N", "Y", "K", "X", "Z_1_1",
"J_1", "N_1", "M_1", "NC_1", "prior_only"))
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1+treat|id|subject), data = inhaler,
family = "categorical")),
c("N", "Y", "K_X2", "X_X2", "Z_1_X2_1", "Z_1_X2_2",
"K_X3", "X_X3", "Z_1_X3_3", "Z_1_X3_4",
"K_X4", "X_X4", "Z_1_X4_5", "Z_1_X4_6",
"J_1", "N_1", "M_1", "NC_1", "ncat",
"prior_only"))
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1+treat|subject), data = inhaler,
control = list(not4stan = TRUE))),
c("N", "Y", "K", "X", "Z_1", "J_1", "N_1", "M_1",
"NC_1", "prior_only"))
dat <- data.frame(y = 1:10, g = 1:10, h = 11:10, x = rep(0,10))
expect_equal(names(make_standata(y ~ x + (1|g) + (1|h), family = "poisson",
data = dat)),
c("N", "Y", "K", "X", "Z_1_1", "Z_2_1",
"J_1", "N_1", "M_1", "NC_1", "J_2", "N_2", "M_2", "NC_2",
"prior_only"))
expect_true(all(c("Z_1_1", "Z_1_2", "Z_2_1", "Z_2_2") %in%
names(make_standata(y ~ x + (1+x|g/h), dat))))
expect_equal(make_standata(y ~ x + (1+x|g+h), dat),
make_standata(y ~ x + (1+x|g) + (1+x|h), dat))
})
test_that(paste("make_standata handles variables used as fixed effects",
"and grouping factors at the same time"), {
data <- data.frame(y = 1:9, x = factor(rep(c("a","b","c"), 3)))
standata <- make_standata(y ~ x + (1|x), data = data)
expect_equal(colnames(standata$X), c("Intercept", "xb", "xc"))
expect_equal(standata$J_1, as.array(rep(1:3, 3)))
standata2 <- make_standata(y ~ x + (1|x), data = data,
control = list(not4stan = TRUE))
expect_equal(colnames(standata2$X), c("Intercept", "xb", "xc"))
})
test_that(paste("make_standata returns correct data names",
"for addition and cs variables"), {
dat <- data.frame(y = 1:10, w = 1:10, t = 1:10, x = rep(0,10),
c = sample(-1:1,10,TRUE))
expect_equal(names(make_standata(y | se(w) ~ x, dat, gaussian())),
c("N", "Y", "K", "X", "se", "prior_only"))
expect_equal(names(make_standata(y | weights(w) ~ x, dat, "gaussian")),
c("N", "Y", "K", "X", "weights", "prior_only"))
expect_equal(names(make_standata(y | cens(c) ~ x, dat, "student")),
c("N", "Y", "K", "X", "cens", "prior_only"))
expect_equal(names(make_standata(y | trials(t) ~ x, dat, "binomial")),
c("N", "Y", "K", "X", "trials", "prior_only"))
expect_equal(names(make_standata(y | trials(10) ~ x, dat, "binomial")),
c("N", "Y", "K", "X", "trials", "prior_only"))
expect_equal(names(make_standata(y | cat(11) ~ x, dat, "acat")),
c("N", "Y", "K", "X", "disc", "ncat", "prior_only"))
expect_equal(names(make_standata(y | cat(10) ~ x, dat, cumulative())),
c("N", "Y", "K", "X", "disc", "ncat", "prior_only"))
sdata <- make_standata(y | trunc(0,20) ~ x, dat, "gaussian")
expect_true(all(sdata$lb == 0) && all(sdata$ub == 20))
sdata <- make_standata(y | trunc(ub = 21:30) ~ x, dat)
expect_true(all(all(sdata$ub == 21:30)))
})
test_that(paste("make_standata accepts correct response variables",
"depending on the family"), {
expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(-9.9,0,0.1)),
family = "student")$Y, as.array(seq(-9.9,0,0.1)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = 1:10),
family = "binomial")$Y, as.array(1:10))
expect_equal(make_standata(y ~ 1, data = data.frame(y = 10:20),
family = "poisson")$Y, as.array(10:20))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-c(1:2),5)),
family = "bernoulli")$Y, as.array(rep(1:0,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(c(TRUE, FALSE),5)),
family = "bernoulli")$Y, as.array(rep(1:0,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-4:5,5)),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = factor(rep(-4:5,5))),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)),
family = "cumulative")$Y, as.array(rep(1:10,5)))
dat <- data.frame(y = factor(rep(-4:5,5), order = TRUE))
expect_equal(make_standata(y ~ 1, data = dat, family = "acat")$Y,
as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(1,10,0.1)),
family = "exponential")$Y, as.array(seq(1,10,0.1)))
dat <- data.frame(y1 = 1:10, y2 = 11:20, x = rep(0,10))
expect_equal(unname(make_standata(cbind(y1,y2) ~ x, data = dat)$Y),
cbind(1:10, 11:20))
})
test_that(paste("make_standata rejects incorrect response variables",
"depending on the family"), {
expect_error(make_standata(y ~ 1, data = data.frame(y = factor(1:10)),
family = "student"),
"Family 'student' requires numeric responses")
expect_error(make_standata(y ~ 1, data = data.frame(y = -5:5),
family = "geometric"),
"Family 'geometric' requires responses to be non-negative integers")
expect_error(make_standata(y ~ 1, data = data.frame(y = -1:1),
family = "bernoulli"),
"contain only two different values")
expect_error(make_standata(y ~ 1, data = data.frame(y = factor(-1:1)),
family = "cratio"),
"Family 'cratio' requires either integers or ordered factors")
expect_error(make_standata(y ~ 1, data = data.frame(y = rep(0.5:7.5), 2),
family = "sratio"),
"Family 'sratio' requires either integers or ordered factors")
expect_error(make_standata(y ~ 1, data = data.frame(y = rep(-7.5:7.5), 2),
family = "gamma"),
"Family 'gamma' requires responses to be positive")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 1)),
family = Beta()),
"requires responses between 0 and 1")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 4)),
family = von_mises()),
"requires responses between -pi and pi")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(-1, 2, 5)),
family = hurdle_gamma()),
"requires responses to be non-negative")
})
test_that("make_standata suggests using family bernoulli if appropriate", {
expect_message(make_standata(y ~ 1, data = data.frame(y = rep(0:1,5)),
family = "binomial"),
paste("family 'bernoulli' might be a more efficient choice."))
expect_message(make_standata(y ~ 1, data = data.frame(y = rep(0:1,5)),
family = "acat"),
paste("family 'bernoulli' might be a more efficient choice."))
expect_error(make_standata(y ~ 1, data = data.frame(y = rep(0:1,5)),
family = "categorical"),
paste("At least 3 response categories are required"))
})
test_that("make_standata returns correct values for addition terms", {
dat <- data.frame(y = rnorm(9), s = 1:9, w = 1:9, c1 = rep(-1:1, 3),
c2 = rep(c("left","none","right"), 3),
c3 = c(rep(c(TRUE, FALSE), 4), FALSE),
c4 = c(sample(-1:1, 5, TRUE), rep(2, 4)),
t = 11:19)
expect_equivalent(make_standata(y | se(s) ~ 1, data = dat)$se,
as.array(1:9))
expect_equal(make_standata(y | weights(w) ~ 1, data = dat)$weights,
as.array(1:9))
expect_equal(make_standata(y | disp(w) ~ 1, data = dat)$disp,
as.array(1:9))
expect_equal(make_standata(y | cens(c1) ~ 1, data = dat)$cens,
as.array(rep(-1:1, 3)))
expect_equal(make_standata(y | cens(c2) ~ 1, data = dat)$cens,
as.array(rep(-1:1, 3)))
expect_equal(make_standata(y | cens(c3) ~ 1, data = dat)$cens,
as.array(c(rep(1:0, 4), 0)))
expect_equal(make_standata(y | cens(c4, y + 2) ~ 1, data = dat)$rcens,
as.array(c(rep(0, 5), dat$y[6:9] + 2)))
expect_equal(make_standata(s ~ 1, dat, family = "binomial")$trials,
as.array(rep(9, 9)))
expect_equal(make_standata(s | trials(10) ~ 1, dat,
family = "binomial")$trials,
as.array(rep(10, 9)))
expect_equal(make_standata(s | trials(t) ~ 1, data = dat,
family = "binomial")$trials,
as.array(11:19))
expect_equal(make_standata(s | cat(19) ~ 1, data = dat,
family = "cumulative")$ncat,
19)
})
test_that("make_standata rejects incorrect addition terms", {
dat <- data.frame(y = rnorm(9), s = -(1:9), w = -(1:9),
c = rep(-2:0, 3), t = 9:1, z = 1:9)
expect_error(make_standata(y | se(s) ~ 1, data = dat),
"Standard errors must be non-negative")
expect_error(make_standata(y | weights(w) ~ 1, data = dat),
"Weights must be non-negative")
expect_error(make_standata(y | cens(c) ~ 1, data = dat))
expect_error(make_standata(z | trials(t) ~ 1, data = dat,
family = "binomial"),
"Number of trials is smaller than the response variable")
})
test_that("make_standata handles multivariate models", {
dat <- data.frame(y1 = 1:10, y2 = 11:20, w = 1:10,
x = rep(0,10), tim = 10:1, g = rep(1:2,5))
sdata <- make_standata(cbind(y1, y2) | weights(w) ~ x, data = dat)
expect_equal(colnames(sdata$Y), c("y1", "y2"))
expect_equal(sdata$weights, as.array(1:10))
sdata <- make_standata(cbind(y1, y2, y2) ~ x, data = dat)
expect_equal(colnames(sdata$Y), c("y1", "y2", "y21"))
sdata <- make_standata(cbind(y1 / y2, y2, y1 * 3) ~ x, data = dat)
expect_equal(colnames(sdata$Y), c("response1", "y2", "response3"))
sdata <- make_standata(cbind(y1, y2) ~ x, dat,
autocor = cor_ar(~ tim | g))
target <- cbind(c(seq(9, 1, -2), seq(10, 2, -2)),
c(seq(19, 11, -2), seq(20, 12, -2)))
expect_equal(unname(sdata$Y), target)
})
test_that(paste("make_standata returns correct data",
"for autocorrelations structures"), {
dat <- data.frame(y=1:10, x=rep(0,10), tim=10:1, g = rep(3:4,5))
expect_equal(make_standata(y ~ x, data = dat,
autocor = cor_arr(~tim|g))$Yarr,
cbind(c(0,9,7,5,3,0,10,8,6,4)))
expect_equal(make_standata(y ~ x, data = dat,
autocor = cor_arr(~tim|g, r = 2))$Yarr,
cbind(c(0,9,7,5,3,0,10,8,6,4), c(0,0,9,7,5,0,0,10,8,6)))
expect_equal(make_standata(y ~ x, data = dat,
autocor = cor_ma(~tim|g))$tg,
c(rep(1,5), rep(2,5)))
expect_equal(make_standata(y ~ x, data = dat,
autocor = cor_ar(~tim|g))$tg,
c(rep(1,5), rep(2,5)))
standata <- make_standata(y ~ x, data = dat,
autocor = cor_ar(~tim|g, cov = TRUE))
expect_equal(standata$begin_tg, as.array(c(1, 6)))
expect_equal(standata$nobs_tg, as.array(c(5, 5)))
})
test_that("make_standata allows to retrieve the initial data order", {
dat <- data.frame(y1 = rnorm(100), y2 = rnorm(100),
id = sample(1:10, 100, TRUE),
time = sample(1:100, 100))
# univariate model
sdata1 <- make_standata(y1 ~ 1, data = dat,
autocor = cor_ar(~time|id),
control = list(save_order = TRUE))
expect_equal(dat$y1, as.numeric(sdata1$Y[attr(sdata1, "old_order")]))
# multivariate model
sdata2 <- make_standata(cbind(y1, y2) ~ 1, data = dat,
autocor = cor_ma(~time|id),
control = list(save_order = TRUE))
expect_equal(c(dat$y1, dat$y2),
as.numeric(sdata2$Y[attr(sdata2, "old_order"), ]))
})
test_that("make_standata handles covariance matrices correctly", {
A <- structure(diag(1, 4), dimnames = list(1:4, NULL))
expect_equivalent(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
cov_ranef = list(visit = A))$Lcov_1, A)
B <- diag(1, 4)
expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
cov_ranef = list(visit = B)),
"Row names are required")
B <- structure(diag(1, 4), dimnames = list(2:5, NULL))
expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
cov_ranef = list(visit = B)),
"Row names .* do not match")
B <- structure(diag(1:5), dimnames = list(c(1,5,2,4,3), NULL))
expect_equivalent(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
cov_ranef = list(visit = B))$Lcov_1,
t(chol(B[c(1,3,5,4), c(1,3,5,4)])))
B <- A
B[1,2] <- 0.5
expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
cov_ranef = list(visit = B)),
"not symmetric")
})
test_that("(deprecated) brmdata is backwards compatible", {
dat <- data.frame(y = 1:10, x = sample(1:5, 10, TRUE))
expect_identical(SW(brmdata(y ~ x + (1|x), data = dat,
family = "poisson")),
make_standata(y ~ x + (1|x), data = dat,
family = "poisson"))
expect_identical(SW(brmdata(y ~ 1, data = dat,
family = "acat", partial = ~ x)),
SW(make_standata(y ~ 1, data = dat,
family = "acat", partial = ~ x)))
})
test_that("make_standata correctly prepares data for non-linear models", {
flist <- list(a ~ x + (1|1|g), b ~ mono(z) + (1|1|g))
data <- data.frame(y = rnorm(9), x = rnorm(9), z = sample(1:9, 9),
g = rep(1:3, 3))
sdata <- make_standata(bf(y ~ a - b^z, flist = flist, nl = TRUE),
data = data)
expect_equal(names(sdata),
c("N", "Y", "C_1", "K_a", "X_a", "Z_1_a_1",
"K_b", "X_b", "Kmo_b", "Xmo_b", "Jmo_b",
"con_simplex_b_1", "Z_1_b_2", "J_1", "N_1",
"M_1", "NC_1", "prior_only")
)
expect_equal(colnames(sdata$X_a), c("Intercept", "x"))
expect_equal(sdata$J_1, as.array(data$g))
sdata <- make_standata(bf(y ~ a - b^z, flist = flist, nl = TRUE),
data = data, control = list(not4stan = TRUE))
expect_equal(colnames(sdata$C), "z")
})
test_that("make_standata correctly prepares data for monotonic effects", {
data <- data.frame(y = rpois(120, 10), x1 = rep(1:4, 30),
x2 = factor(rep(c("a", "b", "c"), 40), ordered = TRUE))
sdata <- make_standata(y ~ mono(x1 + x2), data = data)
expect_true(all(c("Xmo", "Jmo", "con_simplex_1", "con_simplex_2") %in% names(sdata)))
expect_equivalent(sdata$Xmo, cbind(data$x1 - 1, as.numeric(data$x2) - 1))
expect_equal(as.vector(unname(sdata$Jmo)),
c(max(data$x1) - 1, length(unique(data$x2)) - 1))
expect_equal(sdata$con_simplex_1, rep(1, 3))
prior <- set_prior("dirichlet(1:3)", coef = "x1",
class = "simplex", nlpar = "sigma")
sdata <- make_standata(bf(y ~ 1, sigma ~ mono(x1)),
data = data, prior = prior)
expect_equal(sdata$con_simplex_sigma_1, 1:3)
prior <- c(set_prior("normal(0,1)", class = "b", coef = "x"),
set_prior("dirichlet(c(1,0.5,2))", class = "simplex", coef = "x1"))
sdata <- make_standata(y ~ monotonic(x1 + x2), data = data, prior = prior)
expect_equal(sdata$con_simplex_1, c(1,0.5,2))
prior <- c(set_prior("dirichlet(c(1,0.5,2))", class = "simplex", coef = "x2"))
expect_error(make_standata(y ~ monotonic(x1 + x2), data = data, prior = prior),
"Invalid Dirichlet prior for the simplex of coefficient 'x2'",
fixed = TRUE)
})
test_that("make_standata returns fixed residual covariance matrices", {
data <- data.frame(y = 1:5)
V <- diag(5)
expect_equal(make_standata(y~1, data, autocor = SW(cor_fixed(V)))$V, V)
expect_error(make_standata(y~1, data, autocor = cor_fixed(diag(2))),
"'V' must have the same number of rows as 'data'")
})
test_that("make_standata returns data for bsts models", {
dat <- data.frame(y = 1:5, g = c(1:3, sample(1:3, 2, TRUE)), t = 1:5)
expect_equal(make_standata(y~1, data = dat, autocor = cor_bsts(~t|g))$tg,
as.array(sort(dat$g)))
expect_equivalent(make_standata(bf(y~1, sigma ~ 1), data = dat,
autocor = cor_bsts(~t|g))$X_sigma[, 1],
rep(1, nrow(dat)))
})
test_that("make_standata returns data for GAMMs", {
dat <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10),
x3 = rnorm(10), z = rnorm(10), g = factor(rep(1:2, 5)))
sdata <- make_standata(y ~ s(x1) + z + s(x2, by = x3), data = dat)
expect_equal(sdata$nb_1, 1)
expect_equal(as.vector(sdata$knots_2), 8)
expect_equal(dim(sdata$Zs_1_1), c(10, 8))
expect_equal(dim(sdata$Zs_2_1), c(10, 8))
sdata <- make_standata(bf(y ~ lp, lp ~ s(x1) + z + s(x2, by = x3),
nl = TRUE), data = dat)
expect_equal(sdata$nb_lp_1, 1)
expect_equal(as.vector(sdata$knots_lp_2), 8)
expect_equal(dim(sdata$Zs_lp_1_1), c(10, 8))
expect_equal(dim(sdata$Zs_lp_2_1), c(10, 8))
sdata <- make_standata(y ~ g + s(x2, by = g), data = dat)
expect_true(all(c("knots_1", "knots_2") %in% names(sdata)))
sdata <- make_standata(y ~ t2(x1, x2), data = dat)
expect_equal(sdata$nb_1, 3)
expect_equal(as.vector(sdata$knots_1), c(9, 6, 6))
expect_equal(dim(sdata$Zs_1_1), c(10, 9))
expect_equal(dim(sdata$Zs_1_3), c(10, 6))
expect_error(make_standata(y ~ te(x1, x2), data = dat),
"smooths 'te' and 'ti' are not yet implemented")
})
test_that("make_standata returns correct group ID data", {
form <- bf(count ~ Trt_c + (1+Trt_c|3|visit) + (1|patient),
shape ~ (1|3|visit) + (Trt_c||patient))
sdata <- make_standata(form, data = epilepsy, family = negbinomial())
expect_true(all(c("Z_1_1", "Z_2_2", "Z_3_shape_1", "Z_2_shape_3") %in%
names(sdata)))
form <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt_c||patient),
a ~ Trt_c + (1+Trt_c|3|visit) + (1|patient), nl = TRUE)
sdata <- make_standata(form, data = epilepsy, family = student())
expect_true(all(c("Z_3_sigma_1", "Z_2_a_1", "Z_2_sigma_3",
"Z_1_a_1") %in% names(sdata)))
})
test_that("make_standata handles population-level intercepts", {
dat <- data.frame(y = 10:1, x = 1:10)
sdata <- make_standata(y ~ 0 + x, data = dat)
expect_equal(unname(sdata$X[, 1]), dat$x)
sdata <- make_standata(y ~ x, dat, cumulative(),
control = list(not4stan = TRUE))
expect_equal(unname(sdata$X[, 1]), dat$x)
sdata <- make_standata(y ~ 0 + intercept + x, data = dat)
expect_equal(unname(sdata$X), cbind(1, dat$x))
})
test_that("make_standata handles category specific effects", {
sdata <- make_standata(rating ~ period + carry + cse(treat),
data = inhaler, family = sratio())
expect_equivalent(sdata$Xcs, matrix(inhaler$treat))
sdata <- make_standata(rating ~ period + carry + cse(treat) + (cse(1)|subject),
data = inhaler, family = acat())
expect_equivalent(sdata$Z_1_3, as.array(rep(1, nrow(inhaler))))
sdata <- make_standata(rating ~ period + carry + (cse(treat)|subject),
data = inhaler, family = cratio())
expect_equivalent(sdata$Z_1_4, as.array(inhaler$treat))
expect_error(make_standata(rating ~ 1 + cse(treat), data = inhaler,
family = "cumulative"), "only meaningful")
expect_error(make_standata(rating ~ 1 + (treat + cse(1)|subject),
data = inhaler, family = "cratio"),
"category specific effects in separate group-level terms")
})
test_that("make_standata handles wiener diffusion models", {
dat <- RWiener::rwiener(n=100, alpha=2, tau=.3, beta=.5, delta=.5)
dat$x <- rnorm(100)
dat$dec <- ifelse(dat$resp == "lower", 0, 1)
dat$test <- "a"
sdata <- make_standata(q | dec(resp) ~ x, data = dat, family = wiener())
expect_equal(sdata$dec, as.array(dat$dec))
sdata <- make_standata(q | dec(dec) ~ x, data = dat, family = wiener())
expect_equal(sdata$dec, as.array(dat$dec))
expect_error(make_standata(q | dec(test) ~ x, data = dat, family = wiener()),
"Decisions should be 'lower' or 'upper'")
})
test_that("make_standata handles noise-free terms", {
N <- 30
dat <- data.frame(y = rnorm(N), x = rnorm(N), z = rnorm(N),
xsd = abs(rnorm(N, 1)), zsd = abs(rnorm(N, 1)),
ID = rep(1:5, each = N / 5))
sdata <- make_standata(y ~ me(x, xsd)*me(z, zsd)*x, data = dat)
expect_equal(sdata$Xn_1, as.array(dat$x))
expect_equal(sdata$noise_2, as.array(dat$zsd))
expect_equal(unname(sdata$Cme_3), dat$x)
expect_equal(sdata$Kme, 6)
})
test_that("make_standata handles multi-membership models", {
dat <- data.frame(y = rnorm(10), g1 = c(7:2, rep(10, 4)),
g2 = 1:10, w1 = rep(1, 10),
w2 = rep(abs(rnorm(10))))
sdata <- make_standata(y ~ (1|mm(g1,g2,g1,g2)), data = dat)
expect_true(all(paste0(c("W_1_", "J_1_"), 1:4) %in% names(sdata)))
expect_equal(sdata$W_1_4, rep(0.25, 10))
# this checks whether combintation of factor levels works as intended
expect_equal(sdata$J_1_1, as.array(c(6, 5, 4, 3, 2, 1, 7, 7, 7, 7)))
expect_equal(sdata$J_1_2, as.array(c(8, 1, 2, 3, 4, 5, 6, 9, 10, 7)))
})
test_that("make_standata handles calls to the 'poly' function", {
dat <- data.frame(y = rnorm(10), x = rnorm(10))
expect_equal(colnames(make_standata(y ~ 1 + poly(x, 3), dat)$X),
c("Intercept", "polyx31", "polyx32", "polyx33"))
})
test_that("make_standata allows fixed auxiliary parameters", {
dat <- list(y = 1:10)
expect_equal(make_standata(bf(y ~ 1, nu = 3), dat, student())$nu, 3)
expect_equal(make_standata(y ~ 1, dat, acat())$disc, 1)
expect_error(make_standata(bf(y ~ 1, bias = 0.5), dat),
"Invalid auxiliary parameters: 'bias'")
})
test_that("make_standata correctly includes offsets", {
data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1)
sdata <- make_standata(bf(y ~ x + offset(c), sigma ~ offset(c + 1)), data)
expect_equal(sdata$offset, data$c)
expect_equal(sdata$offset_sigma, data$c + 1)
sdata <- make_standata(y ~ x + offset(c) + offset(x), data)
expect_equal(sdata$offset, data$c + data$x)
})
test_that("make_standata includes data for mixture models", {
data <- data.frame(y = rnorm(10), x = rnorm(10), c = 1)
form <- bf(y ~ x, mu1 ~ 1, family = mixture(gaussian, gaussian))
sdata <- make_standata(form, data)
expect_equal(sdata$con_theta, c(1, 1))
expect_equal(dim(sdata$X_mu1), c(10, 1))
expect_equal(dim(sdata$X_mu2), c(10, 2))
form <- bf(y ~ x, family = mixture(gaussian, gaussian))
sdata <- make_standata(form, data, prior = prior(dirichlet(10, 2), theta))
expect_equal(sdata$con_theta, c(10, 2))
form <- bf(y ~ x, theta1 = 1, theta2 = 3, family = mixture(gaussian, gaussian))
sdata <- make_standata(form, data)
expect_equal(sdata$theta1, 1/4)
expect_equal(sdata$theta2, 3/4)
})
test_that("make_standata includes data for Gaussian processes", {
dat <- data.frame(y = rnorm(10), x1 = sample(1:10, 10))
sdata <- make_standata(y ~ gp(x1), dat)
expect_equal(max(sdata$Xgp_1) - min(sdata$Xgp_1), 1)
sdata <- make_standata(y ~ gp(x1, scale = FALSE), dat)
expect_equal(max(sdata$Xgp_1) - min(sdata$Xgp_1), 9)
})
| /tests/testthat/tests.make_standata.R | no_license | aforren1/brms | R | false | false | 24,980 | r | test_that(paste("make_standata returns correct data names ",
"for fixed and random effects"), {
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1|subject), data = inhaler)),
c("N", "Y", "K", "X", "Z_1_1",
"J_1", "N_1", "M_1", "NC_1", "prior_only"))
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1+treat|id|subject), data = inhaler,
family = "categorical")),
c("N", "Y", "K_X2", "X_X2", "Z_1_X2_1", "Z_1_X2_2",
"K_X3", "X_X3", "Z_1_X3_3", "Z_1_X3_4",
"K_X4", "X_X4", "Z_1_X4_5", "Z_1_X4_6",
"J_1", "N_1", "M_1", "NC_1", "ncat",
"prior_only"))
expect_equal(names(make_standata(rating ~ treat + period + carry
+ (1+treat|subject), data = inhaler,
control = list(not4stan = TRUE))),
c("N", "Y", "K", "X", "Z_1", "J_1", "N_1", "M_1",
"NC_1", "prior_only"))
dat <- data.frame(y = 1:10, g = 1:10, h = 11:10, x = rep(0,10))
expect_equal(names(make_standata(y ~ x + (1|g) + (1|h), family = "poisson",
data = dat)),
c("N", "Y", "K", "X", "Z_1_1", "Z_2_1",
"J_1", "N_1", "M_1", "NC_1", "J_2", "N_2", "M_2", "NC_2",
"prior_only"))
expect_true(all(c("Z_1_1", "Z_1_2", "Z_2_1", "Z_2_2") %in%
names(make_standata(y ~ x + (1+x|g/h), dat))))
expect_equal(make_standata(y ~ x + (1+x|g+h), dat),
make_standata(y ~ x + (1+x|g) + (1+x|h), dat))
})
test_that(paste("make_standata handles variables used as fixed effects",
"and grouping factors at the same time"), {
data <- data.frame(y = 1:9, x = factor(rep(c("a","b","c"), 3)))
standata <- make_standata(y ~ x + (1|x), data = data)
expect_equal(colnames(standata$X), c("Intercept", "xb", "xc"))
expect_equal(standata$J_1, as.array(rep(1:3, 3)))
standata2 <- make_standata(y ~ x + (1|x), data = data,
control = list(not4stan = TRUE))
expect_equal(colnames(standata2$X), c("Intercept", "xb", "xc"))
})
test_that(paste("make_standata returns correct data names",
"for addition and cs variables"), {
dat <- data.frame(y = 1:10, w = 1:10, t = 1:10, x = rep(0,10),
c = sample(-1:1,10,TRUE))
expect_equal(names(make_standata(y | se(w) ~ x, dat, gaussian())),
c("N", "Y", "K", "X", "se", "prior_only"))
expect_equal(names(make_standata(y | weights(w) ~ x, dat, "gaussian")),
c("N", "Y", "K", "X", "weights", "prior_only"))
expect_equal(names(make_standata(y | cens(c) ~ x, dat, "student")),
c("N", "Y", "K", "X", "cens", "prior_only"))
expect_equal(names(make_standata(y | trials(t) ~ x, dat, "binomial")),
c("N", "Y", "K", "X", "trials", "prior_only"))
expect_equal(names(make_standata(y | trials(10) ~ x, dat, "binomial")),
c("N", "Y", "K", "X", "trials", "prior_only"))
expect_equal(names(make_standata(y | cat(11) ~ x, dat, "acat")),
c("N", "Y", "K", "X", "disc", "ncat", "prior_only"))
expect_equal(names(make_standata(y | cat(10) ~ x, dat, cumulative())),
c("N", "Y", "K", "X", "disc", "ncat", "prior_only"))
sdata <- make_standata(y | trunc(0,20) ~ x, dat, "gaussian")
expect_true(all(sdata$lb == 0) && all(sdata$ub == 20))
sdata <- make_standata(y | trunc(ub = 21:30) ~ x, dat)
expect_true(all(all(sdata$ub == 21:30)))
})
test_that(paste("make_standata accepts correct response variables",
"depending on the family"), {
expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(-9.9,0,0.1)),
family = "student")$Y, as.array(seq(-9.9,0,0.1)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = 1:10),
family = "binomial")$Y, as.array(1:10))
expect_equal(make_standata(y ~ 1, data = data.frame(y = 10:20),
family = "poisson")$Y, as.array(10:20))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-c(1:2),5)),
family = "bernoulli")$Y, as.array(rep(1:0,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(c(TRUE, FALSE),5)),
family = "bernoulli")$Y, as.array(rep(1:0,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(-4:5,5)),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = factor(rep(-4:5,5))),
family = "categorical")$Y, as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = rep(1:10,5)),
family = "cumulative")$Y, as.array(rep(1:10,5)))
dat <- data.frame(y = factor(rep(-4:5,5), order = TRUE))
expect_equal(make_standata(y ~ 1, data = dat, family = "acat")$Y,
as.array(rep(1:10,5)))
expect_equal(make_standata(y ~ 1, data = data.frame(y = seq(1,10,0.1)),
family = "exponential")$Y, as.array(seq(1,10,0.1)))
dat <- data.frame(y1 = 1:10, y2 = 11:20, x = rep(0,10))
expect_equal(unname(make_standata(cbind(y1,y2) ~ x, data = dat)$Y),
cbind(1:10, 11:20))
})
test_that(paste("make_standata rejects incorrect response variables",
"depending on the family"), {
expect_error(make_standata(y ~ 1, data = data.frame(y = factor(1:10)),
family = "student"),
"Family 'student' requires numeric responses")
expect_error(make_standata(y ~ 1, data = data.frame(y = -5:5),
family = "geometric"),
"Family 'geometric' requires responses to be non-negative integers")
expect_error(make_standata(y ~ 1, data = data.frame(y = -1:1),
family = "bernoulli"),
"contain only two different values")
expect_error(make_standata(y ~ 1, data = data.frame(y = factor(-1:1)),
family = "cratio"),
"Family 'cratio' requires either integers or ordered factors")
expect_error(make_standata(y ~ 1, data = data.frame(y = rep(0.5:7.5), 2),
family = "sratio"),
"Family 'sratio' requires either integers or ordered factors")
expect_error(make_standata(y ~ 1, data = data.frame(y = rep(-7.5:7.5), 2),
family = "gamma"),
"Family 'gamma' requires responses to be positive")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 1)),
family = Beta()),
"requires responses between 0 and 1")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(0, 0.5, 4)),
family = von_mises()),
"requires responses between -pi and pi")
expect_error(make_standata(y ~ 1, data = data.frame(y = c(-1, 2, 5)),
family = hurdle_gamma()),
"requires responses to be non-negative")
})
# A binary response passed to 'binomial' or 'acat' should trigger a message
# recommending the more efficient 'bernoulli' family; 'categorical' errors.
test_that("make_standata suggests using family bernoulli if appropriate", {
  binary_dat <- data.frame(y = rep(0:1, 5))
  msg <- paste("family 'bernoulli' might be a more efficient choice.")
  expect_message(make_standata(y ~ 1, data = binary_dat, family = "binomial"),
                 msg)
  expect_message(make_standata(y ~ 1, data = binary_dat, family = "acat"),
                 msg)
  expect_error(make_standata(y ~ 1, data = binary_dat, family = "categorical"),
               paste("At least 3 response categories are required"))
})
# Response 'addition' terms (se, weights, disp, cens, trials, cat) must be
# passed through to the Stan data in the expected form.
test_that("make_standata returns correct values for addition terms", {
  df_add <- data.frame(
    y = rnorm(9), s = 1:9, w = 1:9, c1 = rep(-1:1, 3),
    c2 = rep(c("left", "none", "right"), 3),
    c3 = c(rep(c(TRUE, FALSE), 4), FALSE),
    c4 = c(sample(-1:1, 5, TRUE), rep(2, 4)),
    t = 11:19
  )
  expect_equivalent(make_standata(y | se(s) ~ 1, data = df_add)$se,
                    as.array(1:9))
  expect_equal(make_standata(y | weights(w) ~ 1, data = df_add)$weights,
               as.array(1:9))
  expect_equal(make_standata(y | disp(w) ~ 1, data = df_add)$disp,
               as.array(1:9))
  # censoring indicators may be coded as integers, strings, or logicals
  expect_equal(make_standata(y | cens(c1) ~ 1, data = df_add)$cens,
               as.array(rep(-1:1, 3)))
  expect_equal(make_standata(y | cens(c2) ~ 1, data = df_add)$cens,
               as.array(rep(-1:1, 3)))
  expect_equal(make_standata(y | cens(c3) ~ 1, data = df_add)$cens,
               as.array(c(rep(1:0, 4), 0)))
  # interval censoring: the upper bounds land in 'rcens'
  expect_equal(make_standata(y | cens(c4, y + 2) ~ 1, data = df_add)$rcens,
               as.array(c(rep(0, 5), df_add$y[6:9] + 2)))
  expect_equal(make_standata(s ~ 1, df_add, family = "binomial")$trials,
               as.array(rep(9, 9)))
  expect_equal(make_standata(s | trials(10) ~ 1, df_add,
                             family = "binomial")$trials,
               as.array(rep(10, 9)))
  expect_equal(make_standata(s | trials(t) ~ 1, data = df_add,
                             family = "binomial")$trials,
               as.array(11:19))
  expect_equal(make_standata(s | cat(19) ~ 1, data = df_add,
                             family = "cumulative")$ncat,
               19)
})
# Invalid inputs to addition terms must raise informative errors.
test_that("make_standata rejects incorrect addition terms", {
  bad <- data.frame(y = rnorm(9), s = -(1:9), w = -(1:9),
                    c = rep(-2:0, 3), t = 9:1, z = 1:9)
  expect_error(make_standata(y | se(s) ~ 1, data = bad),
               "Standard errors must be non-negative")
  expect_error(make_standata(y | weights(w) ~ 1, data = bad),
               "Weights must be non-negative")
  # -2 is not a valid censoring code
  expect_error(make_standata(y | cens(c) ~ 1, data = bad))
  expect_error(make_standata(z | trials(t) ~ 1, data = bad,
                             family = "binomial"),
               "Number of trials is smaller than the response variable")
})
# Multivariate (multi-response) formulas: column naming of Y, weights,
# duplicated/transformed responses, and row reordering under AR terms.
test_that("make_standata handles multivariate models", {
  mv_dat <- data.frame(y1 = 1:10, y2 = 11:20, w = 1:10,
                       x = rep(0, 10), tim = 10:1, g = rep(1:2, 5))
  out <- make_standata(cbind(y1, y2) | weights(w) ~ x, data = mv_dat)
  expect_equal(colnames(out$Y), c("y1", "y2"))
  expect_equal(out$weights, as.array(1:10))
  # duplicated response columns receive de-duplicated names
  out <- make_standata(cbind(y1, y2, y2) ~ x, data = mv_dat)
  expect_equal(colnames(out$Y), c("y1", "y2", "y21"))
  # transformed responses receive generic names
  out <- make_standata(cbind(y1 / y2, y2, y1 * 3) ~ x, data = mv_dat)
  expect_equal(colnames(out$Y), c("response1", "y2", "response3"))
  # with AR autocorrelation the rows are sorted by group and time
  out <- make_standata(cbind(y1, y2) ~ x, mv_dat,
                       autocor = cor_ar(~ tim | g))
  target <- cbind(c(seq(9, 1, -2), seq(10, 2, -2)),
                  c(seq(19, 11, -2), seq(20, 12, -2)))
  expect_equal(unname(out$Y), target)
})
# Autocorrelation structures: ARR lagged-response matrices, group indices
# for MA/AR terms, and group boundaries for the covariance-based AR variant.
test_that(paste("make_standata returns correct data",
                "for autocorrelations structures"), {
  ac_dat <- data.frame(y = 1:10, x = rep(0, 10), tim = 10:1, g = rep(3:4, 5))
  arr1 <- make_standata(y ~ x, data = ac_dat,
                        autocor = cor_arr(~tim|g))$Yarr
  expect_equal(arr1, cbind(c(0,9,7,5,3,0,10,8,6,4)))
  arr2 <- make_standata(y ~ x, data = ac_dat,
                        autocor = cor_arr(~tim|g, r = 2))$Yarr
  expect_equal(arr2,
               cbind(c(0,9,7,5,3,0,10,8,6,4), c(0,0,9,7,5,0,0,10,8,6)))
  expect_equal(make_standata(y ~ x, data = ac_dat,
                             autocor = cor_ma(~tim|g))$tg,
               c(rep(1, 5), rep(2, 5)))
  expect_equal(make_standata(y ~ x, data = ac_dat,
                             autocor = cor_ar(~tim|g))$tg,
               c(rep(1, 5), rep(2, 5)))
  # the covariance formulation exposes start index and size of each group
  cov_out <- make_standata(y ~ x, data = ac_dat,
                           autocor = cor_ar(~tim|g, cov = TRUE))
  expect_equal(cov_out$begin_tg, as.array(c(1, 6)))
  expect_equal(cov_out$nobs_tg, as.array(c(5, 5)))
})
# The 'save_order' control option must allow mapping the (re)sorted Stan
# data back onto the original row order of the input data.
test_that("make_standata allows to retrieve the initial data order", {
  ord_dat <- data.frame(y1 = rnorm(100), y2 = rnorm(100),
                        id = sample(1:10, 100, TRUE),
                        time = sample(1:100, 100))
  # univariate model
  uni <- make_standata(y1 ~ 1, data = ord_dat,
                       autocor = cor_ar(~time|id),
                       control = list(save_order = TRUE))
  expect_equal(ord_dat$y1, as.numeric(uni$Y[attr(uni, "old_order")]))
  # multivariate model
  mv <- make_standata(cbind(y1, y2) ~ 1, data = ord_dat,
                      autocor = cor_ma(~time|id),
                      control = list(save_order = TRUE))
  expect_equal(c(ord_dat$y1, ord_dat$y2),
               as.numeric(mv$Y[attr(mv, "old_order"), ]))
})
# User-supplied covariance matrices for group-level effects: row names are
# mandatory, must match the grouping levels, and the matrix must be symmetric.
test_that("make_standata handles covariance matrices correctly", {
  A <- structure(diag(1, 4), dimnames = list(1:4, NULL))
  expect_equivalent(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
                                  cov_ranef = list(visit = A))$Lcov_1, A)
  expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
                             cov_ranef = list(visit = diag(1, 4))),
               "Row names are required")
  mismatched <- structure(diag(1, 4), dimnames = list(2:5, NULL))
  expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
                             cov_ranef = list(visit = mismatched)),
               "Row names .* do not match")
  # rows are reordered to the factor-level order before taking the Cholesky
  shuffled <- structure(diag(1:5), dimnames = list(c(1,5,2,4,3), NULL))
  expect_equivalent(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
                                  cov_ranef = list(visit = shuffled))$Lcov_1,
                    t(chol(shuffled[c(1,3,5,4), c(1,3,5,4)])))
  asym <- A
  asym[1,2] <- 0.5
  expect_error(make_standata(count ~ Trt_c + (1|visit), data = epilepsy,
                             cov_ranef = list(visit = asym)),
               "not symmetric")
})
# The deprecated brmdata() alias must keep producing the same output as
# make_standata() (deprecation warnings suppressed via SW()).
test_that("(deprecated) brmdata is backwards compatible", {
  compat_dat <- data.frame(y = 1:10, x = sample(1:5, 10, TRUE))
  expect_identical(
    SW(brmdata(y ~ x + (1|x), data = compat_dat, family = "poisson")),
    make_standata(y ~ x + (1|x), data = compat_dat, family = "poisson")
  )
  expect_identical(
    SW(brmdata(y ~ 1, data = compat_dat, family = "acat", partial = ~ x)),
    SW(make_standata(y ~ 1, data = compat_dat, family = "acat", partial = ~ x))
  )
})
# Non-linear formulas: each non-linear parameter gets its own design
# matrices, and covariates are exposed as C_1 (or as a named matrix C
# outside of Stan mode).
test_that("make_standata correctly prepares data for non-linear models", {
  nlpars <- list(a ~ x + (1|1|g), b ~ mono(z) + (1|1|g))
  nl_dat <- data.frame(y = rnorm(9), x = rnorm(9), z = sample(1:9, 9),
                       g = rep(1:3, 3))
  out <- make_standata(bf(y ~ a - b^z, flist = nlpars, nl = TRUE),
                       data = nl_dat)
  expect_equal(names(out),
               c("N", "Y", "C_1", "K_a", "X_a", "Z_1_a_1",
                 "K_b", "X_b", "Kmo_b", "Xmo_b", "Jmo_b",
                 "con_simplex_b_1", "Z_1_b_2", "J_1", "N_1",
                 "M_1", "NC_1", "prior_only"))
  expect_equal(colnames(out$X_a), c("Intercept", "x"))
  expect_equal(out$J_1, as.array(nl_dat$g))
  # with not4stan = TRUE, covariates come back as a named matrix C
  out <- make_standata(bf(y ~ a - b^z, flist = nlpars, nl = TRUE),
                       data = nl_dat, control = list(not4stan = TRUE))
  expect_equal(colnames(out$C), "z")
})
# Monotonic (ordinal predictor) effects: design matrix coding, category
# counts, and user-specified Dirichlet priors on the simplex parameters.
test_that("make_standata correctly prepares data for monotonic effects", {
  mono_dat <- data.frame(y = rpois(120, 10), x1 = rep(1:4, 30),
                         x2 = factor(rep(c("a", "b", "c"), 40),
                                     ordered = TRUE))
  out <- make_standata(y ~ mono(x1 + x2), data = mono_dat)
  expect_true(all(c("Xmo", "Jmo", "con_simplex_1", "con_simplex_2") %in%
                    names(out)))
  # monotonic predictors are shifted to start at zero
  expect_equivalent(out$Xmo,
                    cbind(mono_dat$x1 - 1, as.numeric(mono_dat$x2) - 1))
  expect_equal(as.vector(unname(out$Jmo)),
               c(max(mono_dat$x1) - 1, length(unique(mono_dat$x2)) - 1))
  # the default simplex prior concentration is all ones
  expect_equal(out$con_simplex_1, rep(1, 3))
  pr <- set_prior("dirichlet(1:3)", coef = "x1",
                  class = "simplex", nlpar = "sigma")
  out <- make_standata(bf(y ~ 1, sigma ~ mono(x1)),
                       data = mono_dat, prior = pr)
  expect_equal(out$con_simplex_sigma_1, 1:3)
  pr <- c(set_prior("normal(0,1)", class = "b", coef = "x"),
          set_prior("dirichlet(c(1,0.5,2))", class = "simplex", coef = "x1"))
  out <- make_standata(y ~ monotonic(x1 + x2), data = mono_dat, prior = pr)
  expect_equal(out$con_simplex_1, c(1, 0.5, 2))
  # a concentration vector of the wrong length must be rejected
  pr <- c(set_prior("dirichlet(c(1,0.5,2))", class = "simplex", coef = "x2"))
  expect_error(make_standata(y ~ monotonic(x1 + x2), data = mono_dat,
                             prior = pr),
               "Invalid Dirichlet prior for the simplex of coefficient 'x2'",
               fixed = TRUE)
})
# A fixed residual covariance matrix must match the number of observations.
test_that("make_standata returns fixed residual covariance matrices", {
  V <- diag(5)
  expect_equal(make_standata(y ~ 1, data.frame(y = 1:5),
                             autocor = SW(cor_fixed(V)))$V, V)
  expect_error(make_standata(y ~ 1, data.frame(y = 1:5),
                             autocor = cor_fixed(diag(2))),
               "'V' must have the same number of rows as 'data'")
})
# Bayesian structural time series (bsts) models: sorted per-group time
# indices and distributional-parameter design matrices.
test_that("make_standata returns data for bsts models", {
  bsts_dat <- data.frame(y = 1:5, g = c(1:3, sample(1:3, 2, TRUE)), t = 1:5)
  expect_equal(make_standata(y ~ 1, data = bsts_dat,
                             autocor = cor_bsts(~t|g))$tg,
               as.array(sort(bsts_dat$g)))
  expect_equivalent(make_standata(bf(y ~ 1, sigma ~ 1), data = bsts_dat,
                                  autocor = cor_bsts(~t|g))$X_sigma[, 1],
                    rep(1, nrow(bsts_dat)))
})
# Smooth (GAMM) terms: basis-matrix counts, knot counts, and dimensions of
# the penalized design matrices, both directly and inside non-linear pars.
test_that("make_standata returns data for GAMMs", {
  gam_dat <- data.frame(y = rnorm(10), x1 = rnorm(10), x2 = rnorm(10),
                        x3 = rnorm(10), z = rnorm(10),
                        g = factor(rep(1:2, 5)))
  out <- make_standata(y ~ s(x1) + z + s(x2, by = x3), data = gam_dat)
  expect_equal(out$nb_1, 1)
  expect_equal(as.vector(out$knots_2), 8)
  expect_equal(dim(out$Zs_1_1), c(10, 8))
  expect_equal(dim(out$Zs_2_1), c(10, 8))
  # the same structure must appear within a non-linear parameter 'lp'
  out <- make_standata(bf(y ~ lp, lp ~ s(x1) + z + s(x2, by = x3),
                          nl = TRUE), data = gam_dat)
  expect_equal(out$nb_lp_1, 1)
  expect_equal(as.vector(out$knots_lp_2), 8)
  expect_equal(dim(out$Zs_lp_1_1), c(10, 8))
  expect_equal(dim(out$Zs_lp_2_1), c(10, 8))
  # factor 'by' variables generate one smooth per level
  out <- make_standata(y ~ g + s(x2, by = g), data = gam_dat)
  expect_true(all(c("knots_1", "knots_2") %in% names(out)))
  out <- make_standata(y ~ t2(x1, x2), data = gam_dat)
  expect_equal(out$nb_1, 3)
  expect_equal(as.vector(out$knots_1), c(9, 6, 6))
  expect_equal(dim(out$Zs_1_1), c(10, 9))
  expect_equal(dim(out$Zs_1_3), c(10, 6))
  # te() and ti() smooths are not supported
  expect_error(make_standata(y ~ te(x1, x2), data = gam_dat),
               "smooths 'te' and 'ti' are not yet implemented")
})
# Shared group-level IDs (the |ID| syntax) must link terms across formula
# parts, producing correctly numbered Z matrices.
test_that("make_standata returns correct group ID data", {
  f1 <- bf(count ~ Trt_c + (1+Trt_c|3|visit) + (1|patient),
           shape ~ (1|3|visit) + (Trt_c||patient))
  out <- make_standata(f1, data = epilepsy, family = negbinomial())
  expect_true(all(c("Z_1_1", "Z_2_2", "Z_3_shape_1", "Z_2_shape_3") %in%
                    names(out)))
  f2 <- bf(count ~ a, sigma ~ (1|3|visit) + (Trt_c||patient),
           a ~ Trt_c + (1+Trt_c|3|visit) + (1|patient), nl = TRUE)
  out <- make_standata(f2, data = epilepsy, family = student())
  expect_true(all(c("Z_3_sigma_1", "Z_2_a_1", "Z_2_sigma_3",
                    "Z_1_a_1") %in% names(out)))
})
# Removal and re-addition of population-level intercepts, including the
# 'intercept' pseudo-variable.
test_that("make_standata handles population-level intercepts", {
  int_dat <- data.frame(y = 10:1, x = 1:10)
  out <- make_standata(y ~ 0 + x, data = int_dat)
  expect_equal(unname(out$X[, 1]), int_dat$x)
  out <- make_standata(y ~ x, int_dat, cumulative(),
                       control = list(not4stan = TRUE))
  expect_equal(unname(out$X[, 1]), int_dat$x)
  out <- make_standata(y ~ 0 + intercept + x, data = int_dat)
  expect_equal(unname(out$X), cbind(1, int_dat$x))
})
# Category-specific (cse) effects are only valid for certain ordinal
# families and may not be mixed with other terms in one group-level term.
test_that("make_standata handles category specific effects", {
  out <- make_standata(rating ~ period + carry + cse(treat),
                       data = inhaler, family = sratio())
  expect_equivalent(out$Xcs, matrix(inhaler$treat))
  out <- make_standata(rating ~ period + carry + cse(treat) + (cse(1)|subject),
                       data = inhaler, family = acat())
  expect_equivalent(out$Z_1_3, as.array(rep(1, nrow(inhaler))))
  out <- make_standata(rating ~ period + carry + (cse(treat)|subject),
                       data = inhaler, family = cratio())
  expect_equivalent(out$Z_1_4, as.array(inhaler$treat))
  expect_error(make_standata(rating ~ 1 + cse(treat), data = inhaler,
                             family = "cumulative"), "only meaningful")
  expect_error(make_standata(rating ~ 1 + (treat + cse(1)|subject),
                             data = inhaler, family = "cratio"),
               "category specific effects in separate group-level terms")
})
# Wiener diffusion models: the dec() addition term accepts 0/1 codes or the
# literal strings 'lower'/'upper' and rejects anything else.
test_that("make_standata handles wiener diffusion models", {
  wn_dat <- RWiener::rwiener(n=100, alpha=2, tau=.3, beta=.5, delta=.5)
  wn_dat$x <- rnorm(100)
  wn_dat$dec <- ifelse(wn_dat$resp == "lower", 0, 1)
  wn_dat$test <- "a"
  out <- make_standata(q | dec(resp) ~ x, data = wn_dat, family = wiener())
  expect_equal(out$dec, as.array(wn_dat$dec))
  out <- make_standata(q | dec(dec) ~ x, data = wn_dat, family = wiener())
  expect_equal(out$dec, as.array(wn_dat$dec))
  expect_error(make_standata(q | dec(test) ~ x, data = wn_dat,
                             family = wiener()),
               "Decisions should be 'lower' or 'upper'")
})
# Noise-free (measurement error) me() terms: raw values, noise SDs,
# interaction covariates, and the total count of me() terms.
test_that("make_standata handles noise-free terms", {
  n_obs <- 30
  me_dat <- data.frame(y = rnorm(n_obs), x = rnorm(n_obs), z = rnorm(n_obs),
                       xsd = abs(rnorm(n_obs, 1)), zsd = abs(rnorm(n_obs, 1)),
                       ID = rep(1:5, each = n_obs / 5))
  out <- make_standata(y ~ me(x, xsd)*me(z, zsd)*x, data = me_dat)
  expect_equal(out$Xn_1, as.array(me_dat$x))
  expect_equal(out$noise_2, as.array(me_dat$zsd))
  expect_equal(unname(out$Cme_3), me_dat$x)
  expect_equal(out$Kme, 6)
})
# Multi-membership grouping terms: default equal weights and consistent
# re-indexing of factor levels across all membership columns.
test_that("make_standata handles multi-membership models", {
  mm_dat <- data.frame(y = rnorm(10), g1 = c(7:2, rep(10, 4)),
                       g2 = 1:10, w1 = rep(1, 10),
                       w2 = rep(abs(rnorm(10))))
  out <- make_standata(y ~ (1|mm(g1,g2,g1,g2)), data = mm_dat)
  expect_true(all(paste0(c("W_1_", "J_1_"), 1:4) %in% names(out)))
  # four membership columns share the default weight of 1/4
  expect_equal(out$W_1_4, rep(0.25, 10))
  # the combination of factor levels must be consistent across columns
  expect_equal(out$J_1_1, as.array(c(6, 5, 4, 3, 2, 1, 7, 7, 7, 7)))
  expect_equal(out$J_1_2, as.array(c(8, 1, 2, 3, 4, 5, 6, 9, 10, 7)))
})
# Column names produced by poly() terms must be sanitized consistently.
test_that("make_standata handles calls to the 'poly' function", {
  poly_dat <- data.frame(y = rnorm(10), x = rnorm(10))
  expect_equal(colnames(make_standata(y ~ 1 + poly(x, 3), poly_dat)$X),
               c("Intercept", "polyx31", "polyx32", "polyx33"))
})
# Auxiliary (distributional) parameters may be fixed to constants in bf();
# unknown parameter names must be rejected.
test_that("make_standata allows fixed auxiliary parameters", {
  aux_dat <- list(y = 1:10)
  expect_equal(make_standata(bf(y ~ 1, nu = 3), aux_dat, student())$nu, 3)
  expect_equal(make_standata(y ~ 1, aux_dat, acat())$disc, 1)
  expect_error(make_standata(bf(y ~ 1, bias = 0.5), aux_dat),
               "Invalid auxiliary parameters: 'bias'")
})
# offset() terms are evaluated and summed per distributional parameter.
test_that("make_standata correctly includes offsets", {
  off_dat <- data.frame(y = rnorm(10), x = rnorm(10), c = 1)
  out <- make_standata(bf(y ~ x + offset(c), sigma ~ offset(c + 1)), off_dat)
  expect_equal(out$offset, off_dat$c)
  expect_equal(out$offset_sigma, off_dat$c + 1)
  # multiple offsets within one formula are added together
  out <- make_standata(y ~ x + offset(c) + offset(x), off_dat)
  expect_equal(out$offset, off_dat$c + off_dat$x)
})
# Mixture families: per-component design matrices, Dirichlet priors on the
# mixing proportions, and normalization of fixed mixing weights.
test_that("make_standata includes data for mixture models", {
  mix_dat <- data.frame(y = rnorm(10), x = rnorm(10), c = 1)
  form <- bf(y ~ x, mu1 ~ 1, family = mixture(gaussian, gaussian))
  out <- make_standata(form, mix_dat)
  expect_equal(out$con_theta, c(1, 1))
  expect_equal(dim(out$X_mu1), c(10, 1))
  expect_equal(dim(out$X_mu2), c(10, 2))
  form <- bf(y ~ x, family = mixture(gaussian, gaussian))
  out <- make_standata(form, mix_dat, prior = prior(dirichlet(10, 2), theta))
  expect_equal(out$con_theta, c(10, 2))
  # fixed mixing weights are rescaled to sum to one
  form <- bf(y ~ x, theta1 = 1, theta2 = 3,
             family = mixture(gaussian, gaussian))
  out <- make_standata(form, mix_dat)
  expect_equal(out$theta1, 1/4)
  expect_equal(out$theta2, 3/4)
})
# Gaussian process terms: covariates are scaled to unit range by default
# and left unscaled when scale = FALSE.
test_that("make_standata includes data for Gaussian processes", {
  gp_dat <- data.frame(y = rnorm(10), x1 = sample(1:10, 10))
  out <- make_standata(y ~ gp(x1), gp_dat)
  expect_equal(max(out$Xgp_1) - min(out$Xgp_1), 1)
  out <- make_standata(y ~ gp(x1, scale = FALSE), gp_dat)
  expect_equal(max(out$Xgp_1) - min(out$Xgp_1), 9)
})
|
## plot4.R -- build the 2x2 panel of household power consumption plots.
## Downloads the UCI household power consumption data from the course
## website, subsets it to 2007-02-01/02, and writes the figure to plot4.png
## (the device is closed by the dev.off() call that follows this section).
## sep = ";" is used because fields in the txt file are semicolon-separated.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
              destfile = "exdata-data-household_power_consumption.zip")
## renamed from 'cat', which masks base::cat()
power <- read.csv(unz("exdata-data-household_power_consumption.zip",
                      "household_power_consumption.txt"),
                  sep = ";", header = TRUE, fill = FALSE,
                  strip.white = TRUE, stringsAsFactors = FALSE)
## Convert the Date column into proper Date objects
power[, 1] <- as.Date(power[, 1], format = '%d/%m/%Y')
## Keep only the two days of interest
mat <- power[power$Date %in% as.Date(c('2007-02-01', '2007-02-02')), ]
## Coerce the measurement columns to numeric for plotting
for (i in 3:9) {
  mat[, i] <- as.numeric(mat[, i])
}
## Build a combined date-time column and attach it to 'mat'
DT <- strptime(paste(as.character(mat$Date), as.character(mat$Time), sep = ":"),
               format = '%Y-%m-%d:%H:%M:%S')
mat <- cbind(mat, DT)
## Plot the four panels into a PNG file
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4, 4, 1, 4))
plot(mat$DT, mat$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
plot(mat$DT, mat$Voltage, type = "l", xlab = "datetime", ylab = "voltage")
plot(mat$DT, mat$Sub_metering_1, type = "S", xlab = "",
     ylab = "Energy sub metering", ylim = c(0, 40))
par(new = TRUE)  # was par(new=T): TRUE is safer than the reassignable T
plot(mat$DT, mat$Sub_metering_2, type = "S", xlab = "", ylab = "",
     col = "red", ylim = c(0, 40))
par(new = TRUE)
plot(mat$DT, mat$Sub_metering_3, type = "S", xlab = "", ylab = "",
     col = "blue", ylim = c(0, 40))
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", lty = 1, col = c('black', 'red', 'blue'),
       pt.cex = 1, cex = 0.75)
plot(mat$DT, mat$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off() | /plot4.R | no_license | arj27083/ExData_Plotting1 | R | false | false | 1,968 | r | ##The following commands reads the data directly from website, unzips it and extracts data from the txt file and stores in a data frame "cat".
## plot4.R (duplicate copy) -- downloads the UCI household power consumption
## data, subsets it to 2007-02-01/02, and draws the 2x2 plot panel into
## plot4.png (the device is closed by the dev.off() call that follows).
## sep = ";" is used because fields in the txt file are semicolon-separated.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
              destfile = "exdata-data-household_power_consumption.zip")
## renamed from 'cat', which masks base::cat()
power <- read.csv(unz("exdata-data-household_power_consumption.zip",
                      "household_power_consumption.txt"),
                  sep = ";", header = TRUE, fill = FALSE,
                  strip.white = TRUE, stringsAsFactors = FALSE)
## Convert the Date column into proper Date objects
power[, 1] <- as.Date(power[, 1], format = '%d/%m/%Y')
## Keep only the two days of interest
mat <- power[power$Date %in% as.Date(c('2007-02-01', '2007-02-02')), ]
## Coerce the measurement columns to numeric for plotting
for (i in 3:9) {
  mat[, i] <- as.numeric(mat[, i])
}
## Build a combined date-time column and attach it to 'mat'
DT <- strptime(paste(as.character(mat$Date), as.character(mat$Time), sep = ":"),
               format = '%Y-%m-%d:%H:%M:%S')
mat <- cbind(mat, DT)
## Plot the four panels into a PNG file
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4, 4, 1, 4))
plot(mat$DT, mat$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")
plot(mat$DT, mat$Voltage, type = "l", xlab = "datetime", ylab = "voltage")
plot(mat$DT, mat$Sub_metering_1, type = "S", xlab = "",
     ylab = "Energy sub metering", ylim = c(0, 40))
par(new = TRUE)  # was par(new=T): TRUE is safer than the reassignable T
plot(mat$DT, mat$Sub_metering_2, type = "S", xlab = "", ylab = "",
     col = "red", ylim = c(0, 40))
par(new = TRUE)
plot(mat$DT, mat$Sub_metering_3, type = "S", xlab = "", ylab = "",
     col = "blue", ylim = c(0, 40))
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", lty = 1, col = c('black', 'red', 'blue'),
       pt.cex = 1, cex = 0.75)
plot(mat$DT, mat$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off() |
## Matrix-inversion caching utilities: makeCacheMatrix() wraps a matrix in
## an object that can memoize its inverse, and cacheSolve() computes the
## inverse once and serves the cached copy on subsequent calls.
## (The assignment's placeholder "Write a short comment" notes are replaced
## with real documentation.)

## Create a cache-aware wrapper around matrix 'y'.
## Returns a list of four accessor functions:
##   set(J)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setInverse(i) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(y = matrix()) {
  inversefunc <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(J) {
    y <<- J
    inversefunc <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() y
  setInverse <- function(inverse) inversefunc <<- inverse
  getInverse <- function() inversefunc
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the matrix wrapped by 'x' (a makeCacheMatrix
## object). The first call computes the inverse via solve() and caches it;
## later calls return the cached value. Extra arguments in '...' are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  inversev <- x$getInverse()
  if (!is.null(inversev)) {
    message("getting cached data")
    return(inversev)
  }
  y <- x$get()
  inversev <- solve(y, ...)
  x$setInverse(inversev)
  inversev
}
| /cachematrix.R | no_license | JulieHwayek/ProgrammingAssignment2 | R | false | false | 853 | r | ## Put comments here that give an overall description of what your
## Pair of functions implementing a matrix wrapper with a memoized inverse.

## Wrap matrix 'y' in an environment-backed cache object exposing get/set
## accessors for both the matrix and its (lazily computed) inverse.
makeCacheMatrix <- function(y = matrix()) {
  cached_inv <- NULL
  set <- function(J) {
    y <<- J
    cached_inv <<- NULL
  }
  get <- function() y
  setInverse <- function(inverse) cached_inv <<- inverse
  getInverse <- function() cached_inv
  list(set = set, get = get,
       setInverse = setInverse, getInverse = getInverse)
}

## Compute (or fetch from cache) the inverse of the matrix held in 'x'.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  hit <- x$getInverse()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setInverse(hit)
    return(hit)
  }
  message("getting cached data")
  hit
}
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include pi_service.R
NULL
#' For a specific time period, retrieve the top N dimension keys for a
#' metric
#'
#' @description
#' For a specific time period, retrieve the top `N` dimension keys for a metric.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_describe_dimension_keys/](https://www.paws-r-sdk.com/docs/pi_describe_dimension_keys/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights will
#' return metrics. Valid values are as follows:
#'
#' - `RDS`
#'
#' - `DOCDB`
#' @param Identifier [required] An immutable, Amazon Web Services Region-unique identifier for a data
#' source. Performance Insights gathers metrics from this data source.
#'
#' To use an Amazon RDS instance as a data source, you specify its
#' `DbiResourceId` value. For example, specify
#' `db-FAIHNTYBKTGAUSUZQYPDS2GW4A`.
#' @param StartTime [required] The date and time specifying the beginning of the requested time series
#' data. You must specify a `StartTime` within the past 7 days. The value
#' specified is *inclusive*, which means that data points equal to or
#' greater than `StartTime` are returned.
#'
#' The value for `StartTime` must be earlier than the value for `EndTime`.
#' @param EndTime [required] The date and time specifying the end of the requested time series data.
#' The value specified is *exclusive*, which means that data points less
#' than (but not equal to) `EndTime` are returned.
#'
#' The value for `EndTime` must be later than the value for `StartTime`.
#' @param Metric [required] The name of a Performance Insights metric to be measured.
#'
#' Valid values for `Metric` are:
#'
#' - `db.load.avg` - A scaled representation of the number of active
#' sessions for the database engine.
#'
#' - `db.sampledload.avg` - The raw number of active sessions for the
#' database engine.
#'
#' If the number of active sessions is less than an internal Performance
#' Insights threshold, `db.load.avg` and `db.sampledload.avg` are the same
#' value. If the number of active sessions is greater than the internal
#' threshold, Performance Insights samples the active sessions, with
#' `db.load.avg` showing the scaled values, `db.sampledload.avg` showing
#' the raw values, and `db.sampledload.avg` less than `db.load.avg`. For
#' most use cases, you can query `db.load.avg` only.
#' @param PeriodInSeconds The granularity, in seconds, of the data points returned from
#' Performance Insights. A period can be as short as one second, or as long
#' as one day (86400 seconds). Valid values are:
#'
#' - `1` (one second)
#'
#' - `60` (one minute)
#'
#' - `300` (five minutes)
#'
#' - `3600` (one hour)
#'
#' - `86400` (twenty-four hours)
#'
#' If you don't specify `PeriodInSeconds`, then Performance Insights
#' chooses a value for you, with a goal of returning roughly 100-200 data
#' points in the response.
#' @param GroupBy [required] A specification for how to aggregate the data points from a query
#' result. You must specify a valid dimension group. Performance Insights
#' returns all dimensions within this group, unless you provide the names
#' of specific dimensions within this group. You can also request that
#' Performance Insights return a limited number of values for a dimension.
#' @param AdditionalMetrics Additional metrics for the top `N` dimension keys. If the specified
#' dimension group in the `GroupBy` parameter is `db.sql_tokenized`, you
#' can specify per-SQL metrics to get the values for the top `N` SQL
#' digests. The response syntax is as follows:
#' `"AdditionalMetrics" : { "string" : "string" }`.
#' @param PartitionBy For each dimension specified in `GroupBy`, specify a secondary dimension
#' to further subdivide the partition keys in the response.
#' @param Filter One or more filters to apply in the request. Restrictions:
#'
#' - Any number of filters by the same dimension, as specified in the
#' `GroupBy` or `Partition` parameters.
#'
#' - A single filter for any other dimension in this dimension group.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#'
#' @keywords internal
#'
#' @rdname pi_describe_dimension_keys
# Generated by make.paws: request/response plumbing for the Performance
# Insights DescribeDimensionKeys API call. Do not edit by hand.
pi_describe_dimension_keys <- function(ServiceType, Identifier, StartTime, EndTime, Metric, PeriodInSeconds = NULL, GroupBy, AdditionalMetrics = NULL, PartitionBy = NULL, Filter = NULL, MaxResults = NULL, NextToken = NULL) {
  # Describe the wire operation, including its pagination token mapping
  op <- new_operation(
    name = "DescribeDimensionKeys",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the inputs via the generated serializers for this operation
  input <- .pi$describe_dimension_keys_input(ServiceType = ServiceType, Identifier = Identifier, StartTime = StartTime, EndTime = EndTime, Metric = Metric, PeriodInSeconds = PeriodInSeconds, GroupBy = GroupBy, AdditionalMetrics = AdditionalMetrics, PartitionBy = PartitionBy, Filter = Filter, MaxResults = MaxResults, NextToken = NextToken)
  output <- .pi$describe_dimension_keys_output()
  # Build a service client from the current config, send, and return
  config <- get_config()
  svc <- .pi$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.pi$operations$describe_dimension_keys <- pi_describe_dimension_keys
#' Get the attributes of the specified dimension group for a DB instance or
#' data source
#'
#' @description
#' Get the attributes of the specified dimension group for a DB instance or data source. For example, if you specify a SQL ID, [`get_dimension_key_details`][pi_get_dimension_key_details] retrieves the full text of the dimension `db.sql.statement` associated with this ID. This operation is useful because [`get_resource_metrics`][pi_get_resource_metrics] and [`describe_dimension_keys`][pi_describe_dimension_keys] don't support retrieval of large SQL statement text.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_dimension_key_details/](https://www.paws-r-sdk.com/docs/pi_get_dimension_key_details/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' data. The only valid value is `RDS`.
#' @param Identifier [required] The ID for a data source from which to gather dimension data. This ID
#' must be immutable and unique within an Amazon Web Services Region. When
#' a DB instance is the data source, specify its `DbiResourceId` value. For
#' example, specify `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#' @param Group [required] The name of the dimension group. Performance Insights searches the
#' specified group for the dimension group ID. The following group name
#' values are valid:
#'
#' - `db.query` (Amazon DocumentDB only)
#'
#' - `db.sql` (Amazon RDS and Aurora only)
#' @param GroupIdentifier [required] The ID of the dimension group from which to retrieve dimension details.
#' For dimension group `db.sql`, the group ID is `db.sql.id`. The following
#' group ID values are valid:
#'
#' - `db.sql.id` for dimension group `db.sql` (Aurora and RDS only)
#'
#' - `db.query.id` for dimension group `db.query` (DocumentDB only)
#' @param RequestedDimensions A list of dimensions to retrieve the detail data for within the given
#' dimension group. If you don't specify this parameter, Performance
#' Insights returns all dimension data within the specified dimension
#' group. Specify dimension names for the following dimension groups:
#'
#' - `db.sql` - Specify either the full dimension name `db.sql.statement`
#' or the short dimension name `statement` (Aurora and RDS only).
#'
#' - `db.query` - Specify either the full dimension name
#' `db.query.statement` or the short dimension name `statement`
#' (DocumentDB only).
#'
#' @keywords internal
#'
#' @rdname pi_get_dimension_key_details
# Generated by make.paws: request/response plumbing for the Performance
# Insights GetDimensionKeyDetails API call. Do not edit by hand.
pi_get_dimension_key_details <- function(ServiceType, Identifier, Group, GroupIdentifier, RequestedDimensions = NULL) {
  # Describe the wire operation (not paginated)
  op <- new_operation(
    name = "GetDimensionKeyDetails",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the inputs via the generated serializers for this operation
  input <- .pi$get_dimension_key_details_input(ServiceType = ServiceType, Identifier = Identifier, Group = Group, GroupIdentifier = GroupIdentifier, RequestedDimensions = RequestedDimensions)
  output <- .pi$get_dimension_key_details_output()
  # Build a service client from the current config, send, and return
  config <- get_config()
  svc <- .pi$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.pi$operations$get_dimension_key_details <- pi_get_dimension_key_details
#' Retrieve the metadata for different features
#'
#' @description
#' Retrieve the metadata for different features. For example, the metadata might indicate that a feature is turned on or off on a specific DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_resource_metadata/](https://www.paws-r-sdk.com/docs/pi_get_resource_metadata/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique for an Amazon
#' Web Services Region. Performance Insights gathers metrics from this data
#' source. To use a DB instance as a data source, specify its
#' `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#'
#' @keywords internal
#'
#' @rdname pi_get_resource_metadata
# Generated by make.paws: request/response plumbing for the Performance
# Insights GetResourceMetadata API call. Do not edit by hand.
pi_get_resource_metadata <- function(ServiceType, Identifier) {
  # Describe the wire operation (not paginated)
  op <- new_operation(
    name = "GetResourceMetadata",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the inputs via the generated serializers for this operation
  input <- .pi$get_resource_metadata_input(ServiceType = ServiceType, Identifier = Identifier)
  output <- .pi$get_resource_metadata_output()
  # Build a service client from the current config, send, and return
  config <- get_config()
  svc <- .pi$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
# Register the operation on the service's operation table
.pi$operations$get_resource_metadata <- pi_get_resource_metadata
#' Retrieve Performance Insights metrics for a set of data sources over a
#' time period
#'
#' @description
#' Retrieve Performance Insights metrics for a set of data sources over a time period. You can provide specific dimension groups and dimensions, and provide aggregation and filtering criteria for each group.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_resource_metrics/](https://www.paws-r-sdk.com/docs/pi_get_resource_metrics/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics. Valid values are as follows:
#'
#' - `RDS`
#'
#' - `DOCDB`
#' @param Identifier [required] An immutable identifier for a data source that is unique for an Amazon
#' Web Services Region. Performance Insights gathers metrics from this data
#' source. In the console, the identifier is shown as *ResourceID*. When
#' you call `DescribeDBInstances`, the identifier is returned as
#' `DbiResourceId`.
#'
#' To use a DB instance as a data source, specify its `DbiResourceId`
#' value. For example, specify `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#' @param MetricQueries [required] An array of one or more queries to perform. Each query must specify a
#' Performance Insights metric, and can optionally specify aggregation and
#' filtering criteria.
#' @param StartTime [required] The date and time specifying the beginning of the requested time series
#' query range. You can't specify a `StartTime` that is earlier than 7 days
#' ago. By default, Performance Insights has 7 days of retention, but you
#' can extend this range up to 2 years. The value specified is *inclusive*.
#' Thus, the command returns data points equal to or greater than
#' `StartTime`.
#'
#' The value for `StartTime` must be earlier than the value for `EndTime`.
#' @param EndTime [required] The date and time specifying the end of the requested time series query
#' range. The value specified is *exclusive*. Thus, the command returns
#' data points less than (but not equal to) `EndTime`.
#'
#' The value for `EndTime` must be later than the value for `StartTime`.
#' @param PeriodInSeconds The granularity, in seconds, of the data points returned from
#' Performance Insights. A period can be as short as one second, or as long
#' as one day (86400 seconds). Valid values are:
#'
#' - `1` (one second)
#'
#' - `60` (one minute)
#'
#' - `300` (five minutes)
#'
#' - `3600` (one hour)
#'
#' - `86400` (twenty-four hours)
#'
#' If you don't specify `PeriodInSeconds`, then Performance Insights will
#' choose a value for you, with a goal of returning roughly 100-200 data
#' points in the response.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#' @param PeriodAlignment The returned timestamp which is the start or end time of the time
#' periods. The default value is `END_TIME`.
#'
#' @keywords internal
#'
#' @rdname pi_get_resource_metrics
pi_get_resource_metrics <- function(ServiceType, Identifier, MetricQueries, StartTime, EndTime, PeriodInSeconds = NULL, MaxResults = NULL, NextToken = NULL, PeriodAlignment = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "GetResourceMetrics",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$get_resource_metrics_input(ServiceType = ServiceType, Identifier = Identifier, MetricQueries = MetricQueries, StartTime = StartTime, EndTime = EndTime, PeriodInSeconds = PeriodInSeconds, MaxResults = MaxResults, NextToken = NextToken, PeriodAlignment = PeriodAlignment)
  op_output <- .pi$get_resource_metrics_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$get_resource_metrics <- pi_get_resource_metrics
#' Retrieve the dimensions that can be queried for each specified metric
#' type on a specified DB instance
#'
#' @description
#' Retrieve the dimensions that can be queried for each specified metric type on a specified DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_list_available_resource_dimensions/](https://www.paws-r-sdk.com/docs/pi_list_available_resource_dimensions/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique within an
#' Amazon Web Services Region. Performance Insights gathers metrics from
#' this data source. To use an Amazon RDS DB instance as a data source,
#' specify its `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VWZ`.
#' @param Metrics [required] The types of metrics for which to retrieve dimensions. Valid values
#' include `db.load`.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#'
#' @keywords internal
#'
#' @rdname pi_list_available_resource_dimensions
pi_list_available_resource_dimensions <- function(ServiceType, Identifier, Metrics, MaxResults = NULL, NextToken = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "ListAvailableResourceDimensions",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$list_available_resource_dimensions_input(ServiceType = ServiceType, Identifier = Identifier, Metrics = Metrics, MaxResults = MaxResults, NextToken = NextToken)
  op_output <- .pi$list_available_resource_dimensions_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$list_available_resource_dimensions <- pi_list_available_resource_dimensions
#' Retrieve metrics of the specified types that can be queried for a
#' specified DB instance
#'
#' @description
#' Retrieve metrics of the specified types that can be queried for a specified DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_list_available_resource_metrics/](https://www.paws-r-sdk.com/docs/pi_list_available_resource_metrics/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique within an
#' Amazon Web Services Region. Performance Insights gathers metrics from
#' this data source. To use an Amazon RDS DB instance as a data source,
#' specify its `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VWZ`.
#' @param MetricTypes [required] The types of metrics to return in the response. Valid values in the
#' array include the following:
#'
#' - `os` (OS counter metrics) - All engines
#'
#' - `db` (DB load metrics) - All engines except for Amazon DocumentDB
#'
#' - `db.sql.stats` (per-SQL metrics) - All engines except for Amazon
#' DocumentDB
#'
#' - `db.sql_tokenized.stats` (per-SQL digest metrics) - All engines
#' except for Amazon DocumentDB
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#' @param MaxResults The maximum number of items to return. If the `MaxRecords` value is less
#' than the number of existing items, the response includes a pagination
#' token.
#'
#' @keywords internal
#'
#' @rdname pi_list_available_resource_metrics
pi_list_available_resource_metrics <- function(ServiceType, Identifier, MetricTypes, NextToken = NULL, MaxResults = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "ListAvailableResourceMetrics",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$list_available_resource_metrics_input(ServiceType = ServiceType, Identifier = Identifier, MetricTypes = MetricTypes, NextToken = NextToken, MaxResults = MaxResults)
  op_output <- .pi$list_available_resource_metrics_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$list_available_resource_metrics <- pi_list_available_resource_metrics
| /cran/paws.management/R/pi_operations.R | permissive | paws-r/paws | R | false | false | 19,887 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include pi_service.R
NULL
#' For a specific time period, retrieve the top N dimension keys for a
#' metric
#'
#' @description
#' For a specific time period, retrieve the top `N` dimension keys for a metric.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_describe_dimension_keys/](https://www.paws-r-sdk.com/docs/pi_describe_dimension_keys/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights will
#' return metrics. Valid values are as follows:
#'
#' - `RDS`
#'
#' - `DOCDB`
#' @param Identifier [required] An immutable, Amazon Web Services Region-unique identifier for a data
#' source. Performance Insights gathers metrics from this data source.
#'
#' To use an Amazon RDS instance as a data source, you specify its
#' `DbiResourceId` value. For example, specify
#' `db-FAIHNTYBKTGAUSUZQYPDS2GW4A`.
#' @param StartTime [required] The date and time specifying the beginning of the requested time series
#' data. You must specify a `StartTime` within the past 7 days. The value
#' specified is *inclusive*, which means that data points equal to or
#' greater than `StartTime` are returned.
#'
#' The value for `StartTime` must be earlier than the value for `EndTime`.
#' @param EndTime [required] The date and time specifying the end of the requested time series data.
#' The value specified is *exclusive*, which means that data points less
#' than (but not equal to) `EndTime` are returned.
#'
#' The value for `EndTime` must be later than the value for `StartTime`.
#' @param Metric [required] The name of a Performance Insights metric to be measured.
#'
#' Valid values for `Metric` are:
#'
#' - `db.load.avg` - A scaled representation of the number of active
#' sessions for the database engine.
#'
#' - `db.sampledload.avg` - The raw number of active sessions for the
#' database engine.
#'
#' If the number of active sessions is less than an internal Performance
#' Insights threshold, `db.load.avg` and `db.sampledload.avg` are the same
#' value. If the number of active sessions is greater than the internal
#' threshold, Performance Insights samples the active sessions, with
#' `db.load.avg` showing the scaled values, `db.sampledload.avg` showing
#' the raw values, and `db.sampledload.avg` less than `db.load.avg`. For
#' most use cases, you can query `db.load.avg` only.
#' @param PeriodInSeconds The granularity, in seconds, of the data points returned from
#' Performance Insights. A period can be as short as one second, or as long
#' as one day (86400 seconds). Valid values are:
#'
#' - `1` (one second)
#'
#' - `60` (one minute)
#'
#' - `300` (five minutes)
#'
#' - `3600` (one hour)
#'
#' - `86400` (twenty-four hours)
#'
#' If you don't specify `PeriodInSeconds`, then Performance Insights
#' chooses a value for you, with a goal of returning roughly 100-200 data
#' points in the response.
#' @param GroupBy [required] A specification for how to aggregate the data points from a query
#' result. You must specify a valid dimension group. Performance Insights
#' returns all dimensions within this group, unless you provide the names
#' of specific dimensions within this group. You can also request that
#' Performance Insights return a limited number of values for a dimension.
#' @param AdditionalMetrics Additional metrics for the top `N` dimension keys. If the specified
#' dimension group in the `GroupBy` parameter is `db.sql_tokenized`, you
#' can specify per-SQL metrics to get the values for the top `N` SQL
#' digests. The response syntax is as follows:
#' `"AdditionalMetrics" : { "string" : "string" }`.
#' @param PartitionBy For each dimension specified in `GroupBy`, specify a secondary dimension
#' to further subdivide the partition keys in the response.
#' @param Filter One or more filters to apply in the request. Restrictions:
#'
#' - Any number of filters by the same dimension, as specified in the
#' `GroupBy` or `Partition` parameters.
#'
#' - A single filter for any other dimension in this dimension group.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#'
#' @keywords internal
#'
#' @rdname pi_describe_dimension_keys
pi_describe_dimension_keys <- function(ServiceType, Identifier, StartTime, EndTime, Metric, PeriodInSeconds = NULL, GroupBy, AdditionalMetrics = NULL, PartitionBy = NULL, Filter = NULL, MaxResults = NULL, NextToken = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "DescribeDimensionKeys",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$describe_dimension_keys_input(ServiceType = ServiceType, Identifier = Identifier, StartTime = StartTime, EndTime = EndTime, Metric = Metric, PeriodInSeconds = PeriodInSeconds, GroupBy = GroupBy, AdditionalMetrics = AdditionalMetrics, PartitionBy = PartitionBy, Filter = Filter, MaxResults = MaxResults, NextToken = NextToken)
  op_output <- .pi$describe_dimension_keys_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$describe_dimension_keys <- pi_describe_dimension_keys
#' Get the attributes of the specified dimension group for a DB instance or
#' data source
#'
#' @description
#' Get the attributes of the specified dimension group for a DB instance or data source. For example, if you specify a SQL ID, [`get_dimension_key_details`][pi_get_dimension_key_details] retrieves the full text of the dimension `db.sql.statement` associated with this ID. This operation is useful because [`get_resource_metrics`][pi_get_resource_metrics] and [`describe_dimension_keys`][pi_describe_dimension_keys] don't support retrieval of large SQL statement text.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_dimension_key_details/](https://www.paws-r-sdk.com/docs/pi_get_dimension_key_details/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' data. The only valid value is `RDS`.
#' @param Identifier [required] The ID for a data source from which to gather dimension data. This ID
#' must be immutable and unique within an Amazon Web Services Region. When
#' a DB instance is the data source, specify its `DbiResourceId` value. For
#' example, specify `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#' @param Group [required] The name of the dimension group. Performance Insights searches the
#' specified group for the dimension group ID. The following group name
#' values are valid:
#'
#' - `db.query` (Amazon DocumentDB only)
#'
#' - `db.sql` (Amazon RDS and Aurora only)
#' @param GroupIdentifier [required] The ID of the dimension group from which to retrieve dimension details.
#' For dimension group `db.sql`, the group ID is `db.sql.id`. The following
#' group ID values are valid:
#'
#' - `db.sql.id` for dimension group `db.sql` (Aurora and RDS only)
#'
#' - `db.query.id` for dimension group `db.query` (DocumentDB only)
#' @param RequestedDimensions A list of dimensions to retrieve the detail data for within the given
#' dimension group. If you don't specify this parameter, Performance
#' Insights returns all dimension data within the specified dimension
#' group. Specify dimension names for the following dimension groups:
#'
#' - `db.sql` - Specify either the full dimension name `db.sql.statement`
#' or the short dimension name `statement` (Aurora and RDS only).
#'
#' - `db.query` - Specify either the full dimension name
#' `db.query.statement` or the short dimension name `statement`
#' (DocumentDB only).
#'
#' @keywords internal
#'
#' @rdname pi_get_dimension_key_details
pi_get_dimension_key_details <- function(ServiceType, Identifier, Group, GroupIdentifier, RequestedDimensions = NULL) {
  # Describe the wire-level call: a POST to the service root, no pagination.
  operation <- new_operation(
    name = "GetDimensionKeyDetails",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$get_dimension_key_details_input(ServiceType = ServiceType, Identifier = Identifier, Group = Group, GroupIdentifier = GroupIdentifier, RequestedDimensions = RequestedDimensions)
  op_output <- .pi$get_dimension_key_details_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$get_dimension_key_details <- pi_get_dimension_key_details
#' Retrieve the metadata for different features
#'
#' @description
#' Retrieve the metadata for different features. For example, the metadata might indicate that a feature is turned on or off on a specific DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_resource_metadata/](https://www.paws-r-sdk.com/docs/pi_get_resource_metadata/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique for an Amazon
#' Web Services Region. Performance Insights gathers metrics from this data
#' source. To use a DB instance as a data source, specify its
#' `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#'
#' @keywords internal
#'
#' @rdname pi_get_resource_metadata
pi_get_resource_metadata <- function(ServiceType, Identifier) {
  # Describe the wire-level call: a POST to the service root, no pagination.
  operation <- new_operation(
    name = "GetResourceMetadata",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$get_resource_metadata_input(ServiceType = ServiceType, Identifier = Identifier)
  op_output <- .pi$get_resource_metadata_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$get_resource_metadata <- pi_get_resource_metadata
#' Retrieve Performance Insights metrics for a set of data sources over a
#' time period
#'
#' @description
#' Retrieve Performance Insights metrics for a set of data sources over a time period. You can provide specific dimension groups and dimensions, and provide aggregation and filtering criteria for each group.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_get_resource_metrics/](https://www.paws-r-sdk.com/docs/pi_get_resource_metrics/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics. Valid values are as follows:
#'
#' - `RDS`
#'
#' - `DOCDB`
#' @param Identifier [required] An immutable identifier for a data source that is unique for an Amazon
#' Web Services Region. Performance Insights gathers metrics from this data
#' source. In the console, the identifier is shown as *ResourceID*. When
#' you call `DescribeDBInstances`, the identifier is returned as
#' `DbiResourceId`.
#'
#' To use a DB instance as a data source, specify its `DbiResourceId`
#' value. For example, specify `db-ABCDEFGHIJKLMNOPQRSTU1VW2X`.
#' @param MetricQueries [required] An array of one or more queries to perform. Each query must specify a
#' Performance Insights metric, and can optionally specify aggregation and
#' filtering criteria.
#' @param StartTime [required] The date and time specifying the beginning of the requested time series
#' query range. You can't specify a `StartTime` that is earlier than 7 days
#' ago. By default, Performance Insights has 7 days of retention, but you
#' can extend this range up to 2 years. The value specified is *inclusive*.
#' Thus, the command returns data points equal to or greater than
#' `StartTime`.
#'
#' The value for `StartTime` must be earlier than the value for `EndTime`.
#' @param EndTime [required] The date and time specifying the end of the requested time series query
#' range. The value specified is *exclusive*. Thus, the command returns
#' data points less than (but not equal to) `EndTime`.
#'
#' The value for `EndTime` must be later than the value for `StartTime`.
#' @param PeriodInSeconds The granularity, in seconds, of the data points returned from
#' Performance Insights. A period can be as short as one second, or as long
#' as one day (86400 seconds). Valid values are:
#'
#' - `1` (one second)
#'
#' - `60` (one minute)
#'
#' - `300` (five minutes)
#'
#' - `3600` (one hour)
#'
#' - `86400` (twenty-four hours)
#'
#' If you don't specify `PeriodInSeconds`, then Performance Insights will
#' choose a value for you, with a goal of returning roughly 100-200 data
#' points in the response.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#' @param PeriodAlignment The returned timestamp which is the start or end time of the time
#' periods. The default value is `END_TIME`.
#'
#' @keywords internal
#'
#' @rdname pi_get_resource_metrics
pi_get_resource_metrics <- function(ServiceType, Identifier, MetricQueries, StartTime, EndTime, PeriodInSeconds = NULL, MaxResults = NULL, NextToken = NULL, PeriodAlignment = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "GetResourceMetrics",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$get_resource_metrics_input(ServiceType = ServiceType, Identifier = Identifier, MetricQueries = MetricQueries, StartTime = StartTime, EndTime = EndTime, PeriodInSeconds = PeriodInSeconds, MaxResults = MaxResults, NextToken = NextToken, PeriodAlignment = PeriodAlignment)
  op_output <- .pi$get_resource_metrics_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$get_resource_metrics <- pi_get_resource_metrics
#' Retrieve the dimensions that can be queried for each specified metric
#' type on a specified DB instance
#'
#' @description
#' Retrieve the dimensions that can be queried for each specified metric type on a specified DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_list_available_resource_dimensions/](https://www.paws-r-sdk.com/docs/pi_list_available_resource_dimensions/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique within an
#' Amazon Web Services Region. Performance Insights gathers metrics from
#' this data source. To use an Amazon RDS DB instance as a data source,
#' specify its `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VWZ`.
#' @param Metrics [required] The types of metrics for which to retrieve dimensions. Valid values
#' include `db.load`.
#' @param MaxResults The maximum number of items to return in the response. If more items
#' exist than the specified `MaxRecords` value, a pagination token is
#' included in the response so that the remaining results can be retrieved.
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#'
#' @keywords internal
#'
#' @rdname pi_list_available_resource_dimensions
pi_list_available_resource_dimensions <- function(ServiceType, Identifier, Metrics, MaxResults = NULL, NextToken = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "ListAvailableResourceDimensions",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$list_available_resource_dimensions_input(ServiceType = ServiceType, Identifier = Identifier, Metrics = Metrics, MaxResults = MaxResults, NextToken = NextToken)
  op_output <- .pi$list_available_resource_dimensions_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$list_available_resource_dimensions <- pi_list_available_resource_dimensions
#' Retrieve metrics of the specified types that can be queried for a
#' specified DB instance
#'
#' @description
#' Retrieve metrics of the specified types that can be queried for a specified DB instance.
#'
#' See [https://www.paws-r-sdk.com/docs/pi_list_available_resource_metrics/](https://www.paws-r-sdk.com/docs/pi_list_available_resource_metrics/) for full documentation.
#'
#' @param ServiceType [required] The Amazon Web Services service for which Performance Insights returns
#' metrics.
#' @param Identifier [required] An immutable identifier for a data source that is unique within an
#' Amazon Web Services Region. Performance Insights gathers metrics from
#' this data source. To use an Amazon RDS DB instance as a data source,
#' specify its `DbiResourceId` value. For example, specify
#' `db-ABCDEFGHIJKLMNOPQRSTU1VWZ`.
#' @param MetricTypes [required] The types of metrics to return in the response. Valid values in the
#' array include the following:
#'
#' - `os` (OS counter metrics) - All engines
#'
#' - `db` (DB load metrics) - All engines except for Amazon DocumentDB
#'
#' - `db.sql.stats` (per-SQL metrics) - All engines except for Amazon
#' DocumentDB
#'
#' - `db.sql_tokenized.stats` (per-SQL digest metrics) - All engines
#' except for Amazon DocumentDB
#' @param NextToken An optional pagination token provided by a previous request. If this
#' parameter is specified, the response includes only records beyond the
#' token, up to the value specified by `MaxRecords`.
#' @param MaxResults The maximum number of items to return. If the `MaxRecords` value is less
#' than the number of existing items, the response includes a pagination
#' token.
#'
#' @keywords internal
#'
#' @rdname pi_list_available_resource_metrics
pi_list_available_resource_metrics <- function(ServiceType, Identifier, MetricTypes, NextToken = NULL, MaxResults = NULL) {
  # Describe the wire-level call: a POST to the service root, paginated
  # via NextToken with MaxResults as the page-size limiter.
  operation <- new_operation(
    name = "ListAvailableResourceMetrics",
    http_method = "POST",
    http_path = "/",
    paginator = list(input_token = "NextToken", output_token = "NextToken", limit_key = "MaxResults")
  )
  # Marshal the caller's arguments and declare the expected response shape.
  op_input <- .pi$list_available_resource_metrics_input(ServiceType = ServiceType, Identifier = Identifier, MetricTypes = MetricTypes, NextToken = NextToken, MaxResults = MaxResults)
  op_output <- .pi$list_available_resource_metrics_output()
  # Build a client from the active configuration and dispatch the request.
  client <- .pi$service(get_config())
  send_request(new_request(client, operation, op_input, op_output))
}
.pi$operations$list_available_resource_metrics <- pi_list_available_resource_metrics
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\name{ExperimentColorMap}
\alias{ExperimentColorMap}
\alias{class:ExperimentColorMap}
\alias{ExperimentColorMap-class}
\alias{assayColorMap}
\alias{colDataColorMap}
\alias{rowDataColorMap}
\alias{assayColorMap,ExperimentColorMap,character-method}
\alias{assayColorMap,ExperimentColorMap,numeric-method}
\alias{colDataColorMap,ExperimentColorMap,character-method}
\alias{rowDataColorMap,ExperimentColorMap,character-method}
\title{\code{ExperimentColorMap} objects}
\usage{
ExperimentColorMap(assays = list(), colData = list(), rowData = list(),
all_discrete = list(assays = NULL, colData = NULL, rowData = NULL),
all_continuous = list(assays = NULL, colData = NULL, rowData = NULL),
global_discrete = NULL, global_continuous = NULL, ...)
}
\arguments{
\item{assays}{List of color maps for \code{assays}.}
\item{colData}{List of color maps for \code{colData}.}
\item{rowData}{List of color maps for \code{rowData}.}
\item{all_discrete}{Discrete color maps applied to all undefined
\code{assays}, \code{colData}, and \code{rowData}, respectively.}
\item{all_continuous}{Continuous color maps applied to all undefined
\code{assays}, \code{colData}, and \code{rowData}, respectively.}
\item{global_discrete}{Discrete color maps applied to all undefined discrete
covariates.}
\item{global_continuous}{Continuous color maps applied to all undefined
continuous covariates.}
\item{...}{additional arguments passed on to the \code{ExperimentColorMap}
constructor}
}
\description{
\code{ExperimentColorMap} objects
}
\details{
Color maps must all be functions that take at least one argument: the number
of (named) colours to return as a \code{character} vector.
This argument may be ignored in the body of the color map function
to produce constant color maps.
}
\section{Accessors}{
In the following code snippets, \code{x} is an
\code{ExperimentColorMap} object. If the color map can not immediately
be found in the appropriate slot, \code{discrete} is a \code{logical(1)}
that indicates whether the default color map returned should be discrete
\code{TRUE} or continuous (\code{FALSE}, default).
\describe{
\item{\code{assayColorMap(x, i, ..., discrete=FALSE)}:}{
Get an \code{assays} colormap.}
\item{\code{colDataColorMap(x, i, ..., discrete=FALSE)}:}{
Get a \code{colData} colormap.}
\item{\code{rowDataColorMap(x, i, ..., discrete=FALSE)}:}{
Get a \code{rowData} colormap.}
}
}
\examples{
# Example color maps ----
count_colors <- function(n){
c("black","brown","red","orange","yellow")
}
fpkm_colors <- viridis::inferno
tpm_colors <- viridis::plasma
qc_color_fun <- function(n){
qc_colors <- c("forestgreen", "firebrick1")
names(qc_colors) <- c("Y", "N")
return(qc_colors)
}
# Constructor ----
ecm <- ExperimentColorMap(
assays = list(
counts = count_colors,
tophat_counts = count_colors,
    cufflinks_fpkm = fpkm_colors,
rsem_tpm = tpm_colors
),
colData = list(
passes_qc_checks_s = qc_color_fun
)
)
# Accessors ----
assayColorMap(ecm, "logcounts") # [undefined --> default]
assayColorMap(ecm, "counts")
assayColorMap(ecm, "cufflinks_fpkm")
colDataColorMap(ecm, "passes_qc_checks_s")
colDataColorMap(ecm, "undefined")
rowDataColorMap(ecm, "undefined")
}
| /man/ExperimentColorMap.Rd | permissive | slowkow/iSEE | R | false | true | 3,387 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\name{ExperimentColorMap}
\alias{ExperimentColorMap}
\alias{class:ExperimentColorMap}
\alias{ExperimentColorMap-class}
\alias{assayColorMap}
\alias{colDataColorMap}
\alias{rowDataColorMap}
\alias{assayColorMap,ExperimentColorMap,character-method}
\alias{assayColorMap,ExperimentColorMap,numeric-method}
\alias{colDataColorMap,ExperimentColorMap,character-method}
\alias{rowDataColorMap,ExperimentColorMap,character-method}
\title{\code{ExperimentColorMap} objects}
\usage{
ExperimentColorMap(assays = list(), colData = list(), rowData = list(),
all_discrete = list(assays = NULL, colData = NULL, rowData = NULL),
all_continuous = list(assays = NULL, colData = NULL, rowData = NULL),
global_discrete = NULL, global_continuous = NULL, ...)
}
\arguments{
\item{assays}{List of color maps for \code{assays}.}
\item{colData}{List of color maps for \code{colData}.}
\item{rowData}{List of color maps for \code{rowData}.}
\item{all_discrete}{Discrete color maps applied to all undefined
\code{assays}, \code{colData}, and \code{rowData}, respectively.}
\item{all_continuous}{Continuous color maps applied to all undefined
\code{assays}, \code{colData}, and \code{rowData}, respectively.}
\item{global_discrete}{Discrete color maps applied to all undefined discrete
covariates.}
\item{global_continuous}{Continuous color maps applied to all undefined
continuous covariates.}
\item{...}{additional arguments passed on to the \code{ExperimentColorMap}
constructor}
}
\description{
\code{ExperimentColorMap} objects
}
\details{
Color maps must all be functions that take at least one argument: the number
of (named) colours to return as a \code{character} vector.
This argument may be ignored in the body of the color map function
to produce constant color maps.
}
\section{Accessors}{
In the following code snippets, \code{x} is an
\code{ExperimentColorMap} object. If the color map cannot immediately
be found in the appropriate slot, \code{discrete} is a \code{logical(1)}
that indicates whether the default color map returned should be discrete
(\code{TRUE}) or continuous (\code{FALSE}, the default).
\describe{
\item{\code{assayColorMap(x, i, ..., discrete=FALSE)}:}{
Get an \code{assays} colormap.}
\item{\code{colDataColorMap(x, i, ..., discrete=FALSE)}:}{
Get a \code{colData} colormap.}
\item{\code{rowDataColorMap(x, i, ..., discrete=FALSE)}:}{
Get a \code{rowData} colormap.}
}
}
\examples{
# Example color maps ----
count_colors <- function(n){
c("black","brown","red","orange","yellow")
}
fpkm_colors <- viridis::inferno
tpm_colors <- viridis::plasma
qc_color_fun <- function(n){
qc_colors <- c("forestgreen", "firebrick1")
names(qc_colors) <- c("Y", "N")
return(qc_colors)
}
# Constructor ----
ecm <- ExperimentColorMap(
assays = list(
counts = count_colors,
tophat_counts = count_colors,
        cufflinks_fpkm = fpkm_colors,
rsem_tpm = tpm_colors
),
colData = list(
passes_qc_checks_s = qc_color_fun
)
)
# Accessors ----
assayColorMap(ecm, "logcounts") # [undefined --> default]
assayColorMap(ecm, "counts")
assayColorMap(ecm, "cufflinks_fpkm")
colDataColorMap(ecm, "passes_qc_checks_s")
colDataColorMap(ecm, "undefined")
rowDataColorMap(ecm, "undefined")
}
|
# ============ LOAD DATA
# Read the full household power data set (semicolon-separated).
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")
# Parse dates and coerce the power reading to numeric; "?" markers become NA,
# hence the suppressed coercion warnings.
power$Date <- as.Date(as.character(power$Date), "%d/%m/%Y")
power$Global_active_power <- suppressWarnings(as.numeric(as.character(power$Global_active_power)))
# Keep only the two target days: 2007-02-01 and 2007-02-02.
feb_days <- subset(power, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
n_obs <- length(feb_days$Date)
# ============ Save to PNG
png(filename = "plot2.png", width = 480, height = 480, units = "px")
# Line plot of global active power with a custom weekday axis.
plot(feb_days$Global_active_power,
     type = "l",
     xaxt = "n",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
axis(1,
     at = c(0, n_obs / 2, n_obs),
     labels = c("Thu", "Fri", "Sat"))
dev.off()
| /plot2.R | no_license | rarich01/ExData_Plotting1 | R | false | false | 725 | r |
# ============ LOAD DATA
# Full household power data set, semicolon-separated; all columns read as text.
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
# Convert data: parse dates, coerce power to numeric ("?" markers become NA,
# hence the suppressed warnings).
data$Date<- as.Date(as.character(data$Date), "%d/%m/%Y")
data$Global_active_power<- suppressWarnings(as.numeric(as.character(data$Global_active_power)))
# Subselect data: keep only 2007-02-01 and 2007-02-02.
sdata <- subset(data, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Number of observations across the two days (used to place axis ticks).
ROWLENGTH <- length(sdata$Date)
# ============ Save to PNG
png(filename="plot2.png",
    width=480,
    height=480,
    units="px")
# Line plot of global active power; the default x axis is suppressed and
# replaced below with weekday labels at the start / middle / end.
plot(sdata$Global_active_power,
     type="l",
     xaxt="n",
     xlab = "",
     ylab="Global Active Power (kilowatts)")
axis(1,
     at=c(0, ROWLENGTH/2, ROWLENGTH),
     labels=c("Thu","Fri","Sat"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grammar_sampler.R
\name{grammar_sampler}
\alias{grammar_sampler}
\title{grammar_sampler}
\usage{
grammar_sampler(
n,
grammar,
max_depth,
no_cores = NULL,
unique = TRUE,
seed = NULL,
save = TRUE,
file_name = NULL
)
}
\arguments{
\item{n}{Number of functions to be sampled from grammar}
\item{grammar}{A Grammar Object, created by the create_grammar() function.}
\item{max_depth}{Maximum recursive depth used to sample grammar.}
\item{no_cores}{The number of cores for parallel computation. If NULL, then all but 2 cores are used.}
\item{unique}{Logical; if TRUE, only the uniquely sampled functions are kept.}
\item{seed}{An integer to be supplied to set.seed, or NULL not to set reproducible seeds.}
\item{save}{Logical; if TRUE, a .feather file of the output is saved in the current working directory.}
\item{file_name}{File name used for the saved output when \code{save = TRUE}.}
}
\value{
Returns a data frame with all sampled functions.
}
\description{
grammar_sampler
}
\examples{
simple_grammar <- create_grammar(a = "<b><op><c>, <a><op><b>, <a><op><c>, 1",
b = "2, 4",
c = "1, 3, 5",
op = "+, -")
\dontrun{
grammar_functions <- grammar_sampler(n = 10,
grammar = simple_grammar,
max_depth = 5,
no_cores = 1,
save = FALSE)
}
}
| /man/grammar_sampler.Rd | permissive | MoritzFeigl/FSO | R | false | true | 1,475 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grammar_sampler.R
\name{grammar_sampler}
\alias{grammar_sampler}
\title{grammar_sampler}
\usage{
grammar_sampler(
n,
grammar,
max_depth,
no_cores = NULL,
unique = TRUE,
seed = NULL,
save = TRUE,
file_name = NULL
)
}
\arguments{
\item{n}{Number of functions to be sampled from grammar}
\item{grammar}{A Grammar Object, created by the create_grammar() function.}
\item{max_depth}{Maximum recursive depth used to sample grammar.}
\item{no_cores}{The number of cores for parallel computation. If NULL, then all but 2 cores are used.}
\item{unique}{Logical; if TRUE, only the uniquely sampled functions are kept.}
\item{seed}{An integer to be supplied to set.seed, or NULL not to set reproducible seeds.}
\item{save}{Logical; if TRUE, a .feather file of the output is saved in the current working directory.}
\item{file_name}{File name used for the saved output when \code{save = TRUE}.}
}
\value{
Returns a data frame with all sampled functions.
}
\description{
grammar_sampler
}
\examples{
simple_grammar <- create_grammar(a = "<b><op><c>, <a><op><b>, <a><op><c>, 1",
b = "2, 4",
c = "1, 3, 5",
op = "+, -")
\dontrun{
grammar_functions <- grammar_sampler(n = 10,
grammar = simple_grammar,
max_depth = 5,
no_cores = 1,
save = FALSE)
}
}
|
# NOTE(review): rm(list = ls()) and an absolute setwd() make this script
# non-portable and wipe the caller's workspace; consider removing both.
rm(list=ls())
library(readr)
library(data.table)
library(finML)
library(fBasics)
setwd("C:/Users/nicholas.warren/Desktop/Work/Kaggle/Houses/")
# Load the Kaggle house-price data; read_csv() returns a tibble, so wrap in
# data.frame() for downstream code that expects a plain data.frame.
train <- data.frame(read_csv(file = "data/train.csv"))
test <- data.frame(read_csv(file = "data/test.csv"))
# Model the log of the sale price (all metrics below are on the log scale).
train$SalePrice<-log(train$SalePrice)
#or define factor names explicitly
covars<-names(train)
#Remove any dupe variables, plus the response and the row id
covars <- setdiff(unique(covars), c("SalePrice","Id"))
#Apply a custom feature function to data. This will allow defined interactions to be created, but also allow these interactions to be captured within partial plots in later updates
# Custom feature engineering applied to the house-price data:
#  * adds LASR = sqrt(LotArea)
#  * log1p-transforms every right-skewed numeric column (skewness > 0.3),
#    excluding the Id and SalePrice columns.
# Returns the modified data.frame.
myFeatureFun<-function(data){
  data$LASR<-sqrt(data$LotArea)
  # (disabled) pairwise interaction features, kept for reference:
  # interFileds<-c("OverallQual", "GrLivArea", "GarageCars", "TotalBsmtSF", "BsmtFinSF1")
  # combs<-t(combn(x=interFileds,m = 2 ))
  #
  # for(i in 1:nrow(combs)){
  #   interactionName<-BBmisc::collapse(combs[i,], sep = "-X-")
  #   data[[interactionName]]<-data[[combs[i,1]]]*data[[combs[i,2]]]
  #
  # }
  # For all numeric variables that are highly skewed, take a log transform.
  for(n in setdiff(names(data), c("Id", "SalePrice"))){
    if(is.numeric(data[[n]])){
      # isTRUE() guards against an NA skewness (e.g. constant or all-NA
      # columns), which would otherwise abort the run with "missing value
      # where TRUE/FALSE needed"; TRUE replaces the reassignable shorthand T.
      if(isTRUE(skewness(data[[n]], na.rm = TRUE)>0.3)){
        data[[n]]<-log(data[[n]]+1)
      }
      #Robust scaling (disabled)
      # data[[n]]<-(data[[n]]-median(data[[n]]))/(quantile(data[[n]], 0.75, na.rm=T)-quantile(data[[n]],0.25, na.rm=T))
    }
  }
  return(data)
}
# ---- Data task -------------------------------------------------------------
# Define the data task type, i.e. classification or regression.
dT<-finDataTask(id="House"
                , dataTrain=train
                , dataTest= test
                , response = "SalePrice"
                , verbose=TRUE
                , id_Column="Id")
# Apply the custom feature function registered on the task.
dT$featureFun()
# Set features explicitly to all columns except the response / id.
#dT$setfeaturesAll()
#dT$featuresNumeric
dT$setfeatures(covars)
# NOTE(review): the duplicated call below looks accidental; it is kept to
# preserve behavior on the assumption setfeatures() is idempotent -- confirm
# against finML and drop one.
dT$setfeatures(covars)
# Tidy up the model matrix.
dT$tidy()
# Imputation: mode for categorical columns, constant value for numeric ones.
#dT$imputeFeatures(methodCategorical = "mode", methodNumeric = "mean")
dT$imputeFeatures(methodCategorical = "mode", methodNumeric = "value")
#dT$imputeFeatures(methodCategorical = "ranger", methodNumeric = "ranger")
#dT$imputeFeatures(methodCategorical = "ranger", methodNumeric = "mean")
#
# One-hot encode categorical features.
dT$encodeCategorical( method = "OneHot")
#dT$encodeCategorical( method = "Integer")
#dT$removeUnbalancedBinaryFeatures(featureName="all", threshold=0.01)
length(dT$MMFeatures)
# ---- Resampling ------------------------------------------------------------
# Make a resample instance; train_frac = 1 keeps no separate holdout split.
r<-finResample(id="QC_Resample", dataTask= dT)
r$setHoldout(train_frac=1)
#
# r$setResampleType(type = "None")
# r$setResampleType(type = "Nfold", folds = 3L)
# r$setResampleType(type = "StratifiedNfold", folds = 10L, stratifiedfield = "SalePrice")
# r$setResampleType(type = "RepeatedNfold", folds = 10L, repeats = 5L)
r$setResampleType(type = "RepeatedResample", train_frac = 0.8, repeats = 10L)
# r$setResampleType(type = "Holdout", train_frac = 0.5)
#----------------------------------------------------------------------------
# ---- Base learner ----------------------------------------------------------
# Define the learner object first.
# NOTE(review): the defaults below (n.trees, shrinkage, booster = "gbtree",
# ...) look like GBM/xgboost settings -- confirm "Reg_cubist" consumes them.
BM<-finMakeBaseLearner(modelName="Reg_cubist")
# List the parameters of the base model that can be adjusted.
BM$listParameters()
BM$par.set$n.trees$default=15000
BM$par.set$shrinkage$default = 0.1
BM$par.set$bag.fraction$default = 0.5
BM$par.set$colsample_bytree$default = 0.5
BM$par.set$n.minobsinnode$default = 50
BM$par.set$interaction.depth$default=2
BM$par.set$num_parallel_tree$default = 2
BM$par.set$auto.stopping$default = 500
BM$par.set$booster$default = "gbtree"
# BM$par.set$drop.rate$default = 0.5
# Best Parameters Found:
#   Round = 4 interaction.depth = 2.4853 bag.fraction = 0.9113 n.minobsinnode = 106.9180 colsample_bytree = 0.6082 num_parallel_tree = 2.2575 Value = -0.0091
# Pass the base learner into the learner object with evaluation metrics.
trainer<-finLearner( Resample = r
                     , BaseLearner = BM
                     , Metrics=list(rmsle="RMSLE", rmse ="RMSE", gini="Gini" ))
trainer$train()
#0.13583824 Validation ERROR 10Fold x 5
#0.13020 PL
#
# ---- Test-set predictions --------------------------------------------------
# Predictions are on the log scale (SalePrice was log-transformed above),
# so back-transform with exp() before writing the submission file.
testPred<-trainer$predictTest()
testPred$Pred<-exp(testPred$Pred)
names(testPred)<-c("Id", "SalePrice")
# row.names = FALSE (was the reassignable shorthand F).
write.csv(testPred, file = "Output_SimpleLM.csv", row.names = FALSE)
trainer$metricsEvalSummary
# Summary Metrics across all resample cycles:
# metric Train TrainSD Valid ValidSD Holdout HoldoutSD
# 1 RMSLE 0.00285939 0.0007641778 0.009645728 0.0006029021 0.009555131 0.000337404
# 2 RMSE 0.03697439 0.0098448799 0.125128222 0.0079839813 0.122346863 0.004317128
# 3 Gini 0.99523436 0.0021975205 0.957733945 0.0073817573 0.963690683 0.002702894
# library(xgboost)
# imp_matrix<-NULL
# for (i in 1:10) {
#
# imp_matrix <- rbind(imp_matrix,xgb.importance(feature_names = trainer$resample$dataTask$MMFeatures, model = trainer$learner$model[[i]]))
#
# }
#
#
# imp_matrix <- data.table(imp_matrix)
#
# imp_matrix<-imp_matrix[, list(Gain = sum(Gain)), by = Feature]
#
# imp_matrix$Gain <- imp_matrix$Gain / sum(imp_matrix$Gain)
# imp_matrix <- imp_matrix[order( - imp_matrix$Gain)]
#
# covars<-head(imp_matrix$Feature,20)
#
# trainer$plot.PvO()
#
#
# trainer$plot.Partial(DepVar = "GarageCars"
# , partial.samples = 100
# , addAverage = T)
#
#
#
#
# ---- Bayesian hyper-parameter optimisation ---------------------------------
BM$listParameters()
# Mark each parameter as tunable and bound its search range.
BM$setTuningParameter("n.minobsinnode")
BM$par.set[["n.minobsinnode"]]$max<-500
BM$par.set[["n.minobsinnode"]]$min<-50
BM$setTuningParameter("interaction.depth")
BM$par.set[["interaction.depth"]]$max<-5
BM$par.set[["interaction.depth"]]$min<-2
BM$setTuningParameter("colsample_bytree")
BM$par.set[["colsample_bytree"]]$max<-1.0
BM$par.set[["colsample_bytree"]]$min<-0.5
BM$setTuningParameter("bag.fraction")
BM$par.set[["bag.fraction"]]$max<-1.0
BM$par.set[["bag.fraction"]]$min<-0.5
BM$setTuningParameter("num_parallel_tree")
BM$par.set[["num_parallel_tree"]]$max <- 3.0
BM$par.set[["num_parallel_tree"]]$min <- 1
# Run Bayesian optimisation minimising RMSLE (MetricMaximise = FALSE):
# 10 random initial points followed by 20 optimisation iterations.
optTask<-finOptimise( Resample = r
                      , BaseLearner = BM
                      , Metrics=list(rmsle="RMSLE")
                      , MetricMaximise = FALSE
                      , Optimiser ="Bayes")
optTask$Optimise(N_iter=20, int_points=10)
# ##
#
#
#
#
#
#
#
#
#
#
#
#
| /Houses/finML Houses (lm).R | no_license | nerdville/WaddleFish | R | false | false | 6,215 | r | rm(list=ls())
library(readr)
library(data.table)
library(finML)
library(fBasics)
# NOTE(review): the absolute setwd() (and the rm(list=ls()) above) make this
# script non-portable and wipe the caller's workspace; consider removing both.
setwd("C:/Users/nicholas.warren/Desktop/Work/Kaggle/Houses/")
# Load the Kaggle house-price data; read_csv() returns a tibble, so wrap in
# data.frame() for downstream code that expects a plain data.frame.
train <- data.frame(read_csv(file = "data/train.csv"))
test <- data.frame(read_csv(file = "data/test.csv"))
# Model the log of the sale price (all metrics below are on the log scale).
train$SalePrice<-log(train$SalePrice)
#or define factor names explicitly
covars<-names(train)
#Remove any dupe variables, plus the response and the row id
covars <- setdiff(unique(covars), c("SalePrice","Id"))
#Apply a custom feature function to data. This will allow defined interactions to be created, but also allow these interactions to be captured within partial plots in later updates
# Custom feature engineering applied to the house-price data:
#  * adds LASR = sqrt(LotArea)
#  * log1p-transforms every right-skewed numeric column (skewness > 0.3),
#    excluding the Id and SalePrice columns.
# Returns the modified data.frame.
myFeatureFun<-function(data){
  data$LASR<-sqrt(data$LotArea)
  # (disabled) pairwise interaction features, kept for reference:
  # interFileds<-c("OverallQual", "GrLivArea", "GarageCars", "TotalBsmtSF", "BsmtFinSF1")
  # combs<-t(combn(x=interFileds,m = 2 ))
  #
  # for(i in 1:nrow(combs)){
  #   interactionName<-BBmisc::collapse(combs[i,], sep = "-X-")
  #   data[[interactionName]]<-data[[combs[i,1]]]*data[[combs[i,2]]]
  #
  # }
  # For all numeric variables that are highly skewed, take a log transform.
  for(n in setdiff(names(data), c("Id", "SalePrice"))){
    if(is.numeric(data[[n]])){
      # isTRUE() guards against an NA skewness (e.g. constant or all-NA
      # columns), which would otherwise abort the run with "missing value
      # where TRUE/FALSE needed"; TRUE replaces the reassignable shorthand T.
      if(isTRUE(skewness(data[[n]], na.rm = TRUE)>0.3)){
        data[[n]]<-log(data[[n]]+1)
      }
      #Robust scaling (disabled)
      # data[[n]]<-(data[[n]]-median(data[[n]]))/(quantile(data[[n]], 0.75, na.rm=T)-quantile(data[[n]],0.25, na.rm=T))
    }
  }
  return(data)
}
# ---- Data task -------------------------------------------------------------
# Define the data task type, i.e. classification or regression.
dT<-finDataTask(id="House"
                , dataTrain=train
                , dataTest= test
                , response = "SalePrice"
                , verbose=TRUE
                , id_Column="Id")
# Apply the custom feature function registered on the task.
dT$featureFun()
# Set features explicitly to all columns except the response / id.
#dT$setfeaturesAll()
#dT$featuresNumeric
dT$setfeatures(covars)
# NOTE(review): the duplicated call below looks accidental; it is kept to
# preserve behavior on the assumption setfeatures() is idempotent -- confirm
# against finML and drop one.
dT$setfeatures(covars)
# Tidy up the model matrix.
dT$tidy()
# Imputation: mode for categorical columns, constant value for numeric ones.
#dT$imputeFeatures(methodCategorical = "mode", methodNumeric = "mean")
dT$imputeFeatures(methodCategorical = "mode", methodNumeric = "value")
#dT$imputeFeatures(methodCategorical = "ranger", methodNumeric = "ranger")
#dT$imputeFeatures(methodCategorical = "ranger", methodNumeric = "mean")
#
# One-hot encode categorical features.
dT$encodeCategorical( method = "OneHot")
#dT$encodeCategorical( method = "Integer")
#dT$removeUnbalancedBinaryFeatures(featureName="all", threshold=0.01)
length(dT$MMFeatures)
# ---- Resampling ------------------------------------------------------------
# Make a resample instance; train_frac = 1 keeps no separate holdout split.
r<-finResample(id="QC_Resample", dataTask= dT)
r$setHoldout(train_frac=1)
#
# r$setResampleType(type = "None")
# r$setResampleType(type = "Nfold", folds = 3L)
# r$setResampleType(type = "StratifiedNfold", folds = 10L, stratifiedfield = "SalePrice")
# r$setResampleType(type = "RepeatedNfold", folds = 10L, repeats = 5L)
r$setResampleType(type = "RepeatedResample", train_frac = 0.8, repeats = 10L)
# r$setResampleType(type = "Holdout", train_frac = 0.5)
#----------------------------------------------------------------------------
# ---- Base learner ----------------------------------------------------------
# Define the learner object first.
# NOTE(review): the defaults below (n.trees, shrinkage, booster = "gbtree",
# ...) look like GBM/xgboost settings -- confirm "Reg_cubist" consumes them.
BM<-finMakeBaseLearner(modelName="Reg_cubist")
# List the parameters of the base model that can be adjusted.
BM$listParameters()
BM$par.set$n.trees$default=15000
BM$par.set$shrinkage$default = 0.1
BM$par.set$bag.fraction$default = 0.5
BM$par.set$colsample_bytree$default = 0.5
BM$par.set$n.minobsinnode$default = 50
BM$par.set$interaction.depth$default=2
BM$par.set$num_parallel_tree$default = 2
BM$par.set$auto.stopping$default = 500
BM$par.set$booster$default = "gbtree"
# BM$par.set$drop.rate$default = 0.5
# Best Parameters Found:
#   Round = 4 interaction.depth = 2.4853 bag.fraction = 0.9113 n.minobsinnode = 106.9180 colsample_bytree = 0.6082 num_parallel_tree = 2.2575 Value = -0.0091
# Pass the base learner into the learner object with evaluation metrics.
trainer<-finLearner( Resample = r
                     , BaseLearner = BM
                     , Metrics=list(rmsle="RMSLE", rmse ="RMSE", gini="Gini" ))
trainer$train()
#0.13583824 Validation ERROR 10Fold x 5
#0.13020 PL
#
# ---- Test-set predictions --------------------------------------------------
# Predictions are on the log scale (SalePrice was log-transformed above),
# so back-transform with exp() before writing the submission file.
testPred<-trainer$predictTest()
testPred$Pred<-exp(testPred$Pred)
names(testPred)<-c("Id", "SalePrice")
# row.names = FALSE (was the reassignable shorthand F).
write.csv(testPred, file = "Output_SimpleLM.csv", row.names = FALSE)
trainer$metricsEvalSummary
# Summary Metrics across all resample cycles:
# metric Train TrainSD Valid ValidSD Holdout HoldoutSD
# 1 RMSLE 0.00285939 0.0007641778 0.009645728 0.0006029021 0.009555131 0.000337404
# 2 RMSE 0.03697439 0.0098448799 0.125128222 0.0079839813 0.122346863 0.004317128
# 3 Gini 0.99523436 0.0021975205 0.957733945 0.0073817573 0.963690683 0.002702894
# library(xgboost)
# imp_matrix<-NULL
# for (i in 1:10) {
#
# imp_matrix <- rbind(imp_matrix,xgb.importance(feature_names = trainer$resample$dataTask$MMFeatures, model = trainer$learner$model[[i]]))
#
# }
#
#
# imp_matrix <- data.table(imp_matrix)
#
# imp_matrix<-imp_matrix[, list(Gain = sum(Gain)), by = Feature]
#
# imp_matrix$Gain <- imp_matrix$Gain / sum(imp_matrix$Gain)
# imp_matrix <- imp_matrix[order( - imp_matrix$Gain)]
#
# covars<-head(imp_matrix$Feature,20)
#
# trainer$plot.PvO()
#
#
# trainer$plot.Partial(DepVar = "GarageCars"
# , partial.samples = 100
# , addAverage = T)
#
#
#
#
# ---- Bayesian hyper-parameter optimisation ---------------------------------
BM$listParameters()
# Mark each parameter as tunable and bound its search range.
BM$setTuningParameter("n.minobsinnode")
BM$par.set[["n.minobsinnode"]]$max<-500
BM$par.set[["n.minobsinnode"]]$min<-50
BM$setTuningParameter("interaction.depth")
BM$par.set[["interaction.depth"]]$max<-5
BM$par.set[["interaction.depth"]]$min<-2
BM$setTuningParameter("colsample_bytree")
BM$par.set[["colsample_bytree"]]$max<-1.0
BM$par.set[["colsample_bytree"]]$min<-0.5
BM$setTuningParameter("bag.fraction")
BM$par.set[["bag.fraction"]]$max<-1.0
BM$par.set[["bag.fraction"]]$min<-0.5
BM$setTuningParameter("num_parallel_tree")
BM$par.set[["num_parallel_tree"]]$max <- 3.0
BM$par.set[["num_parallel_tree"]]$min <- 1
# Run Bayesian optimisation minimising RMSLE (MetricMaximise = FALSE):
# 10 random initial points followed by 20 optimisation iterations.
optTask<-finOptimise( Resample = r
                      , BaseLearner = BM
                      , Metrics=list(rmsle="RMSLE")
                      , MetricMaximise = FALSE
                      , Optimiser ="Bayes")
optTask$Optimise(N_iter=20, int_points=10)
# ##
#
#
#
#
#
#
#
#
#
#
#
#
|
library(ggplot2)
library(plotly)
library(dplyr)
library(openintro)
source("R/format_data.R")
# Draw an interactive US choropleth of total_killed for the given year.
# Expects a data frame named data_<year> (created by R/format_data.R) to be
# visible from the calling environment.
interactiveMap <- function(year) {
  # get() is the safe way to look up an object by constructed name;
  # eval(parse(text = ...)) is an anti-pattern for this.
  data <- get(paste0("data_", year))
  data <- data %>%
    filter(state != "National", state != "Puerto Rico")
  data$code <- state2abbr(data$state)
  # Strip thousands separators and coerce to numeric so plotly receives a
  # numeric z and scales the fill correctly.
  data$total_killed <- as.numeric(gsub(",", "", data$total_killed))
  # specify some map projection/options
  g <- list(
    scope = 'usa',
    projection = list(type = 'albers usa'),
    showlakes = TRUE,
    lakecolor = 'white'
  )
  # Build a green-to-red continuous colorscale from the volcano sample values.
  vals <- unique(scales::rescale(c(volcano)))
  o <- order(vals, decreasing = FALSE)
  cols <- scales::col_numeric(c("light green", "#dd4945"), domain = NULL)(vals)
  colz <- setNames(data.frame(vals[o], cols[o]), NULL)
  plot_geo(data, locationmode = 'USA-states') %>%
    add_trace(
      hovertext = ~state, z = ~total_killed, locations = ~code,
      colorscale = colz
    ) %>%
    layout(
      geo = g
    )
}
# Render the 2014 map when the script is sourced.
interactiveMap(2014)
| /Final_Shiny/R/interactive_map.R | no_license | bridhett/info-201-final-project | R | false | false | 974 | r | library(ggplot2)
library(plotly)
library(dplyr)
library(openintro)
source("R/format_data.R")
# Draw an interactive US choropleth of total_killed for the given year.
# Expects a data frame named data_<year> (created by R/format_data.R) to be
# visible from the calling environment.
interactiveMap <- function(year) {
  # get() is the safe way to look up an object by constructed name;
  # eval(parse(text = ...)) is an anti-pattern for this.
  data <- get(paste0("data_", year))
  data <- data %>%
    filter(state != "National", state != "Puerto Rico")
  data$code <- state2abbr(data$state)
  # Strip thousands separators and coerce to numeric so plotly receives a
  # numeric z and scales the fill correctly.
  data$total_killed <- as.numeric(gsub(",", "", data$total_killed))
  # specify some map projection/options
  g <- list(
    scope = 'usa',
    projection = list(type = 'albers usa'),
    showlakes = TRUE,
    lakecolor = 'white'
  )
  # Build a green-to-red continuous colorscale from the volcano sample values.
  vals <- unique(scales::rescale(c(volcano)))
  o <- order(vals, decreasing = FALSE)
  cols <- scales::col_numeric(c("light green", "#dd4945"), domain = NULL)(vals)
  colz <- setNames(data.frame(vals[o], cols[o]), NULL)
  plot_geo(data, locationmode = 'USA-states') %>%
    add_trace(
      hovertext = ~state, z = ~total_killed, locations = ~code,
      colorscale = colz
    ) %>%
    layout(
      geo = g
    )
}
# Render the 2014 map when the script is sourced.
interactiveMap(2014)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_tb.R
\name{get_tb}
\alias{get_tb}
\title{Title}
\usage{
get_tb(data, var = "id", head = FALSE)
}
\arguments{
\item{data}{A data.frame to be converted to a tibble object.}
\item{var}{The column name used to store the row names; the default is "id".}
\item{head}{if TRUE, will display the data like head()}
}
\value{
tibble object
}
\description{
Title
}
\examples{
print("create an example to test it")
}
| /man/get_tb.Rd | permissive | wangjiaxuan666/xbox | R | false | true | 459 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_tb.R
\name{get_tb}
\alias{get_tb}
\title{Title}
\usage{
get_tb(data, var = "id", head = FALSE)
}
\arguments{
\item{data}{A data.frame to be converted to a tibble object.}
\item{var}{The column name used to store the row names; the default is "id".}
\item{head}{if TRUE, will display the data like head()}
}
\value{
tibble object
}
\description{
Title
}
\examples{
print("create an example to test it")
}
|
#Assignment 3.1 - Session: 3
#Q1 How many vowels are there in the names of USA States?
#Solution 1:
# Count how many times each vowel appears in the 50 US state names
# (case-insensitive), then plot the distribution.
States <- rownames(USArrests)
x <- c('a','e','i','o','u')
x
y <- rep(0,times=5)
y
input <- data.frame(x,y)
input
# Vectorized count: split every state name into single characters once,
# lower-case them, then tally each vowel. This replaces the hand-rolled
# double loop that hard-coded 1:50 and mutated input with `=` assignments.
all_chars <- tolower(unlist(strsplit(States, "")))
for (j in seq_along(x)) {
  input[j, 2] <- sum(all_chars == x[j])
}
print(input)
#Q2: Visualize the vowels distribution.
#Solution 2:
#Using barplot
barplot(input$y,names.arg = c('A','E','I','O','U'),xlab = "Vowels",ylab="Frequency",col = "blue")
| /Assignment_3_1.R | no_license | sheetalnishad/assignment-3.1 | R | false | false | 956 | r | #Assignment 3.1 - Session: 3
#Q1 How many vowels are there in the names of USA States?
#Solution 1:
# Count how many times each vowel appears in the 50 US state names
# (case-insensitive), then plot the distribution.
States <- rownames(USArrests)
x <- c('a','e','i','o','u')
x
y <- rep(0,times=5)
y
input <- data.frame(x,y)
input
# Vectorized count: split every state name into single characters once,
# lower-case them, then tally each vowel. This replaces the hand-rolled
# double loop that hard-coded 1:50 and mutated input with `=` assignments.
all_chars <- tolower(unlist(strsplit(States, "")))
for (j in seq_along(x)) {
  input[j, 2] <- sum(all_chars == x[j])
}
print(input)
#Q2: Visualize the vowels distribution.
#Solution 2:
#Using barplot
barplot(input$y,names.arg = c('A','E','I','O','U'),xlab = "Vowels",ylab="Frequency",col = "blue")
|
#'@title Define chromosomal regions for screening
#'@description Define overlapping loci starting and ending positions for Wavelet screaming analysis
#'@param bp vector of the observed base-pair positions in a chromosome
#'@param Loci_size size of the defined loci, limited by thresh size gaps on ends. Slices smaller than Loci_size will be skipped.
#'@param thresh maximal distance between two SNPs within a locus. E.g. 10000
#'@param Chr the chromosome's number where the slicing is made. By default set as NA
#'@export
#'@examples \dontrun{
#'Loci_size=1000000
#'thresh=10000
#'temp <- runif(n = 50000,min=1,max=10050)
#'for (i in 2:length(temp))
#'{
#' temp[i] <- temp[i]+temp[i-1]
#'}
#'bp <- temp
#'df <-slice_definition(bp=bp,Loci_size=Loci_size,thresh = thresh ,Chr=5)
#'head(df)
#'}
slice_definition <- function(bp,Loci_size=1e6,thresh=1e4,Chr=NA)
{
  # || short-circuits the scalar guard (was the element-wise |, which always
  # evaluated both sides).
  if(!is.vector(bp) || !is.numeric(bp)){
    stop("ERROR: bp was not a numeric vector")
  }
  bp <- sort(bp)
  # First SNP is at distance 0 of itself.
  espacement <- c(0,diff(bp))
  ##################################
  ## locations where spacing is too big
  ##################################
  my_index <- c(1,which(espacement > thresh) )
  # No gap above thresh: treat the whole chromosome as one segment.
  if(length(my_index)==1)
  {
    my_index <- c(1, length(bp))
  }
  # NOTE(review): when gaps ARE found, my_index never includes length(bp), so
  # the stretch after the last gap is never sliced -- confirm this is intended.
  # Width of each analysable segment (filled in below).
  Width_loci <- rep(0,length(my_index)-1)
  #######################################
  # Simple window sliding, half overlapping
  #######################################
  ## Strategy: if the region to analyse is longer than 1.5x the locus size,
  # run half-overlapping windows; if shorter, run one window from the start
  # and one from the end of the segment.
  # df: output defining the extraction (one row per slice).
  df <- data.frame(Chr= numeric(),posStart= numeric(),posEnd= numeric())
  for(i in 1:(length(my_index)-1))
  {
    my_diff <- bp[my_index][i+1]-bp[my_index][i]
    if( my_diff >= Loci_size ) #True means ok to run a wavelet analysis between my_index[i] and my_index[i+1]
    {
      Width_loci[i] <- my_diff
      if(my_diff -1>= 1.5*Loci_size)
      {
        temp1 <-0
        while(temp1 + Loci_size < my_diff)
        {
          # add one slice if there's at least one SNP inside the region
          # (-1 / +1 just to ensure that we keep edge SNPs)
          my_loci <- data.frame(Chr, posStart=bp[my_index][i]+temp1-1, posEnd=bp[my_index][i]+temp1+Loci_size+1)
          if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
            df <- rbind(df,my_loci)
          }
          temp1 <- temp1 +Loci_size/2
        }
        # Final window anchored at the segment end to cover the remainder.
        my_loci <- data.frame(Chr, posStart=bp[my_index][i+1]-Loci_size-1, posEnd=bp[my_index][i+1]+1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
      }
      else
      {
        # Short segment: one window from the start, one from the end.
        my_loci <- data.frame(Chr, posStart=bp[my_index][i]-1, posEnd=bp[my_index][i] + Loci_size + 1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
        my_loci <- data.frame(Chr, posStart=bp[my_index][i+1] - Loci_size-1, posEnd=bp[my_index][i+1] + 1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
      }
    }
  }
  # percentage of chromosome covered
  # note: does not necessarily correspond to coverage of genotyped positions
  coverage_of_analysis <- 100*sum(Width_loci)/(max(bp)-min(bp))
  colnames(df) <- c("Chr","posStart","posEnd")
  print(paste("number of slices defined: ", nrow(df)))
  print(paste("percentage of chromosome covered after slice definition: ",
              coverage_of_analysis,"%"))
  return(df)
}
| /R/definition_slice.R | no_license | asju/WaveletScreaming | R | false | false | 3,648 | r | #'@title Define chromosomal regions for screening
#'@description Define overlapping loci starting and ending positions for Wavelet screaming analysis
#'@param bp vector of the observed base-pair positions in a chromosome
#'@param Loci_size size of the defined loci, limited by thresh size gaps on ends. Slices smaller than Loci_size will be skipped.
#'@param thresh maximal distance between two SNPs within a locus. E.g. 10000
#'@param Chr the chromosome's number where the slicing is made. By default set as NA
#'@export
#'@examples \dontrun{
#'Loci_size=1000000
#'thresh=10000
#'temp <- runif(n = 50000,min=1,max=10050)
#'for (i in 2:length(temp))
#'{
#' temp[i] <- temp[i]+temp[i-1]
#'}
#'bp <- temp
#'df <-slice_definition(bp=bp,Loci_size=Loci_size,thresh = thresh ,Chr=5)
#'head(df)
#'}
slice_definition <- function(bp,Loci_size=1e6,thresh=1e4,Chr=NA)
{
  # || short-circuits the scalar guard (was the element-wise |, which always
  # evaluated both sides).
  if(!is.vector(bp) || !is.numeric(bp)){
    stop("ERROR: bp was not a numeric vector")
  }
  bp <- sort(bp)
  # First SNP is at distance 0 of itself.
  espacement <- c(0,diff(bp))
  ##################################
  ## locations where spacing is too big
  ##################################
  my_index <- c(1,which(espacement > thresh) )
  # No gap above thresh: treat the whole chromosome as one segment.
  if(length(my_index)==1)
  {
    my_index <- c(1, length(bp))
  }
  # NOTE(review): when gaps ARE found, my_index never includes length(bp), so
  # the stretch after the last gap is never sliced -- confirm this is intended.
  # Width of each analysable segment (filled in below).
  Width_loci <- rep(0,length(my_index)-1)
  #######################################
  # Simple window sliding, half overlapping
  #######################################
  ## Strategy: if the region to analyse is longer than 1.5x the locus size,
  # run half-overlapping windows; if shorter, run one window from the start
  # and one from the end of the segment.
  # df: output defining the extraction (one row per slice).
  df <- data.frame(Chr= numeric(),posStart= numeric(),posEnd= numeric())
  for(i in 1:(length(my_index)-1))
  {
    my_diff <- bp[my_index][i+1]-bp[my_index][i]
    if( my_diff >= Loci_size ) #True means ok to run a wavelet analysis between my_index[i] and my_index[i+1]
    {
      Width_loci[i] <- my_diff
      if(my_diff -1>= 1.5*Loci_size)
      {
        temp1 <-0
        while(temp1 + Loci_size < my_diff)
        {
          # add one slice if there's at least one SNP inside the region
          # (-1 / +1 just to ensure that we keep edge SNPs)
          my_loci <- data.frame(Chr, posStart=bp[my_index][i]+temp1-1, posEnd=bp[my_index][i]+temp1+Loci_size+1)
          if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
            df <- rbind(df,my_loci)
          }
          temp1 <- temp1 +Loci_size/2
        }
        # Final window anchored at the segment end to cover the remainder.
        my_loci <- data.frame(Chr, posStart=bp[my_index][i+1]-Loci_size-1, posEnd=bp[my_index][i+1]+1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
      }
      else
      {
        # Short segment: one window from the start, one from the end.
        my_loci <- data.frame(Chr, posStart=bp[my_index][i]-1, posEnd=bp[my_index][i] + Loci_size + 1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
        my_loci <- data.frame(Chr, posStart=bp[my_index][i+1] - Loci_size-1, posEnd=bp[my_index][i+1] + 1 )
        if(any(bp >= my_loci[,2] & bp <= my_loci[,3])){
          df <- rbind(df,my_loci)
        }
      }
    }
  }
  # percentage of chromosome covered
  # note: does not necessarily correspond to coverage of genotyped positions
  coverage_of_analysis <- 100*sum(Width_loci)/(max(bp)-min(bp))
  colnames(df) <- c("Chr","posStart","posEnd")
  print(paste("number of slices defined: ", nrow(df)))
  print(paste("percentage of chromosome covered after slice definition: ",
              coverage_of_analysis,"%"))
  return(df)
}
|
#' Plot confidence intervals
#'
#' "Forest plot"-style plotting of confidence intervals from a regression
#' model. The basic input is a matrix with columns of estimate/lower/upper,
#' along with an optional 4th column for the p-value. Also works with a
#' variety of fitted model objects (lm/glm/coxph/etc.); the appropriate
#' method is chosen by S3 dispatch on the class of `obj`.
#'
#' @param obj The object to be plotted; can be a matrix of raw values or a
#'   fitted model object
#' @param ... Additional arguments passed on to the class-specific method
#'
#' @examples
#' # Supplying a matrix
#' B <- cbind(1:3, 0:2, 2:4)
#' rownames(B) <- LETTERS[1:3]
#' CIplot(B)
#'
#' # Supplying a fitted model object
#' fit <- lm(Ozone ~ Solar.R + Wind + Temp, airquality)
#' CIplot(fit)
#'
#' @export
CIplot <- function(obj, ...) {
  # S3 generic: dispatch on the class of `obj`
  UseMethod("CIplot")
}
#' @rdname CIplot
#'
#' @param labels Parameter labels (default: row names of the input matrix;
#'   evaluated lazily, so by default they follow the rows *after* sorting)
#' @param sort Sort parameters by estimate? (default: true)
#' @param xlim,pxlim x axis limits and breakpoints; see `pretty()`
#' @param ylim y axis limits (default: c(0.5, n+0.5), where n is number of params)
#' @param sub Text to be written at top of plot
#' @param diff Include tests of difference / p-values?
#' @param null Draw a line representing no effect at this value (default: 0)
#' @param n.ticks Number of ticks on x-axis
#' @param mar As in `par()`
#' @param axis Create an x axis?
#' @param trans Function to transform parameter space
#' @param p.label Label p-values (p=0.02 instead of just 0.02)? (default: FALSE)
#' @param xlab,ylab As in `plot()`
#' @param add Add to existing plot?
#' @param setupOnly Create a new window for plot, but don't actually plot anything yet
#' @param lwd As in `lines()`
#' @param replaceUnderscore Replace underscore with space in plotting label
#' @param ... Additional arguments to `plot()`
#'
#' @export
CIplot.matrix <- function(
    obj, labels=rownames(B), sort=TRUE, pxlim, xlim, ylim, sub, diff=(ncol(B)==4), null=0, n.ticks=6, mar, axis=!add,
    trans, p.label=FALSE, xlab="", ylab="", add=FALSE, setupOnly=FALSE, lwd=2, replaceUnderscore=TRUE, ...) {
  B <- obj
  if (sort) B <- B[order(B[,1], decreasing=TRUE),,drop=FALSE]
  ## Set up margins (left margin sized to the longest label; extra right
  ## margin when p-values are printed)
  if (missing(mar)) {
    m1 <- 5
    nn <- if (is.null(labels)) 10 else max(nchar(labels))
    m2 <- nn/3+.5
    m3 <- 2
    m4 <- if (diff) 6 else 2
    op <- par(mar=c(m1, m2, m3, m4))
  } else op <- par(mar=mar)
  n <- nrow(B)
  if (!missing(trans)) B[,1:3] <- trans(B[,1:3])
  ## Set up plot structure and add points
  if (missing(pxlim)) {
    pxlim <- if (missing(xlim)) pretty(range(B[,2:3], na.rm=TRUE),n=n.ticks-1) else pretty(xlim, n=n.ticks-1)
  }
  if (missing(ylim)) ylim <- c(0.5,n+0.5)
  if (add) {
    points(B[n:1,1], 1:n, pch=19)
  } else if (setupOnly) {
    plot(B[n:1,1], 1:n, type="n", xlim = range(pxlim), ylim=ylim, ylab=ylab, axes=FALSE, pch=19, xlab=xlab, ...)
    return(invisible(NULL))
  } else {
    plot(B[n:1,1], 1:n, xlim = range(pxlim), ylim=ylim, ylab=ylab, axes=FALSE, pch=19, xlab=xlab, ...)
  }
  ## Add lines, p-values
  ## (dots and the formatted p-values are loop-invariant, so compute them
  ## once instead of on every iteration)
  dots <- list(...)
  if (diff) {
    p <- format_p(B[,4], label=p.label)
    p[is.na(B[,4])] <- ""
  }
  for (i in seq_len(n)) {
    col <- if ("col" %in% names(dots)) rep_len(dots$col[n-i+1], n) else "black"
    lines(c(B[i,2:3]), c(n-i+1,n-i+1), lwd=lwd, col=col)
    if (diff) {
      mtext(at=n-i+1,p[i],line=1,side=4,las=1, cex=0.8*par("cex"), adj=0)
    }
  }
  if (axis) axis(1, pxlim)
  if (diff) {
    if (!missing(trans)) null <- trans(null)
    abline(v=null,col="gray")
  }
  if (!missing(sub)) mtext(sub,3,0,cex=0.8)
  ## Add labels
  if (replaceUnderscore) labels <- gsub("_", " ", labels)
  rownames(B) <- labels
  if (!add) {
    ind <- !is.na(B[,1])
    lapply(which(ind), function(l) text(x=par("usr")[1], adj=1, y=(n:1)[l], labels=labels[[l]], xpd=TRUE, cex=.8)) ## List approach is necessary for compatibility with expressions
    if (sum(!ind) > 0) {
      a <- diff(par("usr")[1:2])/diff(par("plt")[1:2])
      b <- par("usr")[1] - a*par("plt")[1]
      text(x=b+a*.01, adj=0, y=(n:1)[!ind], labels=labels[!ind], xpd=TRUE, cex=.8)
    }
  }
  par(op)
  # Fix lwr/upr (if tau is negative, the bounds may arrive swapped);
  # seq_len() is safe for a zero-row matrix, unlike 1:nrow(B)
  for (i in seq_len(nrow(B))) {
    B[i, 2:3] <- sort(B[i, 2:3])
  }
  invisible(B)
}
#' Confidence-interval plot for lm models
#'
#' Builds a Coef/Lower/Upper/p matrix from a fitted linear model and (by
#' default) forwards it to `CIplot()` for plotting.
#'
#' @param obj A fitted `lm` object
#' @param intercept Include the intercept term? (default: FALSE)
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of coefficients, CI bounds and p-values
#' @export
CIplot.lm <- function(obj, intercept=FALSE, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, ...) {
  fit <- obj
  p <- length(coef(fit))
  j <- if (intercept) 1:p else 2:p
  if (missing(tau)) tau <- 1
  B <- cbind(tau*coef(fit)[j],
             tau*confint(fit,j),
             summary(fit)$coef[j,4])
  colnames(B) <- c("Coef","Lower","Upper","p")
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern actually matches something:
    # B[-integer(0), ] would otherwise silently delete *every* row.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B, xlab=xlab, ...)
  return(invisible(B))
}
#' @export
CIplot.glm <- function(obj, ...) {
  # GLM fits expose the same coef/confint/summary interface as lm fits,
  # so the lm method handles them directly.
  CIplot.lm(obj, ...)
}
#' Confidence-interval plot for mer (mixed-effects) models
#'
#' Builds a matrix of fixed-effect estimates and simulation-based confidence
#' intervals and (by default) forwards it to `CIplot()`.
#'
#' @param obj A fitted mixed-effects model with a `@fixef` slot
#' @param intercept Include the intercept term? (default: FALSE)
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param n.sim Number of simulations used by `confint()`
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of estimates and CI bounds
#' @export
CIplot.mer <- function(obj, intercept=FALSE, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, n.sim=10000, ...) {
  fit <- obj
  p <- length(fit@fixef)
  j <- if (intercept) 1:p else 2:p
  B <- cbind(fit@fixef[j], confint(fit, j, n.sim=n.sim))
  if (!missing(tau)) B[,1:3] <- B[,1:3]*tau
  # Name only the columns actually present: simulation-based confint()
  # typically yields 3 columns here (no p-value), and assigning 4 names
  # to a 3-column matrix is an error.
  colnames(B) <- c("Coef","Lower","Upper","p")[seq_len(ncol(B))]
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern matches (B[-integer(0), ] would
    # delete every row); drop=FALSE keeps B a matrix if one row remains.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B, xlab=xlab, ...)
  return(invisible(B))
}
#' Confidence-interval plot for coxph models
#'
#' Builds a Coef/Lower/Upper/p matrix from a fitted Cox proportional-hazards
#' model and (by default) forwards it to `CIplot()`.
#'
#' @param obj A fitted `coxph` object
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of coefficients, CI bounds and p-values
#' @export
CIplot.coxph <- function(obj, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, ...) {
  fit <- obj
  p <- length(coef(fit))
  j <- 1:p      # Cox models have no intercept, so keep every coefficient
  if (missing(tau)) tau <- 1
  B <- cbind(tau*coef(fit)[j],
             tau*confint(fit,j),
             summary(fit)$coef[j,5])   # p-values sit in column 5 of a coxph summary
  colnames(B) <- c("Coef","Lower","Upper","p")
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern matches (B[-integer(0), ] would
    # delete every row); drop=FALSE keeps B a matrix if one row remains.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B,xlab=xlab,...)
  return(invisible(B))
}
#' @export
CIplot.data.frame <- function(obj, ...) {
  # Coerce to a matrix and reuse the matrix method.
  mat <- as.matrix(obj)
  CIplot.matrix(mat, ...)
}
| /R/CIplot.R | no_license | pbreheny/hdrm | R | false | false | 5,991 | r | #' Plot confidence intervals (NEEDS DOCUMENTATION)
#'
#' "Forest plot"-style plotting of confidence intervals from a regression
#' model. The basic input is a matrix with columns of estimate/lower/upper,
#' along with an optional 4th column for the p-value. Also works with a
#' variety of fitted model objects (lm/glm/coxph/etc.); the appropriate
#' method is chosen by S3 dispatch on the class of `obj`.
#'
#' @param obj The object to be plotted; can be a matrix of raw values or a
#'   fitted model object
#' @param ... Additional arguments passed on to the class-specific method
#'
#' @examples
#' # Supplying a matrix
#' B <- cbind(1:3, 0:2, 2:4)
#' rownames(B) <- LETTERS[1:3]
#' CIplot(B)
#'
#' # Supplying a fitted model object
#' fit <- lm(Ozone ~ Solar.R + Wind + Temp, airquality)
#' CIplot(fit)
#'
#' @export
CIplot <- function(obj, ...) {
  # S3 generic: dispatch on the class of `obj`
  UseMethod("CIplot")
}
#' @rdname CIplot
#'
#' @param labels Parameter labels (default: row names of the input matrix;
#'   evaluated lazily, so by default they follow the rows *after* sorting)
#' @param sort Sort parameters by estimate? (default: true)
#' @param xlim,pxlim x axis limits and breakpoints; see `pretty()`
#' @param ylim y axis limits (default: c(0.5, n+0.5), where n is number of params)
#' @param sub Text to be written at top of plot
#' @param diff Include tests of difference / p-values?
#' @param null Draw a line representing no effect at this value (default: 0)
#' @param n.ticks Number of ticks on x-axis
#' @param mar As in `par()`
#' @param axis Create an x axis?
#' @param trans Function to transform parameter space
#' @param p.label Label p-values (p=0.02 instead of just 0.02)? (default: FALSE)
#' @param xlab,ylab As in `plot()`
#' @param add Add to existing plot?
#' @param setupOnly Create a new window for plot, but don't actually plot anything yet
#' @param lwd As in `lines()`
#' @param replaceUnderscore Replace underscore with space in plotting label
#' @param ... Additional arguments to `plot()`
#'
#' @export
CIplot.matrix <- function(
    obj, labels=rownames(B), sort=TRUE, pxlim, xlim, ylim, sub, diff=(ncol(B)==4), null=0, n.ticks=6, mar, axis=!add,
    trans, p.label=FALSE, xlab="", ylab="", add=FALSE, setupOnly=FALSE, lwd=2, replaceUnderscore=TRUE, ...) {
  B <- obj
  if (sort) B <- B[order(B[,1], decreasing=TRUE),,drop=FALSE]
  ## Set up margins (left margin sized to the longest label; extra right
  ## margin when p-values are printed)
  if (missing(mar)) {
    m1 <- 5
    nn <- if (is.null(labels)) 10 else max(nchar(labels))
    m2 <- nn/3+.5
    m3 <- 2
    m4 <- if (diff) 6 else 2
    op <- par(mar=c(m1, m2, m3, m4))
  } else op <- par(mar=mar)
  n <- nrow(B)
  if (!missing(trans)) B[,1:3] <- trans(B[,1:3])
  ## Set up plot structure and add points
  if (missing(pxlim)) {
    pxlim <- if (missing(xlim)) pretty(range(B[,2:3], na.rm=TRUE),n=n.ticks-1) else pretty(xlim, n=n.ticks-1)
  }
  if (missing(ylim)) ylim <- c(0.5,n+0.5)
  if (add) {
    points(B[n:1,1], 1:n, pch=19)
  } else if (setupOnly) {
    plot(B[n:1,1], 1:n, type="n", xlim = range(pxlim), ylim=ylim, ylab=ylab, axes=FALSE, pch=19, xlab=xlab, ...)
    return(invisible(NULL))
  } else {
    plot(B[n:1,1], 1:n, xlim = range(pxlim), ylim=ylim, ylab=ylab, axes=FALSE, pch=19, xlab=xlab, ...)
  }
  ## Add lines, p-values
  ## (dots and the formatted p-values are loop-invariant, so compute them
  ## once instead of on every iteration)
  dots <- list(...)
  if (diff) {
    p <- format_p(B[,4], label=p.label)
    p[is.na(B[,4])] <- ""
  }
  for (i in seq_len(n)) {
    col <- if ("col" %in% names(dots)) rep_len(dots$col[n-i+1], n) else "black"
    lines(c(B[i,2:3]), c(n-i+1,n-i+1), lwd=lwd, col=col)
    if (diff) {
      mtext(at=n-i+1,p[i],line=1,side=4,las=1, cex=0.8*par("cex"), adj=0)
    }
  }
  if (axis) axis(1, pxlim)
  if (diff) {
    if (!missing(trans)) null <- trans(null)
    abline(v=null,col="gray")
  }
  if (!missing(sub)) mtext(sub,3,0,cex=0.8)
  ## Add labels
  if (replaceUnderscore) labels <- gsub("_", " ", labels)
  rownames(B) <- labels
  if (!add) {
    ind <- !is.na(B[,1])
    lapply(which(ind), function(l) text(x=par("usr")[1], adj=1, y=(n:1)[l], labels=labels[[l]], xpd=TRUE, cex=.8)) ## List approach is necessary for compatibility with expressions
    if (sum(!ind) > 0) {
      a <- diff(par("usr")[1:2])/diff(par("plt")[1:2])
      b <- par("usr")[1] - a*par("plt")[1]
      text(x=b+a*.01, adj=0, y=(n:1)[!ind], labels=labels[!ind], xpd=TRUE, cex=.8)
    }
  }
  par(op)
  # Fix lwr/upr (if tau is negative, the bounds may arrive swapped);
  # seq_len() is safe for a zero-row matrix, unlike 1:nrow(B)
  for (i in seq_len(nrow(B))) {
    B[i, 2:3] <- sort(B[i, 2:3])
  }
  invisible(B)
}
#' Confidence-interval plot for lm models
#'
#' Builds a Coef/Lower/Upper/p matrix from a fitted linear model and (by
#' default) forwards it to `CIplot()` for plotting.
#'
#' @param obj A fitted `lm` object
#' @param intercept Include the intercept term? (default: FALSE)
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of coefficients, CI bounds and p-values
#' @export
CIplot.lm <- function(obj, intercept=FALSE, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, ...) {
  fit <- obj
  p <- length(coef(fit))
  j <- if (intercept) 1:p else 2:p
  if (missing(tau)) tau <- 1
  B <- cbind(tau*coef(fit)[j],
             tau*confint(fit,j),
             summary(fit)$coef[j,4])
  colnames(B) <- c("Coef","Lower","Upper","p")
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern actually matches something:
    # B[-integer(0), ] would otherwise silently delete *every* row.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B, xlab=xlab, ...)
  return(invisible(B))
}
#' @export
CIplot.glm <- function(obj, ...) {
  # GLM fits expose the same coef/confint/summary interface as lm fits,
  # so the lm method handles them directly.
  CIplot.lm(obj, ...)
}
#' Confidence-interval plot for mer (mixed-effects) models
#'
#' Builds a matrix of fixed-effect estimates and simulation-based confidence
#' intervals and (by default) forwards it to `CIplot()`.
#'
#' @param obj A fitted mixed-effects model with a `@fixef` slot
#' @param intercept Include the intercept term? (default: FALSE)
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param n.sim Number of simulations used by `confint()`
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of estimates and CI bounds
#' @export
CIplot.mer <- function(obj, intercept=FALSE, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, n.sim=10000, ...) {
  fit <- obj
  p <- length(fit@fixef)
  j <- if (intercept) 1:p else 2:p
  B <- cbind(fit@fixef[j], confint(fit, j, n.sim=n.sim))
  if (!missing(tau)) B[,1:3] <- B[,1:3]*tau
  # Name only the columns actually present: simulation-based confint()
  # typically yields 3 columns here (no p-value), and assigning 4 names
  # to a 3-column matrix is an error.
  colnames(B) <- c("Coef","Lower","Upper","p")[seq_len(ncol(B))]
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern matches (B[-integer(0), ] would
    # delete every row); drop=FALSE keeps B a matrix if one row remains.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B, xlab=xlab, ...)
  return(invisible(B))
}
#' Confidence-interval plot for coxph models
#'
#' Builds a Coef/Lower/Upper/p matrix from a fitted Cox proportional-hazards
#' model and (by default) forwards it to `CIplot()`.
#'
#' @param obj A fitted `coxph` object
#' @param xlab x-axis label
#' @param exclude Character vector of regex patterns; coefficients whose
#'   names match a pattern are dropped
#' @param plot Draw the plot? If FALSE, just return the matrix
#' @param tau Optional scale factor applied to estimates and CI bounds
#' @param ... Additional arguments passed to `CIplot()`
#' @return (Invisibly) the matrix of coefficients, CI bounds and p-values
#' @export
CIplot.coxph <- function(obj, xlab="Regression coefficient", exclude=NULL, plot=TRUE, tau, ...) {
  fit <- obj
  p <- length(coef(fit))
  j <- 1:p      # Cox models have no intercept, so keep every coefficient
  if (missing(tau)) tau <- 1
  B <- cbind(tau*coef(fit)[j],
             tau*confint(fit,j),
             summary(fit)$coef[j,5])   # p-values sit in column 5 of a coxph summary
  colnames(B) <- c("Coef","Lower","Upper","p")
  for (i in seq_along(exclude)) {
    # Only drop rows when the pattern matches (B[-integer(0), ] would
    # delete every row); drop=FALSE keeps B a matrix if one row remains.
    drop_idx <- grep(exclude[i], rownames(B))
    if (length(drop_idx) > 0) B <- B[-drop_idx, , drop=FALSE]
  }
  if (plot) B <- CIplot(B,xlab=xlab,...)
  return(invisible(B))
}
#' @export
CIplot.data.frame <- function(obj, ...) {
  # Coerce to a matrix and reuse the matrix method.
  mat <- as.matrix(obj)
  CIplot.matrix(mat, ...)
}
|
# Plot 4: 2x2 panel of household power-consumption time series, saved to
# plot4.png.  Relies on `selected_dates` created by the sourced cleaning
# script (assumed to provide Time, Global_active_power, Voltage,
# Sub_metering_1..3 and Global_reactive_power columns -- confirm in
# ../data/clean.R).
source("../data/clean.R")
# |||||||||||| PLOT 4 ||||||||||||||
# ||||||| Save to plot4.png ||||||||
# Use an English locale so time-axis labels (weekday names) come out in English
Sys.setlocale(category = "LC_TIME", locale = "en_GB.UTF-8")
# Lay out a 2x2 grid of panels, filled row by row
par(mfrow = c(2,2))
# TL: global active power over time
with(selected_dates, plot(Time, Global_active_power,
                          type = "l",
                          xlab = "", ylab = "Global Active Power",
                          cex.lab = 0.7, cex.axis = 0.8,
                          ))
# TR: voltage over time
with(selected_dates, plot(Time, Voltage,
                          type = "l",
                          xlab = "", ylab = "Voltage",
                          cex.lab = 0.7, cex.axis = 0.8,
                          ))
# BL: the three sub-metering series overlaid on one panel, with legend
plot(selected_dates$Time, selected_dates$Sub_metering_1,
     type = "l", ylab = "Energy sub metering", xlab = "", cex.lab = 0.7, cex.axis = 0.8)
lines(selected_dates$Time, selected_dates$Sub_metering_2, col = "red")
lines(selected_dates$Time, selected_dates$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1,1,1), col = c("black","red", "blue"), cex = 0.7, bty = "n")
# BR: global reactive power over time
with(selected_dates, plot(Time, selected_dates$Global_reactive_power,
                          type = "l",lwd = 0.5,
                          xlab = "datetime", ylab = "Global_reactive_power",
                          cex.lab = 0.7, cex.axis = 0.8))
# Copy the on-screen figure to a 480x480 PNG device, then close that device
dev.copy(png, 'plot4.png', width = 480, height = 480)
dev.off()
| /rplots/plot4.R | no_license | ezRAez/ExData_Plotting1 | R | false | false | 1,197 | r | source("../data/clean.R")
# |||||||||||| PLOT 4 ||||||||||||||
# ||||||| Save to plot4.png ||||||||
# 2x2 panel of household power-consumption plots; `selected_dates` comes
# from the cleaning script sourced above.
# Use an English locale so time-axis labels (weekday names) come out in English
Sys.setlocale(category = "LC_TIME", locale = "en_GB.UTF-8")
# Lay out a 2x2 grid of panels, filled row by row
par(mfrow = c(2,2))
# TL: global active power over time
with(selected_dates, plot(Time, Global_active_power,
                          type = "l",
                          xlab = "", ylab = "Global Active Power",
                          cex.lab = 0.7, cex.axis = 0.8,
                          ))
# TR: voltage over time
with(selected_dates, plot(Time, Voltage,
                          type = "l",
                          xlab = "", ylab = "Voltage",
                          cex.lab = 0.7, cex.axis = 0.8,
                          ))
# BL: the three sub-metering series overlaid on one panel, with legend
plot(selected_dates$Time, selected_dates$Sub_metering_1,
     type = "l", ylab = "Energy sub metering", xlab = "", cex.lab = 0.7, cex.axis = 0.8)
lines(selected_dates$Time, selected_dates$Sub_metering_2, col = "red")
lines(selected_dates$Time, selected_dates$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1,1,1), col = c("black","red", "blue"), cex = 0.7, bty = "n")
# BR: global reactive power over time
with(selected_dates, plot(Time, selected_dates$Global_reactive_power,
                          type = "l",lwd = 0.5,
                          xlab = "datetime", ylab = "Global_reactive_power",
                          cex.lab = 0.7, cex.axis = 0.8))
# Copy the on-screen figure to a 480x480 PNG device, then close that device
dev.copy(png, 'plot4.png', width = 480, height = 480)
dev.off()
|
library("igraph")
library("scales")
library("graphsim")
vo_huong <- make_graph( ~ A-B-C-D-E-A, G-A:B:C:D:E)
plot(vo_huong)
| /CTDLGT_R/BT12_CaiDatDoThiVoHuong.R | no_license | thucuyen127/Giai-thuat | R | false | false | 132 | r |
library("igraph")
library("scales")
library("graphsim")
vo_huong <- make_graph( ~ A-B-C-D-E-A, G-A:B:C:D:E)
plot(vo_huong)
|
#' Convert pharmacokinetic parameters for three compartment model
#'
#' Calculate pharmacokinetic parameters with volume of distributions (Vd and V1),
#' clearance (Cl1) and half-lives (t_alpha, t_beta, and t_gamma)
#'
#' @usage ThreeComp_Volume_Clearance_HalfLife(V1,Vd,Cl1,t_alpha,t_beta,t_gamma,
#' V1.sd=NA,Vd.sd=NA,Cl1.sd=NA,t_alpha.sd=NA,t_beta.sd=NA,t_gamma.sd=NA,
#' covar=c(V1Vd=NA,V1Cl1=NA,V1talpha=NA,V1tbeta=NA,V1tgamma=NA,VdCl1=NA,
#' Vdtalpha=NA,Vdtbeta=NA,Vdtgamma=NA,Cl1talpha=NA,Cl1tbeta=NA,
#' Cl1tgamma=NA,talphatbeta=NA,talphatgamma=NA,tbetatgamma=NA),...)
#' @param Vd Total volume of distributions
#' @param V1 The volume of distribution of compartment 1
#' @param Cl1 Clearance from compartment 1
#' @param t_alpha half life of compartment 1
#' @param t_beta half life of compartment 2
#' @param t_gamma half life of compartment 3
#' @param Vd.sd standard error of Vd
#' @param V1.sd standard error of V1
#' @param Cl1.sd standard error of Cl1
#' @param t_alpha.sd standard error of t_alpha
#' @param t_beta.sd standard error of t_beta
#' @param t_gamma.sd standard error of t_gamma
#' @param covar covariances among parameters
#' @param ... arguments to be passed to methods
#' @references \url{http://www.nonmemcourse.com/convert.xls}
#' @export
#' @examples
#' ThreeComp_Volume_Clearance_HalfLife(V1=5,Vd=1110,Cl1=3,
#' t_alpha=1.142,t_beta=52.2,t_gamma=931, V1.sd=0.01,Vd.sd=20,Cl1.sd=0.01,
#' t_alpha.sd=0.002,t_beta.sd=0.5,t_gamma.sd=5.6)
ThreeComp_Volume_Clearance_HalfLife<-function(V1,Vd,Cl1,t_alpha,t_beta,t_gamma,
V1.sd=NA,Vd.sd=NA,Cl1.sd=NA,t_alpha.sd=NA,t_beta.sd=NA,t_gamma.sd=NA,
covar=c(V1Vd=NA,V1Cl1=NA,V1talpha=NA,V1tbeta=NA,V1tgamma=NA,VdCl1=NA,
Vdtalpha=NA,Vdtbeta=NA,Vdtgamma=NA,Cl1talpha=NA,Cl1tbeta=NA,
Cl1tgamma=NA,talphatbeta=NA,talphatgamma=NA,tbetatgamma=NA),...){
if(is.na(covar[1])) covar<-rep(0,15)
V1.var = (V1.sd)^2; Vd.var = (Vd.sd)^2
Cl1.var = (Cl1.sd)^2; t_alpha.var = (t_alpha.sd)^2;
t_beta.var = (t_beta.sd)^2; t_gamma.var = (t_gamma.sd)^2
f.V2<-quote(quote((V1)*((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-
(Cl1/V1)-((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-((((Vd/V1-1)*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1)))*((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))/((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)))
V2<-eval(eval(f.V2))
ff.V2<-stats::as.formula(paste("~",as.character(f.V2[2],"")))
f.V3<-quote(quote((V1)*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)))
V3<-eval(eval(f.V3))
ff.V3<-stats::as.formula(paste("~",as.character(f.V3[2],"")))
V2_deriv<-as.matrix(attr(eval(stats::deriv(ff.V2,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
V3_deriv<-as.matrix(attr(eval(stats::deriv(ff.V3,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
f.Vdss<-quote(quote(((V1)*((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))
-((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))/
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*
t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))+
(V1*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*
t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))+(V1)))
Vdss<-eval(eval(f.Vdss))
ff.Vdss<-stats::as.formula(paste("~",as.character(f.Vdss[2],"")))
Vdss_deriv<-as.matrix(attr(eval(stats::deriv(ff.Vdss,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
sigma6<-matrix(as.numeric(c(V1.var,covar[1],covar[2],covar[3],covar[4],
covar[5],covar[1],Vd.var,covar[6],covar[7],covar[8],covar[9],
covar[2],covar[6],Cl1.var,covar[9],covar[11],covar[12],
covar[3],covar[7],covar[10],t_alpha.var,covar[13],covar[14],
covar[4],covar[8],covar[11],covar[13],t_beta.var,covar[15],
covar[5],covar[9],covar[12],covar[14],covar[15],t_gamma.var)),
6,6,byrow=T)
V2.sd<-sqrt(V2_deriv %*% sigma6 %*% t(V2_deriv))
V3.sd<-sqrt(V3_deriv %*% sigma6 %*% t(V3_deriv))
Vdss.sd<-sqrt(Vdss_deriv %*% sigma6 %*% t(Vdss_deriv))
f.Cl2<-quote(quote(V1*((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-
(Cl1/V1)-((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-((((Vd/V1-1)*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))))
Cl2<-eval(eval(f.Cl2))
ff.Cl2<-stats::as.formula(paste("~",as.character(f.Cl2[2],"")))
f.Cl3<-quote(quote(V1*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))))
Cl3<-eval(eval(f.Cl3))
ff.Cl3<-stats::as.formula(paste("~",as.character(f.Cl3[2],"")))
Cl2_deriv<-as.matrix(attr(eval(stats::deriv(ff.Cl2,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
Cl2.sd<-sqrt(Cl2_deriv %*% sigma6 %*% t(Cl2_deriv))
Cl3_deriv<-as.matrix(attr(eval(stats::deriv(ff.Cl3,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
Cl3.sd<-sqrt(Cl3_deriv %*% sigma6 %*% t(Cl3_deriv))
k10<-Cl1/V1
f.k12<-quote(quote((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))-(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))))
k12<-eval(eval(f.k12))
ff.k12<-stats::as.formula(paste("~",as.character(f.k12[2],"")))
f.k13<-quote(quote((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))
k13<-eval(eval(f.k13))
ff.k13<-stats::as.formula(paste("~",as.character(f.k13[2],"")))
f.k21<-quote(quote((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))
k21<-eval(eval(f.k21))
ff.k21<-stats::as.formula(paste("~",as.character(f.k21[2],"")))
f.k31<-quote(quote((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))
k31<-eval(eval(f.k31))
ff.k31<-stats::as.formula(paste("~",as.character(f.k31[2],"")))
# Variance-covariance matrix of (V1, Cl1), used for the delta-method SE of
# k10 = Cl1/V1 below.
# BUG FIX: the off-diagonal entries must be cov(V1, Cl1) = covar[2]
# (named "V1Cl1" in the signature), not covar[1] ("V1Vd").
sigma2<-matrix(as.numeric(c(V1.var,covar[2],covar[2],Cl1.var)),2,2,byrow=TRUE)
k10_deriv<-as.matrix(attr(eval(stats::deriv(~Cl1/V1,c("V1","Cl1"))),
"gradient"))
k12_deriv<-as.matrix(attr(eval(stats::deriv(ff.k12,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
k13_deriv<-as.matrix(attr(eval(stats::deriv(ff.k13,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
# Gradient of k21 with respect to the fitted parameters.
# BUG FIX: the differentiation order must be
# c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma") -- the row/column order of
# sigma6 and the order used for every other gradient here.  The original
# listed the half-lives first, silently mis-aligning gradient columns with
# the covariance matrix in the k21.sd product below.
k21_deriv<-as.matrix(attr(eval(stats::deriv(ff.k21,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
k31_deriv<-as.matrix(attr(eval(stats::deriv(ff.k31,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
# Delta-method standard errors for the micro rate constants: each is
# sqrt(g %*% Sigma %*% t(g)) with g the (row-vector) gradient of the rate
# constant and Sigma the matching variance-covariance matrix -- sigma2 for
# k10 (which depends only on V1 and Cl1), sigma6 for the others.
# NOTE(review): this assumes each *_deriv gradient is ordered identically
# to the rows/columns of the Sigma it is multiplied with -- verify against
# the stats::deriv() calls above.
k10.sd<-sqrt(k10_deriv %*% sigma2 %*% t(k10_deriv))
k12.sd<-sqrt(k12_deriv %*% sigma6 %*% t(k12_deriv))
k13.sd<-sqrt(k13_deriv %*% sigma6 %*% t(k13_deriv))
k21.sd<-sqrt(k21_deriv %*% sigma6 %*% t(k21_deriv))
k31.sd<-sqrt(k31_deriv %*% sigma6 %*% t(k31_deriv))
f.true_A<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))/((log(2)/t_alpha)-(log(2)/t_beta))/
((log(2)/t_alpha)-(log(2)/t_gamma))/V1))
true_A<-eval(eval(f.true_A))
ff.true_A<-stats::as.formula(paste("~",as.character(f.true_A[2],"")))
f.true_B<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))/((log(2)/t_beta)-(log(2)/t_alpha))/((log(2)/t_beta)-
(log(2)/t_gamma))/V1))
true_B<-eval(eval(f.true_B))
ff.true_B<-stats::as.formula(paste("~",as.character(f.true_B[2],"")))
f.true_C<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))/((log(2)/t_gamma)-(log(2)/t_beta))/((log(2)/t_gamma)-
(log(2)/t_alpha))/V1))
true_C<-eval(eval(f.true_C))
ff.true_C<-stats::as.formula(paste("~",as.character(f.true_C[2],"")))
true_A_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_A,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_B_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_B,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_C_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_C,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_A.sd<-sqrt(true_A_deriv %*% sigma6 %*% t(true_A_deriv))
true_B.sd<-sqrt(true_B_deriv %*% sigma6 %*% t(true_B_deriv))
true_C.sd<-sqrt(true_C_deriv %*% sigma6 %*% t(true_C_deriv))
f.frac_A<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))/((log(2)/t_alpha)-(log(2)/t_beta))/((log(2)/t_alpha)-
(log(2)/t_gamma))))
frac_A<-eval(eval(f.frac_A))
ff.frac_A<-stats::as.formula(paste("~",as.character(f.frac_A[2],"")))
f.frac_B<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))/((log(2)/t_beta)-(log(2)/t_alpha))/((log(2)/t_beta)-
(log(2)/t_gamma))))
frac_B<-eval(eval(f.frac_B))
ff.frac_B<-stats::as.formula(paste("~",as.character(f.frac_B[2],"")))
f.frac_C<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))/((log(2)/t_gamma)-(log(2)/t_beta))/((log(2)/t_gamma)-
(log(2)/t_alpha))))
frac_C<-eval(eval(f.frac_C))
ff.frac_C<-stats::as.formula(paste("~",as.character(f.frac_C[2],"")))
frac_A_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_A,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_B_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_B,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_C_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_C,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_A.sd<-sqrt(frac_A_deriv %*% sigma6 %*% t(frac_A_deriv))
frac_B.sd<-sqrt(frac_B_deriv %*% sigma6 %*% t(frac_B_deriv))
frac_C.sd<-sqrt(frac_C_deriv %*% sigma6 %*% t(frac_C_deriv))
# Macro rate constants (exponents of the tri-exponential disposition
# curve) converted from the corresponding half-lives.  Note: `gamma`
# shadows base R's gamma() inside this function body.
alpha<-log(2)/t_alpha; beta<-log(2)/t_beta; gamma<-log(2)/t_gamma
# Each exponent depends on a single half-life, so the gradients are 1x1
# and plain elementwise products give the delta-method standard errors.
alpha_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_alpha,"t_alpha")),
"gradient"))
beta_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_beta,"t_beta")),
"gradient"))
gamma_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_gamma,"t_gamma")),
"gradient"))
alpha.sd<-sqrt(alpha_deriv * t_alpha.var * alpha_deriv)
beta.sd<-sqrt(beta_deriv * t_beta.var * beta_deriv)
gamma.sd<-sqrt(gamma_deriv * t_gamma.var * gamma_deriv)
# Assemble the return value: when V1 is missing every derived quantity is
# reported as NA; otherwise collect the 24 parameter estimates and their
# delta-method standard errors computed above.
if(is.na(V1[1])){
param = rep(NA,24)
sd = rep(NA,24)
} else{
param = c(V1,Vdss,Cl1,t_alpha,t_beta,t_gamma,V2,V3,Cl2,Cl3,k10,k12,k13,
k21,k31,true_A,true_B,true_C,frac_A,frac_B,frac_C,alpha,beta,gamma)
sd = c(V1.sd,Vdss.sd,Cl1.sd,t_alpha.sd,t_beta.sd,t_gamma.sd,V2.sd,V3.sd,
Cl2.sd,Cl3.sd,k10.sd,k12.sd,k13.sd,k21.sd,k31.sd,true_A.sd,true_B.sd,
true_C.sd,frac_A.sd,frac_B.sd,frac_C.sd,alpha.sd,beta.sd,gamma.sd)
}
# One row per parameter; names are attached both as a column and as row
# names so rows can be reordered by name below.
result = data.frame(Parameter=c("V1","Vdss","Cl1","t_alpha","t_beta",
"t_gamma","V2","V3","Cl2","Cl3","k10","k12","k13",
"k21","k31","True_A","True_B","True_C","Frac_A",
"Frac_B","Frac_C","alpha","beta","gamma"),
Estimate=param, Std.err=sd)
row.names(result) <- c("V1","Vdss","Cl1","t_alpha","t_beta","t_gamma","V2",
"V3","Cl2","Cl3","k10","k12","k13","k21","k31","True_A","True_B","True_C",
"Frac_A","Frac_B","Frac_C","alpha","beta","gamma")
# Reorder rows into the conventional reporting order: volumes,
# clearances, micro rate constants, exponents, half-lives, then the
# tri-exponential coefficients and fractions.
result<-result[c("Vdss","V1","V2","V3","Cl1","Cl2","Cl3",
"k10","k12","k21","k13","k31","alpha","beta","gamma",
"t_alpha","t_beta","t_gamma","True_A","True_B","True_C",
"Frac_A","Frac_B","Frac_C"),]
return(result)
}
| /R/ThreeComp_Volume_Clearance_HalfLife.R | no_license | cran/PKconverter | R | false | false | 33,164 | r | #' Convert pharmacokinetic parameters for three compartment model
#'
#' Calculate pharmacokinetic parameters with the volumes of distribution (Vd and V1),
#' clearance (Cl1) and half-lives (t_alpha, t_beta, and t_gamma)
#'
#' @usage ThreeComp_Volume_Clearance_HalfLife(V1,Vd,Cl1,t_alpha,t_beta,t_gamma,
#' V1.sd=NA,Vd.sd=NA,Cl1.sd=NA,t_alpha.sd=NA,t_beta.sd=NA,t_gamma.sd=NA,
#' covar=c(V1Vd=NA,V1Cl1=NA,V1talpha=NA,V1tbeta=NA,V1tgamma=NA,VdCl1=NA,
#' Vdtalpha=NA,Vdtbeta=NA,Vdtgamma=NA,Cl1talpha=NA,Cl1tbeta=NA,
#' Cl1tgamma=NA,talphatbeta=NA,talphatgamma=NA,tbetatgamma=NA),...)
#' @param Vd Total volume of distributions
#' @param V1 The volume of distribution of compartment 1
#' @param Cl1 Clearance from compartment 1
#' @param t_alpha Half-life of the alpha (fastest) disposition phase
#' @param t_beta Half-life of the beta (intermediate) disposition phase
#' @param t_gamma Half-life of the gamma (terminal) disposition phase
#' @param Vd.sd standard error of Vd
#' @param V1.sd standard error of V1
#' @param Cl1.sd standard error of Cl1
#' @param t_alpha.sd standard error of t_alpha
#' @param t_beta.sd standard error of t_beta
#' @param t_gamma.sd standard error of t_gamma
#' @param covar covariances among parameters
#' @param ... arguments to be passed to methods
#' @references \url{http://www.nonmemcourse.com/convert.xls}
#' @export
#' @examples
#' ThreeComp_Volume_Clearance_HalfLife(V1=5,Vd=1110,Cl1=3,
#' t_alpha=1.142,t_beta=52.2,t_gamma=931, V1.sd=0.01,Vd.sd=20,Cl1.sd=0.01,
#' t_alpha.sd=0.002,t_beta.sd=0.5,t_gamma.sd=5.6)
ThreeComp_Volume_Clearance_HalfLife<-function(V1,Vd,Cl1,t_alpha,t_beta,t_gamma,
V1.sd=NA,Vd.sd=NA,Cl1.sd=NA,t_alpha.sd=NA,t_beta.sd=NA,t_gamma.sd=NA,
covar=c(V1Vd=NA,V1Cl1=NA,V1talpha=NA,V1tbeta=NA,V1tgamma=NA,VdCl1=NA,
Vdtalpha=NA,Vdtbeta=NA,Vdtgamma=NA,Cl1talpha=NA,Cl1tbeta=NA,
Cl1tgamma=NA,talphatbeta=NA,talphatgamma=NA,tbetatgamma=NA),...){
if(is.na(covar[1])) covar<-rep(0,15)
V1.var = (V1.sd)^2; Vd.var = (Vd.sd)^2
Cl1.var = (Cl1.sd)^2; t_alpha.var = (t_alpha.sd)^2;
t_beta.var = (t_beta.sd)^2; t_gamma.var = (t_gamma.sd)^2
f.V2<-quote(quote((V1)*((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-
(Cl1/V1)-((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-((((Vd/V1-1)*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1)))*((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))/((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)))
V2<-eval(eval(f.V2))
ff.V2<-stats::as.formula(paste("~",as.character(f.V2[2],"")))
f.V3<-quote(quote((V1)*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)))
V3<-eval(eval(f.V3))
ff.V3<-stats::as.formula(paste("~",as.character(f.V3[2],"")))
V2_deriv<-as.matrix(attr(eval(stats::deriv(ff.V2,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
V3_deriv<-as.matrix(attr(eval(stats::deriv(ff.V3,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
f.Vdss<-quote(quote(((V1)*((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))
-((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))/
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*
t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))+
(V1*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*
t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))+(V1)))
Vdss<-eval(eval(f.Vdss))
ff.Vdss<-stats::as.formula(paste("~",as.character(f.Vdss[2],"")))
Vdss_deriv<-as.matrix(attr(eval(stats::deriv(ff.Vdss,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
# 6x6 variance-covariance matrix of (V1, Vd, Cl1, t_alpha, t_beta,
# t_gamma), assembled row-wise from the marginal variances and the
# user-supplied covariances (covar[1]=V1Vd, ..., covar[15]=tbetatgamma).
# BUG FIX: entry [3,4] must be cov(Cl1, t_alpha) = covar[10]
# ("Cl1talpha"), not covar[9] ("Vdtgamma"); the original matrix was not
# symmetric (element [4,3] correctly used covar[10]).
sigma6<-matrix(as.numeric(c(V1.var,covar[1],covar[2],covar[3],covar[4],
covar[5],covar[1],Vd.var,covar[6],covar[7],covar[8],covar[9],
covar[2],covar[6],Cl1.var,covar[10],covar[11],covar[12],
covar[3],covar[7],covar[10],t_alpha.var,covar[13],covar[14],
covar[4],covar[8],covar[11],covar[13],t_beta.var,covar[15],
covar[5],covar[9],covar[12],covar[14],covar[15],t_gamma.var)),
6,6,byrow=TRUE)
# Delta-method standard errors for the peripheral volumes and Vdss:
# sqrt(g %*% sigma6 %*% t(g)) with g the gradient of each quantity with
# respect to (V1, Vd, Cl1, t_alpha, t_beta, t_gamma).
V2.sd<-sqrt(V2_deriv %*% sigma6 %*% t(V2_deriv))
V3.sd<-sqrt(V3_deriv %*% sigma6 %*% t(V3_deriv))
Vdss.sd<-sqrt(Vdss_deriv %*% sigma6 %*% t(Vdss_deriv))
f.Cl2<-quote(quote(V1*((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-
(Cl1/V1)-((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-((((Vd/V1-1)*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-((log(2)/t_alpha)+
(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))))
Cl2<-eval(eval(f.Cl2))
ff.Cl2<-stats::as.formula(paste("~",as.character(f.Cl2[2],"")))
f.Cl3<-quote(quote(V1*((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))))
Cl3<-eval(eval(f.Cl3))
ff.Cl3<-stats::as.formula(paste("~",as.character(f.Cl3[2],"")))
Cl2_deriv<-as.matrix(attr(eval(stats::deriv(ff.Cl2,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
Cl2.sd<-sqrt(Cl2_deriv %*% sigma6 %*% t(Cl2_deriv))
Cl3_deriv<-as.matrix(attr(eval(stats::deriv(ff.Cl3,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
Cl3.sd<-sqrt(Cl3_deriv %*% sigma6 %*% t(Cl3_deriv))
k10<-Cl1/V1
f.k12<-quote(quote((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))-(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))))
k12<-eval(eval(f.k12))
ff.k12<-stats::as.formula(paste("~",as.character(f.k12[2],"")))
f.k13<-quote(quote((((Vd/V1-1)*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-((log(2)/t_alpha)+(log(2)/t_beta)+(log(2)/t_gamma)-(Cl1/V1)-
((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1)))*((((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))/(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4))))
k13<-eval(eval(f.k13))
ff.k13<-stats::as.formula(paste("~",as.character(f.k13[2],"")))
f.k21<-quote(quote((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))
k21<-eval(eval(f.k21))
ff.k21<-stats::as.formula(paste("~",as.character(f.k21[2],"")))
f.k31<-quote(quote((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2))
k31<-eval(eval(f.k31))
ff.k31<-stats::as.formula(paste("~",as.character(f.k31[2],"")))
# Variance-covariance matrix of (V1, Cl1), used for the delta-method SE of
# k10 = Cl1/V1 below.
# BUG FIX: the off-diagonal entries must be cov(V1, Cl1) = covar[2]
# (named "V1Cl1" in the signature), not covar[1] ("V1Vd").
sigma2<-matrix(as.numeric(c(V1.var,covar[2],covar[2],Cl1.var)),2,2,byrow=TRUE)
k10_deriv<-as.matrix(attr(eval(stats::deriv(~Cl1/V1,c("V1","Cl1"))),
"gradient"))
k12_deriv<-as.matrix(attr(eval(stats::deriv(ff.k12,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
k13_deriv<-as.matrix(attr(eval(stats::deriv(ff.k13,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
# Gradient of k21 with respect to the fitted parameters.
# BUG FIX: the differentiation order must be
# c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma") -- the row/column order of
# sigma6 and the order used for every other gradient here.  The original
# listed the half-lives first, silently mis-aligning gradient columns with
# the covariance matrix in the k21.sd product below.
k21_deriv<-as.matrix(attr(eval(stats::deriv(ff.k21,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
k31_deriv<-as.matrix(attr(eval(stats::deriv(ff.k31,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
# Delta-method standard errors for the micro rate constants: each is
# sqrt(g %*% Sigma %*% t(g)) with g the (row-vector) gradient of the rate
# constant and Sigma the matching variance-covariance matrix -- sigma2 for
# k10 (which depends only on V1 and Cl1), sigma6 for the others.
# NOTE(review): this assumes each *_deriv gradient is ordered identically
# to the rows/columns of the Sigma it is multiplied with -- verify against
# the stats::deriv() calls above.
k10.sd<-sqrt(k10_deriv %*% sigma2 %*% t(k10_deriv))
k12.sd<-sqrt(k12_deriv %*% sigma6 %*% t(k12_deriv))
k13.sd<-sqrt(k13_deriv %*% sigma6 %*% t(k13_deriv))
k21.sd<-sqrt(k21_deriv %*% sigma6 %*% t(k21_deriv))
k31.sd<-sqrt(k31_deriv %*% sigma6 %*% t(k31_deriv))
f.true_A<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))/((log(2)/t_alpha)-(log(2)/t_beta))/
((log(2)/t_alpha)-(log(2)/t_gamma))/V1))
true_A<-eval(eval(f.true_A))
ff.true_A<-stats::as.formula(paste("~",as.character(f.true_A[2],"")))
f.true_B<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))/((log(2)/t_beta)-(log(2)/t_alpha))/((log(2)/t_beta)-
(log(2)/t_gamma))/V1))
true_B<-eval(eval(f.true_B))
ff.true_B<-stats::as.formula(paste("~",as.character(f.true_B[2],"")))
f.true_C<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))*(((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-((Vd-V1)/V1*
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-(((log(2))^3)/
(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-(sqrt(((((log(2)^2)/
(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/
(t_beta*t_gamma))-((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/
(Cl1/V1)))-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/
(Cl1/V1))^2-(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))/((log(2)/t_gamma)-(log(2)/t_beta))/((log(2)/t_gamma)-
(log(2)/t_alpha))/V1))
true_C<-eval(eval(f.true_C))
ff.true_C<-stats::as.formula(paste("~",as.character(f.true_C[2],"")))
true_A_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_A,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_B_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_B,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_C_deriv<-as.matrix(attr(eval(stats::deriv(ff.true_C,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
true_A.sd<-sqrt(true_A_deriv %*% sigma6 %*% t(true_A_deriv))
true_B.sd<-sqrt(true_B_deriv %*% sigma6 %*% t(true_B_deriv))
true_C.sd<-sqrt(true_C_deriv %*% sigma6 %*% t(true_C_deriv))
f.frac_A<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_alpha))/((log(2)/t_alpha)-(log(2)/t_beta))/((log(2)/t_alpha)-
(log(2)/t_gamma))))
frac_A<-eval(eval(f.frac_A))
ff.frac_A<-stats::as.formula(paste("~",as.character(f.frac_A[2],"")))
f.frac_B<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_beta))/((log(2)/t_beta)-(log(2)/t_alpha))/((log(2)/t_beta)-
(log(2)/t_gamma))))
frac_B<-eval(eval(f.frac_B))
ff.frac_B<-stats::as.formula(paste("~",as.character(f.frac_B[2],"")))
f.frac_C<-quote(quote((((((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/
(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))+
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))*(((((((log(2)^2)/(t_alpha*t_beta))+
((log(2)^2)/(t_alpha*t_gamma))+((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))-
(sqrt(((((log(2)^2)/(t_alpha*t_beta))+((log(2)^2)/(t_alpha*t_gamma))+
((log(2)^2)/(t_beta*t_gamma))-
((Vd-V1)/V1*(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1)))/(Cl1/V1))^2-
(((log(2))^3)/(t_alpha*t_beta*t_gamma)/(Cl1/V1))*4)))/2)-
(log(2)/t_gamma))/((log(2)/t_gamma)-(log(2)/t_beta))/((log(2)/t_gamma)-
(log(2)/t_alpha))))
frac_C<-eval(eval(f.frac_C))
ff.frac_C<-stats::as.formula(paste("~",as.character(f.frac_C[2],"")))
frac_A_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_A,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_B_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_B,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_C_deriv<-as.matrix(attr(eval(stats::deriv(ff.frac_C,
c("V1","Vd","Cl1","t_alpha","t_beta","t_gamma"))),"gradient"))
frac_A.sd<-sqrt(frac_A_deriv %*% sigma6 %*% t(frac_A_deriv))
frac_B.sd<-sqrt(frac_B_deriv %*% sigma6 %*% t(frac_B_deriv))
frac_C.sd<-sqrt(frac_C_deriv %*% sigma6 %*% t(frac_C_deriv))
alpha<-log(2)/t_alpha; beta<-log(2)/t_beta; gamma<-log(2)/t_gamma
alpha_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_alpha,"t_alpha")),
"gradient"))
beta_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_beta,"t_beta")),
"gradient"))
gamma_deriv<-as.matrix(attr(eval(stats::deriv(~log(2)/t_gamma,"t_gamma")),
"gradient"))
alpha.sd<-sqrt(alpha_deriv * t_alpha.var * alpha_deriv)
beta.sd<-sqrt(beta_deriv * t_beta.var * beta_deriv)
gamma.sd<-sqrt(gamma_deriv * t_gamma.var * gamma_deriv)
if(is.na(V1[1])){
param = rep(NA,24)
sd = rep(NA,24)
} else{
param = c(V1,Vdss,Cl1,t_alpha,t_beta,t_gamma,V2,V3,Cl2,Cl3,k10,k12,k13,
k21,k31,true_A,true_B,true_C,frac_A,frac_B,frac_C,alpha,beta,gamma)
sd = c(V1.sd,Vdss.sd,Cl1.sd,t_alpha.sd,t_beta.sd,t_gamma.sd,V2.sd,V3.sd,
Cl2.sd,Cl3.sd,k10.sd,k12.sd,k13.sd,k21.sd,k31.sd,true_A.sd,true_B.sd,
true_C.sd,frac_A.sd,frac_B.sd,frac_C.sd,alpha.sd,beta.sd,gamma.sd)
}
result = data.frame(Parameter=c("V1","Vdss","Cl1","t_alpha","t_beta",
"t_gamma","V2","V3","Cl2","Cl3","k10","k12","k13",
"k21","k31","True_A","True_B","True_C","Frac_A",
"Frac_B","Frac_C","alpha","beta","gamma"),
Estimate=param, Std.err=sd)
row.names(result) <- c("V1","Vdss","Cl1","t_alpha","t_beta","t_gamma","V2",
"V3","Cl2","Cl3","k10","k12","k13","k21","k31","True_A","True_B","True_C",
"Frac_A","Frac_B","Frac_C","alpha","beta","gamma")
result<-result[c("Vdss","V1","V2","V3","Cl1","Cl2","Cl3",
"k10","k12","k21","k13","k31","alpha","beta","gamma",
"t_alpha","t_beta","t_gamma","True_A","True_B","True_C",
"Frac_A","Frac_B","Frac_C"),]
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Decodificar.R
\name{Decodificaramb}
\alias{Decodificaramb}
\title{Decodifica o arquivo EXPORTA BPA}
\usage{
Decodificaramb()
}
\value{
Dois arquivos: BPAC e BPAI
}
\description{
Decodifica o arquivo EXPORTA BPA
}
| /man/Decodificaramb.Rd | no_license | Glauco1990/DECODIFICACAOAMB | R | false | true | 291 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Decodificar.R
\name{Decodificaramb}
\alias{Decodificaramb}
\title{Decodifica o arquivo EXPORTA BPA}
\usage{
Decodificaramb()
}
\value{
Dois arquivos: BPAC e BPAI
}
\description{
Decodifica o arquivo EXPORTA BPA
}
|
# Extracted example for dendextend::cutree_1h.dendrogram.
library(dendextend)
### Name: cutree_1h.dendrogram
### Title: cutree for dendrogram (by 1 height only!)
### Aliases: cutree_1h.dendrogram
### ** Examples
# Average-linkage clustering of five USArrests rows, then the dendrogram form.
hc_fit <- hclust(dist(USArrests[c(1, 6, 13, 20, 23), ]), "ave")
dendro <- as.dendrogram(hc_fit)
# Cutting at the same height must agree between the two representations.
cutree(hc_fit, h = 50)                # stats::cutree on the hclust object
cutree_1h.dendrogram(dendro, h = 50)  # dendextend's cut on the dendrogram
labels(dendro)
# Default cluster ordering follows the original data order ...
cutree_1h.dendrogram(dendro, h = 50, order_clusters_as_data = TRUE)
# ... while FALSE orders labels by their position in the tree.
cutree_1h.dendrogram(dendro, h = 50, order_clusters_as_data = FALSE)
# Optional speed comparison (left not-run, as in the package example):
## Not run: 
##D library(microbenchmark)
##D microbenchmark(
##D    cutree_1h.dendrogram(dendro, h = 50),
##D    cutree_1h.dendrogram(dendro, h = 50, use_labels_not_values = FALSE)
##D )
##D # 0.8 vs 0.6 sec - for 100 runs
## End(Not run)
| /data/genthat_extracted_code/dendextend/examples/cutree_1h.dendrogram.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 865 | r | library(dendextend)
### Name: cutree_1h.dendrogram
### Title: cutree for dendrogram (by 1 height only!)
### Aliases: cutree_1h.dendrogram
### ** Examples
hc <- hclust(dist(USArrests[c(1,6,13,20, 23),]), "ave")
dend <- as.dendrogram(hc)
cutree(hc, h=50) # on hclust
cutree_1h.dendrogram(dend, h=50) # on a dendrogram
labels(dend)
# the default (ordered by original data's order)
cutree_1h.dendrogram(dend, h=50, order_clusters_as_data = TRUE)
# A different order of labels - order by their order in the tree
cutree_1h.dendrogram(dend, h=50, order_clusters_as_data = FALSE)
# make it faster
## Not run:
##D library(microbenchmark)
##D microbenchmark(
##D cutree_1h.dendrogram(dend, h=50),
##D cutree_1h.dendrogram(dend, h=50,use_labels_not_values = FALSE)
##D )
##D # 0.8 vs 0.6 sec - for 100 runs
## End(Not run)
|
library(ape)
# Read the Newick tree for alignment 10415_1 and strip its root;
# the unrooted tree is written back out on the line that follows.
testtree <- read.tree("10415_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10415_1_unrooted.txt") | /codeml_files/newick_trees_processed/10415_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
testtree <- read.tree("10415_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10415_1_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibrate.R
\name{lift}
\alias{lift}
\alias{plot.lift}
\title{Gain and lift charts}
\usage{
lift(prob, y, pos.class = NULL, cumulative = TRUE, nbins = 0)
\method{plot}{lift}(x, refline.col = "red", refline.lty = "dashed", refline.lwd = 1, ...)
}
\arguments{
\item{prob}{Vector of predicted probabilities.}
\item{y}{Vector of binary (i.e., 0/1) outcomes. If \code{y} is coded as
anything other than 0/1, then you must specify which of the two categories
represents the "positive" class (i.e., the class for which the probabilities
specified in \code{prob} correspond to) via the \code{pos.class} argument.}
\item{pos.class}{Numeric/character string specifying which values in \code{y}
correspond to the "positive" class. Default is \code{NULL}. (Must be
specified whenever \code{y} is not coded as 0/1, where 1 is assumed to
represent the "positive" class.)}
\item{cumulative}{Logical indicating whether or not to compute cumulative
lift (i.e., gain). Default is \code{TRUE}.}
\item{nbins}{Integer specifying the number of bins to use when computing
lift. Default is 0, which corresponds to no binning. For example, setting
\code{nbins = 10} will result in computing lift within each decile of the
sorted probabilities.}
\item{x}{An object of class \code{"lift"}.}
\item{refline.col}{The color to use for the reference line. Default is
\code{"red"}.}
\item{refline.lty}{The type of line to use for the reference line. Default is
\code{"dashed"}.}
\item{refline.lwd}{The width of the reference line. Default is 1.}
\item{...}{Additional optional argument to be passed on to other methods.}
}
\value{
A \code{"lift"} object, which is essentially a list with the
following components:
\describe{
\item{\code{"lift"}}{A numeric vector containing the computed lift values.}
\item{\code{"prop"}}{The corresponding proportion of cases associated with
each lift value.}
\item{\code{"cumulative"}}{Same value as that supplied via the
\code{cumulative} argument. (Used by the \code{plot.lift()} method.)}
}
}
\description{
Validates predicted probabilities against a set of observed (binary)
outcomes.
}
| /man/lift.Rd | no_license | bgreenwell/calibrater | R | false | true | 2,195 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calibrate.R
\name{lift}
\alias{lift}
\alias{plot.lift}
\title{Gain and lift charts}
\usage{
lift(prob, y, pos.class = NULL, cumulative = TRUE, nbins = 0)
\method{plot}{lift}(x, refline.col = "red", refline.lty = "dashed", refline.lwd = 1, ...)
}
\arguments{
\item{prob}{Vector of predicted probabilities.}
\item{y}{Vector of binary (i.e., 0/1) outcomes. If \code{y} is coded as
anything other than 0/1, then you must specify which of the two categories
represents the "positive" class (i.e., the class for which the probabilities
specified in \code{prob} correspond to) via the \code{pos.class} argument.}
\item{pos.class}{Numeric/character string specifying which values in \code{y}
correspond to the "positive" class. Default is \code{NULL}. (Must be
specified whenever \code{y} is not coded as 0/1, where 1 is assumed to
represent the "positive" class.)}
\item{cumulative}{Logical indicating whether or not to compute cumulative
lift (i.e., gain). Default is \code{TRUE}.}
\item{nbins}{Integer specifying the number of bins to use when computing
lift. Default is 0, which corresponds to no binning. For example, setting
\code{nbins = 10} will result in computing lift within each decile of the
sorted probabilities.}
\item{x}{An object of class \code{"lift"}.}
\item{refline.col}{The color to use for the reference line. Default is
\code{"red"}.}
\item{refline.lty}{The type of line to use for the reference line. Default is
\code{"dashed"}.}
\item{refline.lwd}{The width of the reference line. Default is 1.}
\item{...}{Additional optional argument to be passed on to other methods.}
}
\value{
A \code{"lift"} object, which is essentially a list with the
following components:
\describe{
\item{\code{"lift"}}{A numeric vector containing the computed lift values.}
\item{\code{"prop"}}{The corresponding proportion of cases associated with
each lift value.}
\item{\code{"cumulative"}}{Same value as that supplied via the
\code{cumulative} argument. (Used by the \code{plot.lift()} method.)}
}
}
\description{
Validates predicted probabilities against a set of observed (binary)
outcomes.
}
|
# Setup: attach dplyr/stringr and pull the pre-built NYC taxi sample from
# Azure blob storage into the session as `taxi_df`.
library(dplyr)
library(stringr)
load(url("http://alizaidi.blob.core.windows.net/training/taxi_df.RData"))
# Wrapping the assignment in () also prints the result.
# tbl_df() is deprecated in dplyr; as_tibble() is the supported equivalent.
(taxi_df <- as_tibble(taxi_df))
# Include shared lib ------------------------------------------------------
# Append a shared, pre-provisioned package library to the search path.
.libPaths(c(.libPaths(), 
            "/Rlib/x86_64-pc-linux-gnu-library/3.2"))
# Standard Evaluation in dplyr --------------------------------------------
# Select a single column (named by a string) from the global `taxi_df`.
# Fix: select_() is deprecated in dplyr; all_of() is the supported way to
# select columns held in a character variable.
# Returns a one-column tbl.
select_fn <- function(col_name) {
  select(taxi_df, all_of(col_name))
}
# Sort by tip (largest first) and keep three columns — nested-call style.
select(arrange(taxi_df, desc(tip_amount), pickup_dow, dropoff_nhood), tip_amount, pickup_dow, dropoff_nhood)
# Two Groups
group_by(taxi_df, pickup_nhood, dropoff_nhood)
# Return Value is a Single Group
# One summary row per (pickup, dropoff) pair.
summarise(group_by(taxi_df, pickup_nhood, dropoff_nhood), 
          Num = n(), ave_tip_pct = mean(tip_pct))
# Return Value is Number of Groups in Pickup NHood
# Summarising the summary collapses one more grouping level: the pair counts
# are totalled within each pickup neighborhood.
summarise(summarise(group_by(taxi_df, pickup_nhood, dropoff_nhood), 
          Num = n(), ave_tip_pct = mean(tip_pct)), total_records = sum(Num))
# Average tip percentage per (pickup, dropoff) neighborhood pair, highest first.
# Fix: summarise() drops `tip_pct`, so the sort key must be the new column
# `ave_tip_pct`; the original arrange(desc(tip_pct)) referenced a column that
# no longer exists and failed with "object 'tip_pct' not found".
taxi_df %>% group_by(pickup_nhood, dropoff_nhood) %>% 
  summarise(ave_tip_pct = mean(tip_pct)) %>% 
  arrange(desc(ave_tip_pct))
# Keep only the route columns plus trip distance ...
taxi_skinny <- taxi_df %>% 
  select(pickup_nhood, dropoff_nhood, trip_distance)
# ... group by route (taxi_grouped is reused by the mutate/summarise below) ...
taxi_grouped <- taxi_skinny %>% 
  group_by(pickup_nhood, dropoff_nhood)
# ... and order each route's trips from longest to shortest.
taxi_order <- taxi_grouped %>% 
  arrange(pickup_nhood, 
          dropoff_nhood, 
          desc(trip_distance))
taxi_order
# Standardize a numeric vector to z-scores: (x - mean(x)) / sd(x).
# Returns a vector the same length as `x`. (Local names avoid shadowing
# base::scale and stats::dist, which the original reused as variables.)
normalize_function <- function(x) {
  center <- mean(x)
  spread <- sd(x)
  (x - center) / spread
}
# Per-route z-score of trip distance (mutate keeps one row per trip).
taxi_mutate <- taxi_grouped %>% 
  mutate(scaled_value = normalize_function(trip_distance))
taxi_grouped
# Collapse each route to its average trip distance.
taxi_summarise <- taxi_grouped %>% 
  summarise(ave_dist = mean(trip_distance))
# Re-group the summary and z-score the per-route averages.
# Fix: group_by_() is deprecated SE dplyr and does not accept a character
# vector this way; group_by(across(all_of(...))) is the supported spelling
# for grouping by columns named in a character vector.
taxi_mutate_summarized <- taxi_summarise %>% 
  group_by(across(all_of(c("pickup_nhood", "dropoff_nhood")))) %>% 
  mutate(scaled_value = normalize_function(ave_dist))
iris_grouped <- iris %>% group_by(Species)
iris_grouped %>% tally()
# Element at row 3 of the Species column (grouped tibbles keep all columns).
iris_grouped[[3, "Species"]]
# Incomplete demo stub. Presumably meant to filter `df` down to the rows
# matching `selection` (a Species level) — TODO confirm intent. As written,
# filter_() is called with no criteria, and filter_() itself is deprecated
# standard-evaluation dplyr.
pick_group <- function(df = iris, 
                       selection = iris_grouped[[3, "Species"]]) {
  df %>% filter_()
}
# Derive tip percentage, then fit one tip model per dropoff day of week.
taxi_df <- mutate(taxi_df, 
                  tip_pct = tip_amount/fare_amount)
# do() runs once per group on a 1,000-row sample: it stores each group's lm
# fit and its tip_pct vector as list-columns; the rowwise mutate then adds
# each group's mean tip percentage.
tip_models <- taxi_df %>%
  group_by(dropoff_dow) %>%
  sample_n(10^3) %>%
  do(lm_tip = lm(tip_pct ~ pickup_nhood + passenger_count + pickup_hour, 
                 data = .), 
     tip_pct = .$tip_pct) %>%
  mutate(ave_tip_pct = mean(tip_pct))
# Inspect how many rows each per-group model was actually fit on.
length(tip_models$lm_tip[[1]]$fitted.values)
lapply(tip_models$lm_tip, 
       function(x) length(x$fitted.values))
# broom::tidy() flattens each group's model into a coefficient table instead.
library(broom)
taxi_models <- taxi_df %>%
  group_by(dropoff_dow) %>%
  sample_n(10^3) %>%
  do(tidy(lm(tip_pct ~ pickup_nhood + passenger_count + pickup_hour, 
             data = .)))
# scoring models ----------------------------------------------------------
# Simpler per-day models: tip percentage as a function of trip distance only.
taxi_df <- mutate(taxi_df, 
                  tip_pct = tip_amount/fare_amount)
tip_models <- taxi_df %>%
  group_by(dropoff_dow) %>%
  sample_n(10^3) %>%
  do(lm_tip = lm(tip_pct ~ trip_distance, 
                 data = .))
# Score Friday pickups with the Friday model.
test_set_fri <- taxi_df %>% filter(pickup_dow == "Fri")
tip_df <- as.data.frame(tip_models)
# Extraction route 1: row/column subsetting leaves a one-element list-column,
# so the lm object sits two [[ ]] levels deep.
fri_model <- tip_models[tip_models$dropoff_dow == "Fri", 2]
fri_model <- fri_model[[1]]
test_scores_fri <- predict(fri_model[[1]], test_set_fri)
test_set_fri$predicted_tips <- test_scores_fri
# Extraction route 2: name the list of models by day, then pluck "Fri".
tip_list <- tip_models[[2]]
names(tip_list) <- tip_models[[1]]
fri_model <- tip_list$Fri
test_scores_fri <- predict(fri_model, test_set_fri)
# NOTE(review): ggplot2 is never attached in this script (only dplyr and
# stringr are loaded above) — confirm it is loaded elsewhere, or add
# library(ggplot2) before running this plot.
ggplot(filter(test_set_fri, tip_amount < 20), 
       aes(x = payment_type, y = tip_amount)) + 
  geom_boxplot()
# RxText ------------------------------------------------------------------
# Point a RevoScaleR text data source at the bundled 2009 mortgage sample CSV.
mort_text <- RxTextData("/usr/lib64/microsoft-r/8.0/lib64/R/library/RevoScaleR/SampleData/mortDefaultSmall2009.csv")
# rxSplit and Transforms --------------------------------------------------
# Randomly split an XDF data source into "train"/"validate" files on disk.
#   xdf            - input RevoScaleR data source (default: global `mort_xdf`)
#   partition_size - probability that a row is labeled "train" (default 0.75)
#   output_path    - base path/prefix for the two output files
#   ...            - currently unused (was forwarded to rxDataStep in the
#                    earlier draft kept commented out below)
# Returns the result of rxSplit(): the train/validate XDF data sources.
create_partition <- function(xdf = mort_xdf, 
                             partition_size = 0.75, 
                             output_path = "output/", ...) {
  # Earlier draft that tagged rows in place with rxDataStep (kept for reference):
  # rxDataStep(inData = xdf, 
  #            outFile = xdf, 
  #            transforms = list(
  #              trainvalidate = factor(
  #                  ifelse(rbinom(.rxNumRows, 
  #                                size = 1, prob = splitperc), 
  #                         "train", "validate")
  #              )
  #            ), 
  #            transformObjects = list(splitperc = partition_size), 
  #            overwrite = TRUE, ...)
  # Tag each row "train"/"validate" with a Bernoulli draw, then split on that
  # factor. `splitperc` is injected into the transform's evaluation scope via
  # transformObjects; .rxNumRows is presumably the per-chunk row count
  # (RevoScaleR convention — TODO confirm).
  splitDS <- rxSplit(inData = xdf, 
                     outFilesBase = output_path, 
                     outFileSuffixes = c("train", "validate"), 
                     splitByFactor = "trainvalidate", 
                     transforms = list(
                       trainvalidate = factor(
                         ifelse(rbinom(.rxNumRows, 
                                       size = 1, 
                                       prob = splitperc), 
                                "train", "validate")
                       )
                     ), 
                     transformObjects = list(splitperc = partition_size), 
                     overwrite = TRUE)
  return(splitDS)
}
# Build the split, name the two halves, and inspect them.
mort_split <- create_partition(reportProgress = 0)
names(mort_split) <- c("train", "validate")
lapply(mort_split, rxGetInfo)
# if you lose the variable mort_split
mort_split <- list(train = RxXdfData("mortgage.trainvalidate.train.xdf"),
                   validate = RxXdfData("mortgage.trainvalidate.validate.xdf"))
# dtree model -------------------------------------------------------------
# estimate_model() is defined elsewhere in the course material — presumably a
# thin wrapper over the rx* training functions (TODO confirm).
default_model_tree <- estimate_model(mort_split$train, 
                                     model = rxDTree,
                                     minBucket = 10)
# rxAddInheritance / as.rpart expose the rxDTree fit to rpart-style plotting.
x <- rxAddInheritance(default_model_tree)
plot(x)
plot(as.rpart(default_model_tree))
rxGetInfo(mort_split$train, getVarInfo = TRUE)
rxSummary(~., data = mort_split$train)
# Interactive HTML view of the fitted tree.
library(RevoTreeView)
plot(createTreeView(default_model_tree))
# Score the validation split; writeModelVars copies the inputs into the output.
default_tree_scored <- rxPredict(default_model_tree, 
                                 mort_split$validate, 
                                 "scored.xdf", 
                                 writeModelVars = TRUE, 
                                 predVarNames = c("pred_tree_current", 
                                                  "pred_tree_default"))
scored_xdf <- RxXdfData("scored.xdf")
rxGetInfo(scored_xdf, numRows = 5)
# NOTE(review): "pred_logit_default" is not produced anywhere in this visible
# script — presumably an earlier logistic model wrote it into scored.xdf;
# confirm before running the ROC comparisons.
rxRoc(actualVarName = "default", 
      predVarNames = c("pred_logit_default", "pred_tree_default"), 
      data = scored_xdf)
rxRocCurve(actualVarName = "default", 
           predVarNames = c("pred_logit_default", 
                            "pred_tree_default"), 
           data = scored_xdf)
# ensemble algorithms -----------------------------------------------------
# Printing the function objects displays their definitions/signatures.
rxDForest
rxBTrees
system.time(default_model_forest <- estimate_model(mort_split$train, 
                                                   model = rxDForest, 
                                                   importance = TRUE))
# type = "prob" writes class-probability columns alongside the predicted label.
rxPredict(modelObject = default_model_forest, 
          data = mort_split$validate, 
          outData = "scored.xdf", 
          writeModelVars = TRUE, 
          type = "prob", 
          predVarNames = c("pred_forest_current", 
                           "pred_forest_default", 
                           "pred_forest_label"))
rxGetInfo(scored_xdf, 
          getVarInfo = TRUE, 
          numRows = 5)
system.time(default_model_btree <- estimate_model(mort_split$train, 
                                                  model = rxBTrees)
            )
rxPredict(modelObject = default_model_btree, 
          data = mort_split$validate, 
          outData = "scored.xdf", 
          writeModelVars = TRUE, 
          type = "prob", 
          predVarNames = "pred_btree_default")
# Compare all four models' ROC curves on the validation scores.
rxRocCurve(actualVarName = "default", 
           predVarNames = c("pred_logit_default", 
                            "pred_tree_default", 
                            "pred_forest_default", 
                            "pred_btree_default"), 
           data = scored_xdf)
# Saving objects as compressed Rds files ----------------------------------
# Round-trip the boosted-tree model through an .rds file.
saveRDS(object = default_model_btree, file = "defaultbtree.rds")
rm(default_model_btree)
default_model_btree <- readRDS("defaultbtree.rds")
| /class-examples.R | no_license | AAbusnina/R-cadence | R | false | false | 8,573 | r | library(dplyr)
library(stringr)
load(url("http://alizaidi.blob.core.windows.net/training/taxi_df.RData"))
(taxi_df <- tbl_df(taxi_df))
# Include shared lib ------------------------------------------------------
.libPaths(c(.libPaths(),
"/Rlib/x86_64-pc-linux-gnu-library/3.2"))
# Standard Evaluation in dplyr --------------------------------------------
select_fn <- function(col_name) {
select_df <- select_(taxi_df, col_name)
return(select_df)
}
select(arrange(taxi_df, desc(tip_amount), pickup_dow, dropoff_nhood), tip_amount, pickup_dow, dropoff_nhood)
# Two Groups
group_by(taxi_df, pickup_nhood, dropoff_nhood)
# Return Value is a Single Group
summarise(group_by(taxi_df, pickup_nhood, dropoff_nhood),
Num = n(), ave_tip_pct = mean(tip_pct))
# Return Value is Number of Groups in Pickup NHood
summarise(summarise(group_by(taxi_df, pickup_nhood, dropoff_nhood),
Num = n(), ave_tip_pct = mean(tip_pct)), total_records = sum(Num))
taxi_df %>% group_by(pickup_nhood, dropoff_nhood) %>%
summarise(ave_tip_pct = mean(tip_pct)) %>%
arrange(desc(tip_pct))
taxi_skinny <- taxi_df %>%
select(pickup_nhood, dropoff_nhood, trip_distance)
taxi_grouped <- taxi_skinny %>%
group_by(pickup_nhood, dropoff_nhood)
taxi_order <- taxi_grouped %>%
arrange(pickup_nhood,
dropoff_nhood,
desc(trip_distance))
taxi_order
normalize_function <- function(x) {
scale <- sd(x)
dist <- mean(x)
# browser()
(x - dist)/scale
}
taxi_mutate <- taxi_grouped %>%
mutate(scaled_value = normalize_function(trip_distance))
taxi_grouped
taxi_summarise <- taxi_grouped %>%
summarise(ave_dist = mean(trip_distance))
taxi_mutate_summarized <- taxi_summarise %>%
group_by_(c("pickup_nhood", "dropoff_nhood")) %>%
mutate(scaled_value = normalize_function(ave_dist))
iris_grouped <- iris %>% group_by(Species)
iris_grouped %>% tally()
iris_grouped[[3, "Species"]]
pick_group <- function(df = iris,
selection = iris_grouped[[3, "Species"]]) {
df %>% filter_()
}
taxi_df <- mutate(taxi_df,
tip_pct = tip_amount/fare_amount)
tip_models <- taxi_df %>%
group_by(dropoff_dow) %>%
sample_n(10^3) %>%
do(lm_tip = lm(tip_pct ~ pickup_nhood + passenger_count + pickup_hour,
data = .),
tip_pct = .$tip_pct) %>%
mutate(ave_tip_pct = mean(tip_pct))
length(tip_models$lm_tip[[1]]$fitted.values)
lapply(tip_models$lm_tip,
function(x) length(x$fitted.values))
library(broom)
taxi_models <- taxi_df %>%
group_by(dropoff_dow) %>%
sample_n(10^3) %>%
do(tidy(lm(tip_pct ~ pickup_nhood + passenger_count + pickup_hour,
data = .)))
# scoring models ----------------------------------------------------------
taxi_df <- mutate(taxi_df,
tip_pct = tip_amount/fare_amount)
tip_models <- taxi_df %>%
group_by(dropoff_dow) %>%
sample_n(10^3) %>%
do(lm_tip = lm(tip_pct ~ trip_distance,
data = .))
test_set_fri <- taxi_df %>% filter(pickup_dow == "Fri")
tip_df <- as.data.frame(tip_models)
fri_model <- tip_models[tip_models$dropoff_dow == "Fri", 2]
fri_model <- fri_model[[1]]
test_scores_fri <- predict(fri_model[[1]], test_set_fri)
test_set_fri$predicted_tips <- test_scores_fri
tip_list <- tip_models[[2]]
names(tip_list) <- tip_models[[1]]
fri_model <- tip_list$Fri
test_scores_fri <- predict(fri_model, test_set_fri)
ggplot(filter(test_set_fri, tip_amount < 20),
aes(x = payment_type, y = tip_amount)) +
geom_boxplot()
# RxText ------------------------------------------------------------------
mort_text <- RxTextData("/usr/lib64/microsoft-r/8.0/lib64/R/library/RevoScaleR/SampleData/mortDefaultSmall2009.csv")
# rxSplit and Transforms --------------------------------------------------
create_partition <- function(xdf = mort_xdf,
partition_size = 0.75,
output_path = "output/", ...) {
# rxDataStep(inData = xdf,
# outFile = xdf,
# transforms = list(
# trainvalidate = factor(
# ifelse(rbinom(.rxNumRows,
# size = 1, prob = splitperc),
# "train", "validate")
# )
# ),
# transformObjects = list(splitperc = partition_size),
# overwrite = TRUE, ...)
splitDS <- rxSplit(inData = xdf,
outFilesBase = output_path,
outFileSuffixes = c("train", "validate"),
splitByFactor = "trainvalidate",
transforms = list(
trainvalidate = factor(
ifelse(rbinom(.rxNumRows,
size = 1,
prob = splitperc),
"train", "validate")
)
),
transformObjects = list(splitperc = partition_size),
overwrite = TRUE)
return(splitDS)
}
mort_split <- create_partition(reportProgress = 0)
names(mort_split) <- c("train", "validate")
lapply(mort_split, rxGetInfo)
# if you lose the variable mort_split
mort_split <- list(train = RxXdfData("mortgage.trainvalidate.train.xdf"),
validate = RxXdfData("mortgage.trainvalidate.validate.xdf"))
# dtree model -------------------------------------------------------------
default_model_tree <- estimate_model(mort_split$train,
model = rxDTree,
minBucket = 10)
x <- rxAddInheritance(default_model_tree)
plot(x)
plot(as.rpart(default_model_tree))
rxGetInfo(mort_split$train, getVarInfo = TRUE)
rxSummary(~., data = mort_split$train)
library(RevoTreeView)
plot(createTreeView(default_model_tree))
default_tree_scored <- rxPredict(default_model_tree,
mort_split$validate,
"scored.xdf",
writeModelVars = TRUE,
predVarNames = c("pred_tree_current",
"pred_tree_default"))
scored_xdf <- RxXdfData("scored.xdf")
rxGetInfo(scored_xdf, numRows = 5)
rxRoc(actualVarName = "default",
predVarNames = c("pred_logit_default", "pred_tree_default"),
data = scored_xdf)
rxRocCurve(actualVarName = "default",
predVarNames = c("pred_logit_default",
"pred_tree_default"),
data = scored_xdf)
# ensemble algorithms -----------------------------------------------------
rxDForest
rxBTrees
system.time(default_model_forest <- estimate_model(mort_split$train,
model = rxDForest,
importance = TRUE))
rxPredict(modelObject = default_model_forest,
data = mort_split$validate,
outData = "scored.xdf",
writeModelVars = TRUE,
type = "prob",
predVarNames = c("pred_forest_current",
"pred_forest_default",
"pred_forest_label"))
rxGetInfo(scored_xdf,
getVarInfo = TRUE,
numRows = 5)
system.time(default_model_btree <- estimate_model(mort_split$train,
model = rxBTrees)
)
rxPredict(modelObject = default_model_btree,
data = mort_split$validate,
outData = "scored.xdf",
writeModelVars = TRUE,
type = "prob",
predVarNames = "pred_btree_default")
rxRocCurve(actualVarName = "default",
predVarNames = c("pred_logit_default",
"pred_tree_default",
"pred_forest_default",
"pred_btree_default"),
data = scored_xdf)
# Saving objects as compressed Rds files ----------------------------------
saveRDS(object = default_model_btree, file = "defaultbtree.rds")
rm(default_model_btree)
default_model_btree <- readRDS("defaultbtree.rds")
|
#' Understand what's been set inside of a **gt** table object
#'
#' @description
#'
#' It can become increasingly difficult to recall the ID values associated with
#' different labels in a **gt** table. Further to this, there are also
#' situations where **gt** will generate ID values on your behalf (e.g., with
#' [tab_spanner_delim()], etc.) while ensuring that duplicate ID values aren't
#' produced. For the latter case, it is impossible to know what those ID values
#' are unless one were to carefully examine the correct component of the `gt_tbl`
#' object.
#'
#' Because it's so essential to know these ID values for targeting purposes
#' (when styling with [tab_style()], adding footnote marks with
#' [tab_footnote()], etc.), the `tab_info()` function can help with all of this.
#' It summarizes (by location) all of the table's ID values and their associated
#' labels. The product is an informational **gt** table, designed for easy
#' retrieval of the necessary values.
#'
#' @inheritParams fmt_number
#'
#' @return An object of class `gt_tbl`.
#'
#' @section Examples:
#'
#' Let's use a portion of the [`gtcars`] dataset to create a **gt** table. We'll
#' use the [tab_spanner()] function to group two columns together under a
#' spanner column with the ID and label `"performance"`. Finally, we can use the
#' `tab_info()` function in a separate, interactive statement so that we can
#' inspect a table that summarizes the ID values and any associated label text
#' for all parts of the table.
#'
#' ```r
#' gt_tbl <-
#' gtcars |>
#' dplyr::select(model, year, starts_with("hp"), msrp) |>
#' dplyr::slice(1:4) |>
#' gt(rowname_col = "model") |>
#' tab_spanner(
#' label = "performance",
#' columns = starts_with("hp")
#' )
#'
#' gt_tbl |> tab_info()
#' ```
#'
#' \if{html}{\out{
#' `r man_get_image_tag(file = "man_tab_info_1.png")`
#' }}
#'
#' @family part creation/modification functions
#' @section Function ID:
#' 2-12
#'
#' @section Function Introduced:
#' `v0.8.0` (November 16, 2022)
#'
#' @export
tab_info <- function(data) {

  # Zero-row template sharing the schema used by every table part; parts
  # that are absent contribute this empty tibble to the final row bind
  empty_tbl <-
    dplyr::tibble(
      id = character(0),
      i = integer(0),
      label = character(0),
      type = character(0),
      location = character(0)
    )

  # Build the table once so that layout-dependent values (stub layout,
  # group rows) reflect what would actually be rendered
  built_data <- build_data(data, context = "html")

  #
  # Columns
  #

  boxhead <- dt_boxhead_get(data = data)

  columns <- dplyr::select(boxhead, id = var, label = column_label, type)
  columns <- dplyr::filter(columns, type %in% c("default", "stub", "hidden"))
  columns <- dplyr::mutate(columns, label = unlist(label))
  columns <- dplyr::mutate(columns, location = "Columns")
  columns <- dplyr::mutate(columns, i = seq_len(nrow(columns)))
  columns <- dplyr::select(columns, id, i, label, type, location)

  #
  # Rows
  #

  stub_df <- dt_stub_df_get(data = data)
  stub_layout <- get_stub_layout(data = built_data)

  if ("rowname" %in% stub_layout) {

    # The stub contains row names: report each row's ID alongside its
    # label value taken from the stub column of the data
    data_df <- dt_data_get(data = data)
    rowname_col <- dt_boxhead_get_var_stub(data = data)
    row_name_vals <- dplyr::pull(dplyr::select(data_df, dplyr::all_of(rowname_col)), 1)

    rownames <- dplyr::select(stub_df, id = row_id, i = rownum_i)
    rownames <- dplyr::mutate(rownames, label = row_name_vals)
    rownames <- dplyr::mutate(rownames, type = NA_character_)
    rownames <- dplyr::mutate(rownames, location = "Rows")
    rownames <- dplyr::select(rownames, id, i, label, type, location)

  } else if (nrow(stub_df) == 1) {

    # No row names and a single row: describe the only index value
    rownames <-
      dplyr::tibble(
        id = "<< Single index value of 1 >>",
        i = NA_integer_,
        label = NA_character_,
        type = NA_character_,
        location = "Rows"
      )

  } else if (nrow(stub_df) == 0) {

    rownames <-
      dplyr::tibble(
        id = "<< No rows in table >>",
        i = NA_integer_,
        label = NA_character_,
        type = NA_character_,
        location = "Rows"
      )

  } else {

    # No row names but multiple rows: describe the range of index values
    rownum_i <- stub_df[["rownum_i"]]
    rownum_desc <- paste0("<< Index values ", min(rownum_i), " to ", max(rownum_i), " >>")

    rownames <-
      dplyr::tibble(
        id = rownum_desc,
        i = NA_integer_,
        label = NA_character_,
        type = NA_character_,
        location = "Rows"
      )
  }

  #
  # Spanners
  #

  if (dt_spanners_exists(data = data)) {

    span_df <- dt_spanners_get(data = data)

    # For spanners, the `i` column holds the spanner level (not a
    # sequential index)
    spanners <- dplyr::select(span_df, id = spanner_id, label = spanner_label, i = spanner_level)
    spanners <- dplyr::mutate(spanners, type = NA_character_)
    spanners <- dplyr::mutate(spanners, i = as.integer(i))
    spanners <- dplyr::mutate(spanners, label = unlist(label))
    spanners <- dplyr::mutate(spanners, location = "Spanners")
    spanners <- dplyr::select(spanners, id, i, label, type, location)

  } else {
    spanners <- empty_tbl
  }

  #
  # Row Groups
  #

  if ("group_label" %in% stub_layout) {

    groups_rows <- dt_row_groups_get(data = data)

    # Keep one representative entry per group ID
    row_groups <- dplyr::select(stub_df, id = group_id, label = group_label)
    row_groups <- dplyr::group_by(row_groups, id)
    row_groups <- dplyr::filter(row_groups, dplyr::row_number() == 1)
    row_groups <- dplyr::mutate(row_groups, i = which(groups_rows %in% id))
    row_groups <- dplyr::mutate(row_groups, type = NA_character_)
    row_groups <- dplyr::mutate(row_groups, label = unlist(label))
    row_groups <- dplyr::mutate(row_groups, location = "Row Groups")
    row_groups <- dplyr::select(row_groups, id, i, label, type, location)

  } else {
    row_groups <- empty_tbl
  }

  #
  # Summary Rows
  #

  summaries_present <- dt_summary_exists(data = data)

  if (summaries_present) {

    list_of_summaries <- extract_summary(data = data)
    groups_rows_df <- dt_groups_rows_get(data = built_data)

    # Group Summaries
    # Process these whenever the summary list holds any group-level entries
    # (i.e., names other than the grand-summary key); previously this
    # tested for `grand_summary_col`, which wrongly omitted group
    # summaries unless a grand summary was also present
    if (length(setdiff(names(list_of_summaries$summary_df_data_list), grand_summary_col)) > 0) {

      group_id_vec <- groups_rows_df[["group_id"]]

      group_summary <- empty_tbl

      for (group_id in group_id_vec) {

        if (group_id %in% names(list_of_summaries$summary_df_data_list)) {

          group_summary_row_id <-
            names(list_of_summaries$summary_df_data_list[[group_id]][["rowname"]])

          # Each group contributes a sentinel row (type "::group_id::")
          # followed by one row per summary row ID in that group
          group_summary_i <-
            dplyr::bind_rows(
              dplyr::tibble(
                id = group_id,
                i = NA_integer_,
                label = NA_character_,
                type = "::group_id::",
                location = "Group Summary"
              ),
              dplyr::tibble(
                id = group_summary_row_id,
                i = seq_len(length(group_summary_row_id)),
                label = group_summary_row_id,
                type = group_id,
                location = "Group Summary"
              )
            )

          group_summary <-
            dplyr::bind_rows(
              group_summary,
              group_summary_i
            )
        }
      }

    } else {
      group_summary <- empty_tbl
    }

    # Grand Summary
    if (grand_summary_col %in% names(list_of_summaries$summary_df_data_list)) {

      grand_summary_row_id <-
        names(list_of_summaries$summary_df_data_list[[grand_summary_col]][["rowname"]])

      grand_summary <-
        dplyr::tibble(
          id = grand_summary_row_id,
          i = seq_len(length(grand_summary_row_id)),
          label = grand_summary_row_id,
          type = NA_character_,
          location = "Grand Summary"
        )

    } else {
      grand_summary <- empty_tbl
    }

    summaries <-
      dplyr::bind_rows(
        group_summary,
        grand_summary
      )

  } else {
    summaries <- empty_tbl
  }

  #
  # Combine the tables for each part together
  #

  combined_tbl <-
    dplyr::bind_rows(
      columns,
      rownames,
      spanners,
      row_groups,
      summaries
    )

  #
  # Generate the gt table for output
  #

  gt_tbl <- gt(combined_tbl, rowname_col = "id", groupname_col = "location")
  gt_tbl <- tab_stubhead(data = gt_tbl, label = "ID")

  gt_tbl <-
    cols_label(
      .data = gt_tbl,
      label = "Label",
      i = md("*Idx* \n*Lvl*"),
      type = ""
    )

  gt_tbl <-
    cols_width(
      .data = gt_tbl,
      id ~ px(250),
      label ~ px(280),
      i ~ px(50),
      everything() ~ px(30)
    )

  # Monospaced text for values so that IDs/labels can be read and copied
  # without ambiguity
  gt_tbl <-
    tab_style(
      data = gt_tbl,
      style = cell_text(font = google_font("IBM Plex Mono"), size = px(14)),
      locations = list(cells_body(columns = c(i, label)), cells_stub())
    )

  # Visually delineate the sentinel group-ID rows in "Group Summary"
  gt_tbl <-
    tab_style(
      data = gt_tbl,
      style = cell_borders(sides = c("top", "bottom"), color = "lightblue"),
      locations = list(
        cells_stub(rows = type == "::group_id::"),
        cells_body(rows = type == "::group_id::")
      )
    )

  gt_tbl <- opt_all_caps(data = gt_tbl, locations = c("row_group", "column_labels"))
  gt_tbl <- sub_missing(data = gt_tbl, columns = i, missing_text = "")
  gt_tbl <- sub_missing(data = gt_tbl, columns = c(id, label), missing_text = "")

  gt_tbl <-
    tab_header(
      data = gt_tbl,
      title = md("Information on ID and Label Values")
    )

  gt_tbl <- opt_align_table_header(data = gt_tbl, align = "left")
  gt_tbl <- opt_table_lines(data = gt_tbl, extent = "none")

  # The `type` column is internal bookkeeping; hide it from the output
  gt_tbl <- cols_hide(data = gt_tbl, columns = type)

  gt_tbl <-
    tab_options(
      data = gt_tbl,
      table.width = px(800),
      row_group.padding = px(12),
      data_row.padding = px(4),
      table_body.hlines.style = "solid",
      table_body.hlines.width = px(1),
      table_body.hlines.color = "#F7F7F7",
      row_group.border.top.style = "solid",
      row_group.border.top.width = px(1),
      row_group.border.bottom.width = px(1),
      table_body.border.bottom.style = "solid",
      table_body.border.bottom.width = px(1),
      table.border.bottom.style = "solid",
      table.border.bottom.width = px(1),
      table.border.bottom.color = "#F7F7F7",
      source_notes.font.size = px(10),
      source_notes.padding = px(6)
    )

  gt_tbl
}
| /R/tab_info.R | permissive | rstudio/gt | R | false | false | 9,919 | r | #' Understand what's been set inside of a **gt** table object
#'
#' @description
#'
#' It can become increasingly difficult to recall the ID values associated with
#' different labels in a **gt** table. Further to this, there are also
#' situations where **gt** will generate ID values on your behalf (e.g., with
#' [tab_spanner_delim()], etc.) while ensuring that duplicate ID values aren't
#' produced. For the latter case, it is impossible to know what those ID values
#' are unless one were to carefully examine to correct component of the `gt_tbl`
#' object.
#'
#' Because it's so essential to know these ID values for targeting purposes
#' (when styling with [tab_style()], adding footnote marks with
#' [tab_footnote()], etc.), the `tab_info()` function can help with all of this.
#' It summarizes (by location) all of the table's ID values and their associated
#' labels. The product is an informational **gt** table, designed for easy
#' retrieval of the necessary values.
#'
#' @inheritParams fmt_number
#'
#' @return An object of class `gt_tbl`.
#'
#' @section Examples:
#'
#' Let's use a portion of the [`gtcars`] dataset to create a **gt** table. We'll
#' use the [tab_spanner()] function to group two columns together under a
#' spanner column with the ID and label `"performance"`. Finally, we can use the
#' `tab_info()` function in a separate, interactive statement so that we can
#' inspect a table that summarizes the ID values any associated label text for
#' all parts of the table.
#'
#' ```r
#' gt_tbl <-
#' gtcars |>
#' dplyr::select(model, year, starts_with("hp"), msrp) |>
#' dplyr::slice(1:4) |>
#' gt(rowname_col = "model") |>
#' tab_spanner(
#' label = "performance",
#' columns = starts_with("hp")
#' )
#'
#' gt_tbl |> tab_info()
#' ```
#'
#' \if{html}{\out{
#' `r man_get_image_tag(file = "man_tab_info_1.png")`
#' }}
#'
#' @family part creation/modification functions
#' @section Function ID:
#' 2-12
#'
#' @section Function Introduced:
#' `v0.8.0` (November 16, 2022)
#'
#' @export
tab_info <- function(data) {
empty_tbl <-
dplyr::tibble(
id = character(0),
i = integer(0),
label = character(0),
type = character(0),
location = character(0)
)
built_data <- build_data(data, context = "html")
#
# Columns
#
boxhead <- dt_boxhead_get(data = data)
columns <- dplyr::select(boxhead, id = var, label = column_label, type)
columns <- dplyr::filter(columns, type %in% c("default", "stub", "hidden"))
columns <- dplyr::mutate(columns, label = unlist(label))
columns <- dplyr::mutate(columns, location = "Columns")
columns <- dplyr::mutate(columns, i = seq_len(nrow(columns)))
columns <- dplyr::select(columns, id, i, label, type, location)
#
# Rows
#
stub_df <- dt_stub_df_get(data = data)
stub_layout <- get_stub_layout(data = built_data)
if ("rowname" %in% stub_layout) {
data_df <- dt_data_get(data = data)
rowname_col <- dt_boxhead_get_var_stub(data = data)
row_name_vals <- dplyr::pull(dplyr::select(data_df, dplyr::all_of(rowname_col)), 1)
rownames <- dplyr::select(stub_df, id = row_id, i = rownum_i)
rownames <- dplyr::mutate(rownames, label = row_name_vals)
rownames <- dplyr::mutate(rownames, type = NA_character_)
rownames <- dplyr::mutate(rownames, location = "Rows")
rownames <- dplyr::select(rownames, id, i, label, type, location)
} else if (nrow(stub_df) == 1) {
rownames <-
dplyr::tibble(
id = "<< Single index value of 1 >>",
i = NA_integer_,
label = NA_character_,
type = NA_character_,
location = "Rows"
)
} else if (nrow(stub_df) == 0) {
rownames <-
dplyr::tibble(
id = "<< No rows in table >>",
i = NA_integer_,
label = NA_character_,
type = NA_character_,
location = "Rows"
)
} else {
rownum_i <- stub_df[["rownum_i"]]
rownum_desc <- paste0("<< Index values ", min(rownum_i), " to ", max(rownum_i), " >>")
rownames <-
dplyr::tibble(
id = rownum_desc,
i = NA_integer_,
label = NA_character_,
type = NA_character_,
location = "Rows"
)
}
#
# Spanners
#
if (dt_spanners_exists(data = data)) {
span_df <- dt_spanners_get(data = data)
spanners <- dplyr::select(span_df, id = spanner_id, label = spanner_label, i = spanner_level)
spanners <- dplyr::mutate(spanners, type = NA_character_)
spanners <- dplyr::mutate(spanners, i = as.integer(i))
spanners <- dplyr::mutate(spanners, label = unlist(label))
spanners <- dplyr::mutate(spanners, location = "Spanners")
spanners <- dplyr::select(spanners, id, i, label, type, location)
} else {
spanners <- empty_tbl
}
#
# Row Groups
#
if ("group_label" %in% stub_layout) {
groups_rows <- dt_row_groups_get(data = data)
row_groups <- dplyr::select(stub_df, id = group_id, label = group_label)
row_groups <- dplyr::group_by(row_groups, id)
row_groups <- dplyr::filter(row_groups, dplyr::row_number() == 1)
row_groups <- dplyr::mutate(row_groups, i = which(groups_rows %in% id))
row_groups <- dplyr::mutate(row_groups, type = NA_character_)
row_groups <- dplyr::mutate(row_groups, label = unlist(label))
row_groups <- dplyr::mutate(row_groups, location = "Row Groups")
row_groups <- dplyr::select(row_groups, id, i, label, type, location)
} else {
row_groups <- empty_tbl
}
#
# Summary Rows
#
summaries_present <- dt_summary_exists(data = data)
if (summaries_present) {
list_of_summaries <- extract_summary(data = data)
groups_rows_df <- dt_groups_rows_get(data = built_data)
# Group Summaries
if (grand_summary_col %in% names(list_of_summaries$summary_df_data_list)) {
group_id_vec <- groups_rows_df[["group_id"]]
group_summary <- empty_tbl
for (group_id in group_id_vec) {
if (group_id %in% names(list_of_summaries$summary_df_data_list)) {
group_summary_row_id <-
names(list_of_summaries$summary_df_data_list[[group_id]][["rowname"]])
group_summary_i <-
dplyr::bind_rows(
dplyr::tibble(
id = group_id,
i = NA_integer_,
label = NA_character_,
type = "::group_id::",
location = "Group Summary"
),
dplyr::tibble(
id = group_summary_row_id,
i = seq_len(length(group_summary_row_id)),
label = group_summary_row_id,
type = group_id,
location = "Group Summary"
)
)
group_summary <-
dplyr::bind_rows(
group_summary,
group_summary_i
)
}
}
} else {
group_summary <- empty_tbl
}
# Grand Summary
if (grand_summary_col %in% names(list_of_summaries$summary_df_data_list)) {
grand_summary_row_id <-
names(list_of_summaries$summary_df_data_list[[grand_summary_col]][["rowname"]])
grand_summary <-
dplyr::tibble(
id = grand_summary_row_id,
i = seq_len(length(grand_summary_row_id)),
label = grand_summary_row_id,
type = NA_character_,
location = "Grand Summary"
)
} else {
grand_summary <- empty_tbl
}
summaries <-
dplyr::bind_rows(
group_summary,
grand_summary
)
} else {
summaries <- empty_tbl
}
#
# Combine the tables for each part together
#
combined_tbl <-
dplyr::bind_rows(
columns,
rownames,
spanners,
row_groups,
summaries
)
#
# Generate the gt table for output
#
gt_tbl <- gt(combined_tbl, rowname_col = "id", groupname_col = "location")
gt_tbl <- tab_stubhead(data = gt_tbl, label = "ID")
gt_tbl <-
cols_label(
.data = gt_tbl,
label = "Label",
i = md("*Idx* \n*Lvl*"),
type = ""
)
gt_tbl <-
cols_width(
.data = gt_tbl,
id ~ px(250),
label ~ px(280),
i ~ px(50),
everything() ~ px(30)
)
gt_tbl <-
tab_style(
data = gt_tbl,
style = cell_text(font = google_font("IBM Plex Mono"), size = px(14)),
locations = list(cells_body(columns = c(i, label)), cells_stub())
)
gt_tbl <-
tab_style(
data = gt_tbl,
style = cell_borders(sides = c("top", "bottom"), color = "lightblue"),
locations = list(
cells_stub(rows = type == "::group_id::"),
cells_body(rows = type == "::group_id::")
)
)
gt_tbl <- opt_all_caps(data = gt_tbl, locations = c("row_group", "column_labels"))
gt_tbl <- sub_missing(data = gt_tbl, columns = i, missing_text = "")
gt_tbl <- sub_missing(data = gt_tbl, columns = c(id, label), missing_text = "")
gt_tbl <-
tab_header(
data = gt_tbl,
title = md("Information on ID and Label Values")
)
gt_tbl <- opt_align_table_header(data = gt_tbl, align = "left")
gt_tbl <- opt_table_lines(data = gt_tbl, extent = "none")
gt_tbl <- cols_hide(data = gt_tbl, columns = type)
gt_tbl <-
tab_options(
data = gt_tbl,
table.width = px(800),
row_group.padding = px(12),
data_row.padding = px(4),
table_body.hlines.style = "solid",
table_body.hlines.width = px(1),
table_body.hlines.color = "#F7F7F7",
row_group.border.top.style = "solid",
row_group.border.top.width = px(1),
row_group.border.bottom.width = px(1),
table_body.border.bottom.style = "solid",
table_body.border.bottom.width = px(1),
table.border.bottom.style = "solid",
table.border.bottom.width = px(1),
table.border.bottom.color = "#F7F7F7",
source_notes.font.size = px(10),
source_notes.padding = px(6)
)
gt_tbl
}
|
# Differential-expression preprocessing with edgeR: load per-sample gene
# counts, build a DGEList, filter low-expression genes, TMM-normalize,
# then draw an MDS plot and a sample-distance heatmap.
# load the counts of each sample (first column supplies gene row names)
MOCK<-read.table(file = "mock_gene_count.tsv",header = TRUE,row.names = 1)
IN<-read.table(file = "infect_gene_count.tsv",header = TRUE,row.names = 1)
MOCK_IN<-cbind(MOCK,IN)
# rename the columns to "<group>-<replicate>": mock-1..3, SARSCOV2-1..3
# (the 1:3 replicate index is recycled across the six columns)
group<-rep(c("mock","SARSCOV2"),each=3)
group<-as.factor(group)
colnames(MOCK_IN)<-paste(group,1:3,sep = "-")
# Put the data into a DGEList object
library(edgeR)
genelist<-rownames(MOCK_IN)
y<-DGEList(counts=MOCK_IN,genes=genelist)
# Filtering: keep genes with CPM > 1 in more than one sample
countsPerMillion <- cpm(y)
countCheck <- countsPerMillion > 1
keep <- which(rowSums(countCheck) > 1)
y <- y[keep, ]
# Normalization: trimmed-mean-of-M-values (TMM) scaling factors
y <- calcNormFactors(y, method="TMM")
y$samples$group <- group
# design matrix without intercept: one indicator column per group
design <- model.matrix(~0+group)
colnames(design) <- levels(group)
design
# exploring differences between libraries
# MDS plot: points colored and shaped by group
pch<-c(15,16)
colors<-rep(c("red","green"),3)
plotMDS(y,col=colors[group],pch=pch[group])
legend("top",legend=levels(group),pch=pch,col=colors)
# samples distance plot: pairwise distances between sample count profiles
library("RColorBrewer")
library("pheatmap")
sampleDists <- dist(t(MOCK_IN))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- paste(colnames(MOCK_IN))
colnames(sampleDistMatrix) <- paste(colnames(MOCK_IN))
colors2 <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
# cluster rows/columns by the same distance object used for the cells
sample_distance_plot<-
  pheatmap(sampleDistMatrix,
           clustering_distance_rows=sampleDists,
           clustering_distance_cols=sampleDists,
           col=colors2)
| /bulkRNAseq/run_edgeR_analysis.R | no_license | shuibingchen/COVID-19_Hearts | R | false | false | 1,631 | r | # use edgeR package to analysis the gene reads and generate MDS plot and sample distance heatmap
# load the counts of each sample
MOCK<-read.table(file = "mock_gene_count.tsv",header = TRUE,row.names = 1)
IN<-read.table(file = "infect_gene_count.tsv",header = TRUE,row.names = 1)
MOCK_IN<-cbind(MOCK,IN)
# rename the colname to the group name
group<-rep(c("mock","SARSCOV2"),each=3)
group<-as.factor(group)
colnames(MOCK_IN)<-paste(group,1:3,sep = "-")
# Put the data into a DGEList object
library(edgeR)
genelist<-rownames(MOCK_IN)
y<-DGEList(counts=MOCK_IN,genes=genelist)
# Filtering
countsPerMillion <- cpm(y)
countCheck <- countsPerMillion > 1
keep <- which(rowSums(countCheck) > 1)
y <- y[keep, ]
# Normalization
y <- calcNormFactors(y, method="TMM")
y$samples$group <- group
design <- model.matrix(~0+group)
colnames(design) <- levels(group)
design
# exploring differences between libraries
# MDS plot
pch<-c(15,16)
colors<-rep(c("red","green"),3)
plotMDS(y,col=colors[group],pch=pch[group])
legend("top",legend=levels(group),pch=pch,col=colors)
# samples distance plot
library("RColorBrewer")
library("pheatmap")
sampleDists <- dist(t(MOCK_IN))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- paste(colnames(MOCK_IN))
colnames(sampleDistMatrix) <- paste(colnames(MOCK_IN))
colors2 <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
sample_distance_plot<-
pheatmap(sampleDistMatrix,
clustering_distance_rows=sampleDists,
clustering_distance_cols=sampleDists,
col=colors2)
|
library(ggplot2)
library(solitude)
library(profvis)
# source('R/generics.R')
# source('R/iforest.R')
# source('R/itree.R')
# Test based on
# https://www.kaggle.com/norealityshows/outlier-detection-with-isolation-forest-in-r
# Build an isolation forest from `data`. Kept as a named wrapper so that
# forest construction appears as its own entry in the profvis output.
# (Uses `<-` rather than `=` for top-level assignment, per R style.)
profile_build_forest <- function(data) {
  iForest(data)
}
# Score `data` against a previously built isolation forest. Separate
# wrapper so prediction cost is attributed to its own profvis entry.
profile_predict_forest <- function(iforest, data) {
  predict(iforest, data)
}
#' First run on artificial dataset: 23660 ms
#' Optimization log:
#' - Made path_length non-generic: 21180 ms
#' - Optimized path_length_node to drop temporary q variable and
#' index data array with [[]] instead of dataframe: 17660 ms
#' - Use vapply instead of sapply in return of predict.iForest: 16140
#' - Change $ to [[]] in 'external' conditional: 17600
#' - Convert data frame to matrix: 8450
| /tests/profiler.R | no_license | pdjely/iforest | R | false | false | 781 | r | library(ggplot2)
library(solitude)
library(profvis)
# source('R/generics.R')
# source('R/iforest.R')
# source('R/itree.R')
# Test based on
# https://www.kaggle.com/norealityshows/outlier-detection-with-isolation-forest-in-r
profile_build_forest = function(data) {
iForest(data)
}
profile_predict_forest = function(iforest, data) {
predict(iforest, data)
}
#' First run on artificial dataset: 23660 ms
#' Optimization log:
#' - Made path_length non-generic: 21180 ms
#' - Optimized path_length_node to drop temporary q variable and
#' index data array with [[]] instead of dataframe: 17660 ms
#' - Use vapply instead of sapply in return of predict.iForest: 16140
#' - Change $ to [[]] in 'external' conditional: 17600
#' - Convert data frame to matrix: 8450
|
# -------- calculate waves
# Build time-domain cosine waves from a list of c(amplitude, phase) pairs.
#
# x:  list of numeric pairs, each c(amplitude, phase in degrees)
# f:  frequency in Hz (default 60)
# nc: number of cycles to generate (default 2)
#
# Returns a list: w (rad/s), t (time vector), nw (number of waves),
# ym (amplitudes), ang (phases in degrees), y (matrix with one column per
# wave), yrms (RMS values).
waves <- function(x, f = 60, nc = 2) {
  # angular frequency (radians/sec)
  w <- 2 * pi * f
  # period (sec); named `prd` to avoid shadowing the TRUE shorthand `T`
  prd <- 1 / f
  # time sequence for `nc` cycles, sampled at 100 points per cycle
  t <- seq(0, nc * prd, 0.01 * prd)
  nt <- length(t)
  # number of waves
  nw <- length(x)
  # amplitude and phase of each wave, extracted without growing arrays
  ym <- vapply(x, function(p) p[[1]], numeric(1))
  ang <- vapply(x, function(p) p[[2]], numeric(1))
  # one column per wave: y_i(t) = ym_i * cos(w*t + ang_i)
  y <- matrix(ncol = nw, nrow = nt)
  for (i in seq_len(nw)) {
    y[, i] <- ym[i] * cos(w * t + ang[i] * pi / 180)
  }
  # RMS of a sinusoid is its amplitude divided by sqrt(2)
  yrms <- ym / sqrt(2)
  return(list(w = w, t = t, nw = nw, ym = ym, ang = ang, y = y, yrms = yrms))
}
# ------- plot horizontal lines with labels for magnitude and rms
# Draw a gray horizontal reference line at each wave's amplitude (and,
# when rms = TRUE, at its RMS value), labeling each line with the rounded
# value plus its units. Assumes a plot is already open.
#
# nw: number of waves; ym: amplitudes; tmax/ymax: axis maxima used for
# label placement offsets; units: unit string per wave; yrms: RMS values
horiz.lab <- function(nw, ym, tmax, ymax, units, yrms, rms) {
  # seq_len() (rather than 1:nw) degrades gracefully when nw == 0
  for (i in seq_len(nw)) {
    abline(h = ym[i] + 0.005 * ymax, lty = i, col = 'gray')
    text(0.1 * tmax, ym[i] + 0.02 * ymax,
         paste0(round(ym[i], 1), units[i]), cex = 0.7)
    if (isTRUE(rms)) {
      abline(h = yrms[i] + 0.005 * ymax, lty = i, col = 'gray')
      text(0.2 * tmax, yrms[i] + 0.02 * ymax,
           paste0("RMS=", round(yrms[i], 0), units[i]), cex = 0.7)
    }
  }
}
# -------------- write out waves for legend
# Build plotmath expressions describing each wave, e.g. "v=170cos(377t+30°)V",
# and place them in a legend at the top right of the current plot.
# nw: number of waves; ang: phases (deg); lab: wave labels; ym: amplitudes;
# w: angular frequency (rad/s); units: unit string per wave
wave.leg <- function(nw,ang,lab,ym,w,units){
 wave <- array(); s.sym <- "+"
 # round values for display only
 ym <- round(ym,1); w<- round(w,0);ang <- round(ang,1)
 for(i in 1:nw){
  # omit the "+" sign for negative phases (the minus sign prints itself)
  if(ang[i]<0) s.sym <-"" else s.sym <- "+"
  wave[i] <- as.expression(bquote(.(lab[i])*"="*.(ym[i])*"cos("*.(w)*"t"*.(s.sym)* .(ang[i])*degree*")"*.(units[i])))
 }
 legend('topright',legend=wave,lty=1:nw,col=1,bg='white',cex=0.7)
}
# --------plot AC cosine wave ---------------------
# Plot the wave matrix produced by waves(): one line per wave, symmetric
# y-limits padded by 20%, amplitude (and optional RMS) reference lines via
# horiz.lab(), and a legend of wave equations via wave.leg().
# v.t: list returned by waves(); v.lab/v.units: labels and units per wave;
# y.lab: y-axis label; rms: also mark RMS levels when TRUE
ac.plot <- function(v.t,v.lab="v(t)",v.units="V",y.lab="v(t)[V]",rms=FALSE){
 # pad max with margin of 20%
 ymax <- 1.2*max(v.t$y); tmax <- max(v.t$t)
 # plot graph
 matplot(v.t$t,v.t$y,type="l", ylim=c(-ymax,ymax), xlab="t[sec]",
         ylab=y.lab,lty=1:v.t$nw,col=1,lwd=1.5)
 abline(v=0,h=0,lty=1,col='gray')
 horiz.lab(v.t$nw,v.t$ym,tmax,ymax,v.units,v.t$yrms,rms)
 wave.leg(v.t$nw,v.t$ang,v.lab,v.t$ym,v.t$w,v.units)
}
# -------------- write out phasors for legend
# Build plotmath expressions "lab=mag units, ang°" for each phasor and
# place them in a legend at the top left of the current plot.
# np: number of phasors; mag/ang: magnitudes and angles (deg); lab/units:
# per-phasor labels and unit strings; lty.p: line types matching the plot
phas.leg <- function(np,mag,ang,lab,units,lty.p){
 phas <-array()
 # round values for display only
 mag <- round(mag,1);ang <- round(ang,1)
 for(i in 1:np){
  phas[i] <- as.expression(bquote(.(lab[i])*"="*.(mag[i])*.(units[i])*","*.(ang[i])*degree))
 }
 legend('topleft',legend=phas,lty=lty.p,col=1,bg='white',cex=0.7)
}
# Draw a circular arc of radius `mag` from ang[1] to ang[2] (in degrees)
# on the current plot, finishing with a small arrowhead whose tail sits
# at 90% of the arc length.
arc <- function(mag, ang) {
  theta <- seq(ang[1], ang[2], 0.01) * pi / 180  # arc angles in radians
  n <- length(theta)
  tail_idx <- round(0.9 * n, 0)
  lines(mag * cos(theta), mag * sin(theta), col = 1, lwd = 1)
  arrows(
    x0 = mag * cos(theta[tail_idx]), y0 = mag * sin(theta[tail_idx]),
    x1 = mag * cos(theta[n]), y1 = mag * sin(theta[n]),
    col = 1, lwd = 1, lty = 1, length = 0.05
  )
}
# Draw a "rotating phasor" figure: each phasor in `vp` (a list of polar
# pairs c(magnitude, angle in degrees)) is drawn as an arrow from the
# origin inside a reference circle, with an arc from 0 to its angle and
# a label (default "wt") placed near that arc.
rot.fig <- function(vp,v.lab="wt"){
 # number of phasors
 np <- length(vp)
 # magnitude and phase (degrees) of each phasor
 mag <- array(); ang <- array()
 for(i in 1:np){
  mag[i] <- vp[[i]][1]; ang[i] <- vp[[i]][2]
 }
 angr <- ang*pi/180
 # largest magnitude sets the circle radius and the plotting extent
 xmax <- max(mag); x <- seq(-xmax,xmax,0.1)
 # input conversion to rect
 v.r <- list(); for(i in 1:np) v.r[[i]] <- recta(vp[[i]])
 # empty canvas: no axes or annotation, just the origin cross-hairs
 plot(x,x,type="n",axes=FALSE,ann=FALSE)
 abline(h=0,v=0,col='gray')
 # draw circle
 ar <- seq(0,2*pi,0.01)
 lines(xmax*cos(ar),xmax*sin(ar),col=1,lwd=1)
 # plot arrows
 for(i in 1:np){
  # plot phasor and label
  arrows(x0=0,y0=0,x1=v.r[[i]][1],y1=v.r[[i]][2],length=0.1,lty=i,lwd=2)
  #text(v.r[[i]][1]+0.05*xmax,v.r[[i]][2]+0.05*xmax,v.lab[i],cex=0.7)
  # angle arc drawn at half the phasor's magnitude, from 0 to its phase
  arc(mag[i]/2,c(0,ang[i]))
  text((mag[i]/2)*cos(angr[i])+0.2*xmax,(mag[i]/2)*sin(angr[i])-0.1*xmax,v.lab[i])
 }
 #phas.leg(np,mag,ang,v.lab,v.units)
}
# Plot one full cycle (0-360 degrees) of a unit-amplitude sine wave with
# a gray horizontal line marking zero. xlab/ylab: axis labels.
sinplot <- function(xlab, ylab) {
  deg <- seq(0, 360, 0.1)
  amp <- 1 * sin(deg * pi / 180)
  plot(deg, amp, type = "l", ylab = ylab, xlab = xlab)
  abline(h = 0, col = 'gray')
}
# Draw a polar grid on Cartesian Re/Im axes: concentric gray circles at
# "pretty" radii (labeled with their radius) plus radial lines every 30
# degrees labeled with their angle. rmax: largest magnitude to contain.
gridcir <- function(rmax){
 x <- rmax
 # pad the required extent by 20% before choosing rounded grid radii
 xmax <- 1.2*x
 xd <- pretty(c(0,xmax)); nd <- length(xd)
 xs <- seq(-xd[nd],xd[nd],0.1)
 xl <- 1.01*c(min(xs),max(xs))
 # plot axis
 plot(xs,xs,type="n",xlab="Re",ylab="Im",xlim=xl,ylim=xl,cex.axis=0.6)
 abline(h=0,v=0,col='gray')
 # circles and labels
 rd <- xd; nr <- nd
 ar <- seq(0,2*pi,0.01)
 for(i in 1:nr){
  lines(rd[i]*cos(ar),rd[i]*sin(ar),col='gray')
  text(rd[i],-0.05*x,rd[i],cex=0.6, col='gray')
 }
 # radial lines and labels every 30 degrees (360 skipped; same ray as 0)
 ang <- seq(0,360,30);ng <- length(ang); angr <- ang*pi/180
 for(i in 1:(ng-1)){
  coord <- c(rd[nr]*cos(angr[i]),rd[nr]*sin(angr[i]))
  lines(c(0,coord[1]),c(0,coord[2]),col='gray')
  # nudge the label outward along both axes so it clears the grid line
  coordtxt <- coord+sign(coord)*0.05*x
  text(coordtxt[1],coordtxt[2],as.expression(bquote(.(ang[i])*degree)),cex=0.6,col='gray')
 }
}
# ---------plot phasors -----------------
# Draw a set of phasors as arrows from the origin on a polar grid
# (gridcir), label each arrow tip, and add a legend via phas.leg().
# v.p: list of polar pairs c(magnitude, angle in degrees)
# v.lab/v.units: labels and unit strings, one per phasor
# lty.p: optional line types; defaults to 1..np
phasor.plot <- function(v.p,v.lab="V",v.units="V",lty.p=NULL){
 # number of phasors
 np <- length(v.p)
 if(is.null(lty.p)) lty.p <- c(1:np)
 # magnitude and phase
 mag <- array(); ang <- array()
 for(i in 1:np) {mag[i] <- v.p[[i]][1];ang[i] <- v.p[[i]][2]}
 # input conversion to rect
 v.r <- list(); for(i in 1:np) v.r[[i]] <- recta(v.p[[i]])
 xmax <- max(mag)
 gridcir(xmax)
 # plot arrows
 for(i in 1:np){
  # plot phasor; nudge zero coordinates slightly so the sign()-based
  # label offset below never collapses onto the axis
  arrows(x0=0,y0=0,x1=v.r[[i]][1],y1=v.r[[i]][2],length=0.1,lty=lty.p[i],lwd=1.8)
  xt <- v.r[[i]][1]; if(xt==0) xt <- 0.001
  yt <- v.r[[i]][2]; if(yt==0) yt <- 0.001
  text(xt+0.06*xmax*sign(xt),yt+0.06*xmax*sign(yt),v.lab[i],cex=0.7)
 }
 phas.leg(np,mag,ang,v.lab,v.units,lty.p)
}
# ------------ complex conversion
# Convert rectangular coordinates c(x, y) to polar c(magnitude, angle in
# degrees), rounded to 3 decimals.
polar <- function(rec){
  x <- rec[1]; y <- rec[2]
  mag <- sqrt(x^2 + y^2)
  # atan2() resolves the correct quadrant; the previous atan(y/x) form
  # returned wrong angles for x < 0 (e.g. c(-1, 0) gave 0 instead of 180)
  # and needed a NaN guard for 0/0. atan2(0, 0) is 0, so the old origin
  # behavior is preserved.
  ang <- atan2(y, x) * 180 / pi
  return(round(c(mag, ang), 3))
}
# Convert a polar pair c(magnitude, angle in degrees) to rectangular
# coordinates c(x, y), rounded to 3 decimals.
recta <- function(pol) {
  theta <- pol[2] * pi / 180  # angle in radians
  round(c(pol[1] * cos(theta), pol[1] * sin(theta)), 3)
}
# Multiply two phasors given in polar form c(magnitude, angle-degrees):
# magnitudes multiply, angles add. Result rounded to 3 decimals.
mult.polar <- function(x1, x2) {
  round(c(x1[1] * x2[1], x1[2] + x2[2]), 3)
}
# Divide phasor x1 by phasor x2 in polar form c(magnitude, angle-degrees):
# magnitudes divide, angles subtract. Result rounded to 3 decimals.
div.polar <- function(x1, x2) {
  round(c(x1[1] / x2[1], x1[2] - x2[2]), 3)
}
# ---------- admittance
# Admittance Y = 1/Z for an impedance given in rectangular form c(R, X):
# convert to polar, invert there (magnitude reciprocal, angle negated via
# division of the unit phasor), convert back, round to 3 decimals.
admit <- function(Z.r){
  Y.p <- div.polar(c(1, 0), polar(Z.r))
  round(recta(Y.p), 3)
}
# Convert complex voltage and current vectors into polar phasors.
#
# V, I: complex vectors. Returns a list with:
#   V.p, I.p: matrices with one row per phasor, columns (Re, Im)
#   VI:       list of polar c(magnitude, angle) pairs, voltages first
vector.phasor <- function(V,I){
  nV <- length(V); VI <- list()
  # BUG FIX: the rectangular matrices must have two columns (Re, Im);
  # the original `ncol = nV` only yielded correct rows when the vector
  # had exactly 2 elements (out-of-bounds for more, garbled for 1).
  # For length-2 input the resulting matrix is identical to before.
  V.p <- matrix(c(Re(V), Im(V)), ncol = 2)
  for(i in seq_len(nV)) VI[[i]] <- polar(V.p[i, ])
  nI <- length(I)
  I.p <- matrix(c(Re(I), Im(I)), ncol = 2)
  for(i in seq_len(nI)) VI[[i + nV]] <- polar(I.p[i, ])
  return(list(V.p = V.p, I.p = I.p, VI = VI))
}
| /R/ac-functions.R | no_license | cran/renpow | R | false | false | 6,431 | r | # -------- calculate waves
waves <- function(x,f=60,nc=2){
# angular frequency (radians/sec)
w=2*pi*f
# period (sec)
T= 1/f
# time sequence for a number n of cycles
t = seq(0,nc*T,0.01*T); nt = length(t)
# number of waves
nw <- length(x)
# matrix with all waves, arrays with amplitude and phase
y <- matrix(ncol=nw,nrow=nt); ym <- array(); ang <- array()
# for each wave rename mag and phase and calc to fill the matrix
for(i in 1:nw){
ym[i] <- x[[i]][1]; ang[i] <- x[[i]][2]
y[,i]=ym[i]*cos(w*t+ang[i]*pi/180)
}
yrms <- ym/sqrt(2)
return(list(w=w,t=t,nw=nw,ym=ym,ang=ang,y=y,yrms=yrms))
}
# ------- plot horizontal lines with labels for magnitude and rms
horiz.lab <- function(nw,ym,tmax,ymax,units,yrms,rms){
for(i in 1:nw){
abline(h=ym[i]+0.005*ymax,lty=i,col='gray')
text(0.1*tmax,ym[i]+0.02*ymax,paste(round(ym[i],1),units[i],sep=""),cex=0.7)
if(rms==TRUE){
abline(h=yrms[i]+0.005*ymax,lty=i,col='gray')
text(0.2*tmax,yrms[i]+0.02*ymax,paste("RMS=",round(yrms[i],0),units[i],sep=""),cex=0.7)
}
}
}
# -------------- write out waves for legend
wave.leg <- function(nw,ang,lab,ym,w,units){
wave <- array(); s.sym <- "+"
ym <- round(ym,1); w<- round(w,0);ang <- round(ang,1)
for(i in 1:nw){
if(ang[i]<0) s.sym <-"" else s.sym <- "+"
wave[i] <- as.expression(bquote(.(lab[i])*"="*.(ym[i])*"cos("*.(w)*"t"*.(s.sym)* .(ang[i])*degree*")"*.(units[i])))
}
legend('topright',legend=wave,lty=1:nw,col=1,bg='white',cex=0.7)
}
# --------plot AC cosine wave ---------------------
ac.plot <- function(v.t,v.lab="v(t)",v.units="V",y.lab="v(t)[V]",rms=FALSE){
# pad max with margin of 20%
ymax <- 1.2*max(v.t$y); tmax <- max(v.t$t)
# plot graph
matplot(v.t$t,v.t$y,type="l", ylim=c(-ymax,ymax), xlab="t[sec]",
ylab=y.lab,lty=1:v.t$nw,col=1,lwd=1.5)
abline(v=0,h=0,lty=1,col='gray')
horiz.lab(v.t$nw,v.t$ym,tmax,ymax,v.units,v.t$yrms,rms)
wave.leg(v.t$nw,v.t$ang,v.lab,v.t$ym,v.t$w,v.units)
}
# -------------- write out waves for legend
phas.leg <- function(np,mag,ang,lab,units,lty.p){
phas <-array()
mag <- round(mag,1);ang <- round(ang,1)
for(i in 1:np){
phas[i] <- as.expression(bquote(.(lab[i])*"="*.(mag[i])*.(units[i])*","*.(ang[i])*degree))
}
legend('topleft',legend=phas,lty=lty.p,col=1,bg='white',cex=0.7)
}
arc <- function(mag,ang){
rd <- mag
ar <- seq(ang[1],ang[2],0.01); ar <- ar*pi/180
nar <- length(ar); fnar <- round(0.9*nar,0)
lines(rd*cos(ar),rd*sin(ar),col=1,lwd=1)
arrows(x0=rd*cos(ar[fnar]),y0=rd*sin(ar[fnar]),
x1=rd*cos(ar[nar]),y1=rd*sin(ar[nar]),
col=1,lwd=1,lty=1,length=0.05)
}
# Rotating-phasor figure: each element of list vp is a c(magnitude, angle)
# pair (angle in degrees). Draws every phasor as an arrow inside a
# reference circle, with an arc marking its angle from the positive real
# axis. Uses recta() and arc(), defined elsewhere in this file.
rot.fig <- function(vp,v.lab="wt"){
# number of phasors
np <- length(vp)
# magnitude
mag <- array(); ang <- array()
for(i in 1:np){
mag[i] <- vp[[i]][1]; ang[i] <- vp[[i]][2]
}
angr <- ang*pi/180
# axis range spans the largest magnitude (note: no padding is applied here)
xmax <- max(mag); x <- seq(-xmax,xmax,0.1)
# input conversion to rect
v.r <- list(); for(i in 1:np) v.r[[i]] <- recta(vp[[i]])
# plot no axis
plot(x,x,type="n",axes=FALSE,ann=FALSE)
abline(h=0,v=0,col='gray')
# draw circle
ar <- seq(0,2*pi,0.01)
lines(xmax*cos(ar),xmax*sin(ar),col=1,lwd=1)
# plot arrows
for(i in 1:np){
# plot phasor and label
arrows(x0=0,y0=0,x1=v.r[[i]][1],y1=v.r[[i]][2],length=0.1,lty=i,lwd=2)
#text(v.r[[i]][1]+0.05*xmax,v.r[[i]][2]+0.05*xmax,v.lab[i],cex=0.7)
# angle arc at half radius, labelled near its midpoint
arc(mag[i]/2,c(0,ang[i]))
text((mag[i]/2)*cos(angr[i])+0.2*xmax,(mag[i]/2)*sin(angr[i])-0.1*xmax,v.lab[i])
}
#phas.leg(np,mag,ang,v.lab,v.units)
}
# Draw one full cycle of a unit sine wave against degrees (0..360),
# with a gray zero line. xlab / ylab label the axes.
sinplot <- function(xlab, ylab) {
  deg <- seq(0, 360, by = 0.1)
  plot(deg, sin(deg * pi / 180), type = "l", ylab = ylab, xlab = xlab)
  abline(h = 0, col = "gray")
}
# Draw a polar grid: concentric gray circles at pretty() radii up to ~1.2*rmax
# plus gray radial lines every 30 degrees, each labelled with its angle.
gridcir <- function(rmax){
x <- rmax
# max of all magnitude and pad by 20%
xmax <- 1.2*x
# pretty() picks round tick values used as the circle radii
xd <- pretty(c(0,xmax)); nd <- length(xd)
xs <- seq(-xd[nd],xd[nd],0.1)
xl <- 1.01*c(min(xs),max(xs))
# plot axis
plot(xs,xs,type="n",xlab="Re",ylab="Im",xlim=xl,ylim=xl,cex.axis=0.6)
abline(h=0,v=0,col='gray')
# circles and labels
rd <- xd; nr <- nd
ar <- seq(0,2*pi,0.01)
for(i in 1:nr){
lines(rd[i]*cos(ar),rd[i]*sin(ar),col='gray')
# radius label just below the positive real axis
text(rd[i],-0.05*x,rd[i],cex=0.6, col='gray')
}
# radial lines and labels
ang <- seq(0,360,30);ng <- length(ang); angr <- ang*pi/180
for(i in 1:(ng-1)){
coord <- c(rd[nr]*cos(angr[i]),rd[nr]*sin(angr[i]))
lines(c(0,coord[1]),c(0,coord[2]),col='gray')
# nudge each angle label just outside the outer circle
coordtxt <- coord+sign(coord)*0.05*x
text(coordtxt[1],coordtxt[2],as.expression(bquote(.(ang[i])*degree)),cex=0.6,col='gray')
}
}
# ---------plot phasors -----------------
# Plot a list of phasors v.p (each c(magnitude, angle-in-degrees)) as arrows
# on the polar grid drawn by gridcir(), labelling each arrow and adding a
# legend via phas.leg(). lty.p optionally sets per-phasor line types
# (defaults to 1:np).
phasor.plot <- function(v.p,v.lab="V",v.units="V",lty.p=NULL){
# number of phasors
np <- length(v.p)
if(is.null(lty.p)) lty.p <- c(1:np)
# magnitude and phase
mag <- array(); ang <- array()
for(i in 1:np) {mag[i] <- v.p[[i]][1];ang[i] <- v.p[[i]][2]}
# input conversion to rect
v.r <- list(); for(i in 1:np) v.r[[i]] <- recta(v.p[[i]])
xmax <- max(mag)
gridcir(xmax)
# plot arrows
for(i in 1:np){
# plot phasor and label
arrows(x0=0,y0=0,x1=v.r[[i]][1],y1=v.r[[i]][2],length=0.1,lty=lty.p[i],lwd=1.8)
# avoid sign(0) = 0 so the label offset never collapses onto the arrow tip
xt <- v.r[[i]][1]; if(xt==0) xt <- 0.001
yt <- v.r[[i]][2]; if(yt==0) yt <- 0.001
text(xt+0.06*xmax*sign(xt),yt+0.06*xmax*sign(yt),v.lab[i],cex=0.7)
}
phas.leg(np,mag,ang,v.lab,v.units,lty.p)
}
# ------------ complex conversion
polar <- function(rec){
# Convert a rectangular pair c(x, y) into polar form c(magnitude, angle),
# angle in degrees; both components rounded to 3 decimal places.
x <- rec[1]; y <- rec[2]
mag <- sqrt(x^2+y^2)
# atan2() resolves the correct quadrant: the previous atan(y/x) reported
# e.g. polar(c(-1, 0)) as 0 degrees instead of 180, and needed a manual
# NaN guard for 0/0. atan2(0, 0) is defined as 0, so no guard is required.
ang <- atan2(y, x)*180/pi
return(round(c(mag,ang),3))
}
# Convert a polar pair c(magnitude, angle-in-degrees) to rectangular
# coordinates c(x, y), rounded to 3 decimal places.
recta <- function(pol) {
  theta <- pol[2] * pi / 180  # degrees -> radians
  round(pol[1] * c(cos(theta), sin(theta)), 3)
}
# Product of two polar numbers c(mag, ang): magnitudes multiply,
# angles (degrees) add. Result rounded to 3 decimal places.
mult.polar <- function(x1, x2) {
  round(c(x1[1] * x2[1], x1[2] + x2[2]), 3)
}
# Quotient of two polar numbers c(mag, ang): magnitudes divide,
# angles (degrees) subtract. Result rounded to 3 decimal places.
div.polar <- function(x1, x2) {
  round(c(x1[1] / x2[1], x1[2] - x2[2]), 3)
}
# ---------- admittance
# Admittance Y = 1/Z for an impedance Z.r given in rectangular form
# c(Re, Im). Inverts Z in polar form and returns Y as a rectangular pair,
# rounded to 3 decimal places. Relies on polar(), div.polar() and recta()
# defined above in this file.
admit <- function(Z.r) {
  Y.polar <- div.polar(c(1, 0), polar(Z.r))
  round(recta(Y.polar), 3)
}
# Convert complex voltage vector V and current vector I into polar phasors.
# Returns list(V.p, I.p, VI): V.p / I.p are matrices with one row per
# element and columns (Re, Im); VI is a list of c(magnitude, angle) pairs
# (degrees), voltages first, then currents. Uses polar(), defined above.
vector.phasor <- function(V,I){
nV <- length(V); VI <- list()
# ncol = 2 so row i is c(Re(V[i]), Im(V[i])). The previous ncol = nV built
# a 2-row matrix, which only matched the 1:nV row loop in the accidental
# nV == 2 case and errored (subscript out of bounds) for nV > 2.
V.p <- matrix(c(Re(V),Im(V)),ncol=2)
for(i in 1:nV) VI[[i]] <- polar(V.p[i,])
nI <- length(I)
I.p <- matrix(c(Re(I),Im(I)),ncol=2)
for(i in 1:nI) VI[[i+nV]] <- polar(I.p[i,])
return(list(V.p=V.p,I.p=I.p,VI=VI))
}
|
tic()
results <- rep(NA, nrow(cv_df))
for (i in 1:nrow(cv_df)) {
results[i] <- cv_fun(i)
}
toc() | /04_for_loop_pre_allocations.R | no_license | MJWeldy/R_GLM_speed_test | R | false | false | 103 | r | tic()
results <- rep(NA, nrow(cv_df))
for (i in 1:nrow(cv_df)) {
results[i] <- cv_fun(i)
}
toc() |
\name{InfoSaniR_extract}
\alias{InfoSaniR_extract}
\title{Extraction de tous les fichiers InfoSani annuel}
\description{Extrait automatiquement toutes les donnees brutes du fichier InfoSani dans toutes les structures sanitaires d'une province pour une annee. Cette fonction necessite tous les fichiers xlsx InfoSani annuels.}
\usage{
InfoSaniR_extract(health = 'patho', Year=2000)
}
\arguments{
\item{health}{Donnee InfoSani brute. Soit pathologie (patho), sante maternite et infantile (accouch1, accouch2 ou malnu), vaccination (vac) ou toutes les donnees (all).}
\item{Year}{ Annee de recueil des donnees InfoSani. Annee doit etre numerique et compris entre 2000 et 2099.}
}
\details{Vous devez specifier correctement le dossier contenant des fichiers xlsx et le dossier ou les donnees extraites seront exportees.}
\value{
La fonction retourne automatiquement des donnees extraites dans le dossier specifie pour exportation sur les pathologies, sante maternite infantile et vaccination:
\item{data_health_year.xlsx }{Donnees extraites sous format excel (xlsx)}
\item{data_health_year.RData }{Donnees extraites sous format image (RData) lisible par R}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Regis Obiang <regis.obiang@lambarene.org>,
Bertrand Lell <bertrand.lell@gmail.com>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##Convertir prealablement les extensions xls en xlsx
##les fichiers InfoSani excel
##Extraire automatiquement les donnees de pathologies en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'patho', Year=2012)
##Extraire automatiquement les donnees sante de maternite infantile en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'accouch1', Year=2012)
InfoSaniR_extract(health = 'accouch2', Year=2012)
InfoSaniR_extract(health = 'malnu', Year=2012)
##Extraire automatiquement les donnees sante de vaccination en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'vac', Year=2012)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/InfoSaniR_extract.Rd | no_license | RegisOB/GabSaniR | R | false | false | 2,308 | rd | \name{InfoSaniR_extract}
\alias{InfoSaniR_extract}
\title{Extraction de tous les fichiers InfoSani annuel}
\description{Extrait automatiquement toutes les donnees brutes du fichier InfoSani dans toutes les structures sanitaires d'une province pour une annnee. Cette fonction neccessite tous les fichiers xlsx InfoSani annuel.}
\usage{
InfoSaniR_extract(health = 'patho', Year=2000)
}
\arguments{
\item{health}{Donnee InfoSani brute. Soit pathologie (patho), sante maternite et infantile (accouch1, accouch2 ou malnu), vaccination (vac) ou toutes les donnees (all).}
\item{Year}{ Annee de recueil des donnees InfoSani. Annee doit etre numerique et compris entre 2000 et 2099.}
}
\details{Vous devez specifier correctement le dossier contenant des fichiers xlsx et le dossier ou les donnees extraites seront exportees.}
\value{
La fonction retourne automatiquement des donnees extraites dans le dossier specifie pour exportation sur les pathologies, sante maternite infantile et vaccination:
\item{data_health_year.xlsx }{Donnees extraites sous format excel (xlsx)}
\item{data_health_year.RData }{Donnees extraites sous format image (RData) lisible par R}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Regis Obiang <regis.obiang@lambarene.org>,
Bertrand Lell <bertrand.lell@gmail.com>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##Convertir prealablement les extensions xls en xlsx
##les fichiers InfoSani excel
##Extraire automatiquement les donnees de pathologies en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'patho', Year=2012)
##Extraire automatiquement les donnees sante de maternite infantile en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'accouch1', Year=2012)
InfoSaniR_extract(health = 'accouch2', Year=2012)
InfoSaniR_extract(health = 'malnu', Year=2012)
##Extraire automatiquement les donnees sante de vaccination en 2012
##de toutes les structures de sante
InfoSaniR_extract(health = 'vac', Year=2012)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
################################################################################
# PLOT.MANYLM, PLOT.MANYGLM: #
# Plot for evaluation of goodness of fit for lm.mvabund objects #
################################################################################
default.plot.manyglm <- function(x, which = 1, res.type="pit.norm", caption = c("Residuals vs Fitted","Normal Q-Q", "Scale-Location", "Cook's distance"), overlay=TRUE, n.vars=Inf, var.subset=NULL, panel = if (add.smooth) panel.smooth else points, sub.caption = NULL, main = "", ask, ..., id.n = if (overlay) 0 else 3, labels.id=rownames(x$Pearson.residuals), cex.id = 0.75, qqline = TRUE, add.smooth = if(!is.null(getOption("add.smooth"))){ getOption("add.smooth") } else TRUE, label.pos = c(4, 2), cex.caption=1.5, asp = 1, legend.pos= "nextplot", mfrow= if(overlay) {length(which)+(legend.pos=="nextplot")} else if(write.plot=="show") c(min(n.vars,3),length(which)) else length(which), mfcol=NULL, write.plot="show", filename="plot.mvabund", keep.window= if(is.null(c(mfrow,mfcol))) TRUE else FALSE, legend=FALSE)
{
allargs <- match.call(expand.dots = FALSE)
dots <- allargs$...
if ("cex" %in% names(dots)) cex <- dots$cex
else cex <- 1.5
if ("cex.lab" %in% names(dots)) clab <- dots$cex.lab
else clab <- 1.5
if ("col.lab" %in% names(dots)) colab <- dots$col.lab
else colab <- par("col.lab")
if ("lwd" %in% names(dots)) lwd <- dots$lwd
else lwd <- 2
if ("cex.axis" %in% names(dots)) caxis <- dots$cex.axis
else caxis <- 1.5
dev <- dev.list()
dev.name <- getOption("device")
if (write.plot!="show") {
if (write.plot=="eps" | write.plot=="postscript")
postscript(paste(filename,".eps", sep="") )
else if (write.plot=="pdf")
pdf(paste(filename,".pdf", sep="") )
else if (write.plot=="jpeg" )
jpeg(paste(filename,".jpeg", sep=""))
else if (write.plot=="bmp" )
bmp(paste(filename,".bmp", sep=""))
else if (write.plot=="png" )
png(paste(filename,".png", sep=""))
on.exit( dev.off() )
}
if (length(dots)>0)
{
# in the plot function.
deactive <- c("xlab", "ylab", "ylim", "sub", "type")
deactivate <- (1:length(dots))[names(dots) %in% deactive ]
for (i in length(deactivate):1)
dots[ deactivate[i] ]<-NULL #fixed up [[]], due to compile error (v2.10).
dots <- lapply( dots, eval, parent.frame() )
if( "col.main" %in% names(dots) ) colmain <- dots$col.main
else colmain <- par("col.main")
}
else colmain <- par("col.main")
if (!inherits(x, c("manylm", "manyglm"))) warning("use 'plot.manylm' only with \"manylm\" or \"manyglm\" objects")
if (!is.numeric(which) || any(which < 1) || any(which > 4))
stop("'which' must be in 1:4")
isGlm <- inherits(x, "manyglm")
show <- rep.int(FALSE, times = 4)
show[which] <- TRUE
if(ncol(x$x) > 0 ) empty <- FALSE
else empty <- TRUE
# if(empty && show[4]) {
if(show[4])
{
if (length(which)==1) stop("Plot no. 4 cannot be drawn, as Cooks distance cannot be calculated for an empty model")
else
{
warning("Plot no. 4 cannot be drawn, as Cooks distance cannot be calculated for an empty model")
show[4] <- FALSE
which <- which[-which[which==4]]
}
}
if (substr(res.type,1,3)=="pit")
{
r <- as.matrix(x$PIT.residuals)
if (res.type=="pit.norm") r <- residuals(x)
}
else r <- as.matrix(x$Pearson.residuals) # residuals(x)
yh <- as.matrix(x$linear.predictor)
# yh <- as.matrix(x$fitted.values)
# w <- x$sqrt.weight * x$sqrt.weight
w <- NULL
# Change logical var.subset to numerical var.subset, if necessary. Note that NA values are logical as well, but should be excluded here.
if(!is.null(var.subset) & !is.numeric(var.subset))
var.subset <- which(var.subset[!is.na(var.subset)])
# miss.varsubset<-!is.numeric(var.subset) # If this function is called within another, the missing function could be tricked out.
if(is.null(labels.id)) labels.id <- as.character(1:nrow(r))
if (!is.null(w)) {
wind <- w != 0
if (isGlm & is.matrix(w)){
wind <- rowSums( wind) != 0
w <- w[wind,, drop=FALSE]
} else w <- w[wind]
r <- r[wind,, drop=FALSE]
yh <- yh[wind,, drop=FALSE]
labels.id <- labels.id[wind]
}
n <- nrow(r)
p <- ncol(r)
######## BEGIN edit var.subset, n.vars and r & fitted values #########
# subset allows double variables
# Do some dimension checks for the subset.
if (missing(var.subset) | is.null(var.subset) | !is.numeric(var.subset)) {
# Plot the n.var variables with highest abundances
if ( p < n.vars ) {
# warning("You have passed an invalid number of variables 'n.vars' to be included in the plot. All variables will be included instead.")
n.vars <- p
}
y <- as.matrix(x$y)
if (!is.null(w))
sum.y <- t(y[wind,,drop=FALSE]) %*% matrix(1,ncol=1,nrow=n)
else sum.y <- t(y) %*% matrix(1,ncol=1,nrow=n)
# Find abundance ranks OF MVABUND.OBJECT.1.
var.subset <- order(sum.y, decreasing = TRUE)
typeofvarsubset <- " \n(the variables with highest total abundance)"
}
else { # if var.subset is specified
if ( p < max(var.subset) )
stop ("You have passed an invalid var.subset")
var.subset.dim <- length(var.subset)
if ( missing(n.vars) | n.vars != var.subset.dim ) {
n.vars <- var.subset.dim
warning("Number of variables 'n.var' is set to the length of 'var.subset'.")
}
typeofvarsubset <- " (user selected)"
}
############# Extract relevant data ###################
r <- r[,var.subset, drop=FALSE]
yh <- yh[,var.subset, drop=FALSE]
w <- w[,var.subset, drop=FALSE]
######### END edit var.subset, n.vars and r & fitted values ###########
var.names <- colnames(r)
if(is.null(var.names)) var.names <- as.character(1:n.vars)
### SET COLORS AND GET SOME GRAPHICS PARAMETERS
# Upon exiting the function, reset all graphical parameters to its value
# at the beginning.
if(!is.null(mfcol)) mfrow <- mfcol
# Get all the graphical parameters.
opp <- par("col.main","mfrow","mfcol","oma")
if( "col" %in% names(dots) )
{
col <- dots$col[var.subset]
dots$col=NULL
}
else
col = rainbow(n.vars+1)[2:(n.vars+1)]
if (write.plot=="show")
on.exit( par(opp), add=TRUE )
################# BEGIN get window dimensions #########################
if (length(mfrow)==1){
# i.e. mfrow is an integer either the default or a passed value,
# ie calc nrows & ncols
if ((overlay & write.plot=="show" & mfrow <5) | (mfrow <4)) {
if(write.plot=="show" & is.null(dev)) {
if (mfrow==1) {
height <- 14
width <- 12
} else {
width <- 10 #MODDED BY SW
height <- 8
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
par(mfrow=c(1, mfrow))
row <- 1
columns <- mfrow
} else {
columns <- ceiling(sqrt(mfrow))
row <- columns-1
if (row*columns<mfrow) row <- columns
if(write.plot=="show" & is.null(dev)) {
if (columns > row){
width <- 9.2
height <- max(row*width/columns * 1.2,5)
} else {
height <- 11
width <- max(height*columns/row * 0.83,4)
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
par(mfrow=c(row, columns))
}
pw <- row* columns - mfrow
nonwindow <- FALSE
} else { # if length(mfrow)==1)
if(!is.null(c(mfrow, mfcol))){
row <- mfrow[1]
columns <- mfrow[2]
nonwindow <- FALSE
} else {
nonwindow <- TRUE
row <- opp$mfrow[1]
columns <- opp$mfrow[2]
}
if(write.plot=="show" & is.null(dev)) {
if (columns > row){
width <- 16
height <- max(row*width/columns*1.2,5)
} else {
height <- 11
width <- max(height*columns/row*0.83,4)
}
#MODDED by SW - Add feature for single plot for non-overlay
if (length(which)==1){
width <-8
height <- 10
mfrow <- c(1,1)
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
if (any(mfrow!=par("mfrow"))) par(mfrow=mfrow)
if (!is.null(c(mfrow, mfcol))) mfrow <- row* columns
pw <- 0
}
if (!is.null(mfcol)) par(mfcol=c(row,columns))
else if (!is.null(mfrow)) par(mfrow=c(row,columns))
if (length(which)==1){
t <- ceiling(min(n.vars,12)/3)
# par(mfrow=c(t,3))
par(mfrow=c(1,1))
}
one.fig <- prod(par("mfrow")) == 1 # ie if mfrow=NULL
if (is.null(sub.caption) ) {
# construct the sub.caption
fktName <- "manyglm"
terms <- deparse(x$terms, width.cutoff = 70)[1]
nc <- nchar(terms)
if (length(x$terms)>1 | nc>60)
terms <- paste(substr(terms,1,min(60, nc)), "...")
sub.caption <- paste(fktName, "(", terms,")", sep="")
}
if(!is.null(sub.caption) && !one.fig) {
oma <- par("oma")
if (oma[3] < 2 & (is.null(dev) | !is.null(mfrow) | !is.null(mfcol))) {
oma[3]<- 5
par(oma = oma)
}
}
dr <- par("oma")[3]!=0
# Ensure that mfrow = NULL for the last command.
if (is.null(mfrow)) {mfrow <- row* columns}
if(all(opp$mfrow == c(row,columns))) opp$mfrow <- opp$mfcol <- NULL
if(keep.window & write.plot=="show") opp$mfrow <- opp$mfcol <- opp$oma <- NULL
##################### END get window dimensions ##################
####################### BEGIN selection of colors ##################
lcols <- length(col)
if (lcols==p & lcols != n.vars) {
# Adjust the colors to the subset
col <- col[var.subset]
lcols <- n.vars
} else if (lcols>n.vars) {
col <- col[var.subset]
lcols <- n.vars
warning("Only the first ", n.vars, " colors will be used for plotting.")
} else if (lcols>1 & lcols<n.vars) {
col <- col[1]
lcols <- 1
warning("The vector of colors has inappropriate length.
Only the first color will be used")
}
color <- col
if (lcols == 1) color <- rep(color, times = n.vars)
####################### END selection of colors #################
if (any(show[2:3])) {
if (df.residual(x)==0)
stop("Plot(s) ", c(2,3)[show[2:3]], " cannot be drawn: standardized residuals cannot be calculated, as there are no degrees of freedom")
else {
#############################
## Q: why weighed residuals?
############################
rs <- if (is.null(w)) { r } # weighed residuals
else sqrt(w) * r
if (substr(res.type,1,3)=="pit") {
if (res.type=="pit.norm") ylab23<-"Dunn-Smyth Residuals"
else ylab23 <- "PIT Residuals."
}
else ylab23 <- "Standard Pearson residuals."
rs[is.infinite(rs)] <- NaN
}
}
if (any(show[c(1, 3)]))
# l.fit <- "Fitted values"
l.fit <- "Linear predictor value"
if (is.null(id.n)) id.n <- 0
else {
id.n <- as.integer(id.n)
if (id.n < 0 || id.n > n)
stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA)
if (id.n > 0) {
if (is.null(labels.id)) labels.id <- paste(1:n)
iid <- 1:id.n
# Obtain vector of positions of the abs. highest values, use with a vector.
if (overlay) {
show.r <- matrix(ncol=n.vars, nrow=id.n)
for (i in 1:n.vars) show.r[,i] <- (order(abs(r[,i]),decreasing=TRUE))[iid] +(i-1)*n
show.r <- c(show.r)
} else {
show.r <- matrix(ncol=n.vars, nrow=n)
for (i in 1:n.vars) show.r[,i] <- (order(abs(r[,i]), decreasing = TRUE))
show.r <- show.r[iid,]
}
if (any(show[2:3])) {
if (overlay) {
show.rs <- matrix(ncol=n.vars, nrow=id.n)
for (i in 1:n.vars) show.rs[,i] <-(order(abs(rs[,i]), decreasing = TRUE))[iid] +(i-1)*n
show.rs <- c(show.rs)
} else {
show.rs <- matrix(ncol=n.vars, nrow=n)
for (i in 1:n.vars) show.rs[,i] <- (order(abs(rs[,i]), decreasing = TRUE))
show.rs <- show.rs[iid,]
}
}
##### what on earth is THIS? #####
text.id <- function(x, y, labels, adj.x = TRUE, col="black") {
# function to write a text at a plot at the position labpos
labpos <- if (adj.x)
label.pos[1 + as.numeric(x > mean(range(x)))] else 3
text(x, y, labels, cex = cex.id, xpd = TRUE,pos = labpos, offset = 0.25, col=col)
}
}
}
#######THIS IS SECTION IS FOR OVERLAY=TRUE #########
# this creates only one plot per diagnostics #
####################################################
if (overlay | n.vars==1) {
# plot all variables together
if (missing(ask))
ask <- ( dev.interactive() & ((prod(mfrow) < length(which)) | (nonwindow & !is.null(dev)) ) )
if (ask) {
op <- par(ask = TRUE) # if TRUE, this should be preserved
on.exit(par(op), add=TRUE )
}
if (substr(legend.pos, 1,1)!="none") { # add a legend
ncoll<- ceiling(n.vars/(50/(row+0.5*row^2)))
cexl<- 1.5 #0.9
if (ncoll>3) {
ncoll<-3
cexl <- 0.6
}
leg <- substr(var.names, 1,(8/ncoll)+1)
}
#SW - Reset mfrow to be approprite for one plot, set legend position
if (length(which)==1) {
# dev.off()
if (legend == TRUE) {
# dev.new(height=6, width=8) # added for smaller window size
par(mfrow = c(1,1), oma=c(.5,.5,.5,4.5), mar=c(6, 4.5, 2, 5))
legend.pos="right"
}
else {
# dev.new(height=6, width=6) # added for smaller window size
par(mfrow = c(1,1), oma=c(.5,.5,.5,.5), mar=c(6,4.5,2,.5))
}
}
else if (length(which)==2) {
# dev.off()
# dev.new(height=6, width=12)
par(mfrow=c(1,2),oma=c(0.5,0.5,1,10), mar=c(4, 4.5, 2, 2))
}
else if (length(which)==3) {
# dev.new(height=12, widht=12)
par(mfrow=c(2,2),oma=c(2,2,2,2), mar=c(4, 4, 3, 3))
}
# The residual vs. fitted value plot
yhtmp <- c(yh)
yh.is.zero <- yhtmp < (-6)
# yh.is.zero <- yhtmp < max(-6,(-max(yhtmp)))#this line is wrong - it kicks out any value more negative than max(yh)
yh0 <- yhtmp[!yh.is.zero]
xlim <- range(yh0)
if (id.n > 0) # for compatibility with R2.2.1
ylim <- ylim + c(-0.08, 0.08) * diff(ylim)
# drop small values in the response
if (show[1]) {
# Use vector built of transposed x bzw y to plot in the right color
# yi.is.zero <- (yh[,1]<(-9)) # log(1e-4)
# plot(x=t(yh[!yi.is.zero,1]), y=t(r[!yi.is.zero,1]),type="p",col=palette()[1], ylab = "Pearson residuals", xlab=l.fit, main = main, ylim=ylim, xlim=xlim, cex.lab=clab, cex.axis=caxis, cex=cex, lwd=lwd, font.main=2)
# for (i in 2:n.vars) {
# yi.is.zero <- (yh[,i] < (-9)) # log(1e-4)
# points(x=t(yh[!yi.is.zero,i]), y=t(r[!yi.is.zero,i]),type="p",col=palette()[i], cex=cex, lwd=lwd)
# }
rtmp <- c(r)
r0 <- rtmp[!yh.is.zero]
# ylim <- range(r0, na.rm = TRUE)
ylim <- range(max(abs(r0))*c(-1,1), na.rm = TRUE) #DW, 10/02/15: to make ylims symmetric about zero
colortmp <- rep(color, each=n)
color0 <- colortmp[!yh.is.zero]
if (substr(res.type,1,3)=="pit") {
if (res.type=="pit.norm") ylab="Dunn-Smyth Residuals"
else ylab="PIT Residuals"
}
else ylab="Pearson residuals"
plot(yh0, r0, xlab = l.fit, ylab = ylab, main=main, ylim=ylim, xlim=xlim, font.main=2, col=color0, cex.lab=clab, cex.axis=caxis, cex=cex, lwd=lwd)
# Add sub.caption, e.g, manyglm(tasm.cop ~ treatment)
if (one.fig)
title(sub = sub.caption, cex.sub=cex.caption-0.1, line=4.5, font.sub=2)
# Add the title Residual vs Fitted
mtext(caption[1], 3, 0.25, col=colmain, cex=cex.caption, font=2)
if (id.n > 0) { # add id.n labels
y.id <- (c(r))[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id( (c(yh))[show.r], y.id, (rep(labels.id, times=n.vars))[show.r], col=rep(col, each=id.n))
}
if (res.type=="pit.uniform") hmark=0.5 else hmark=0
abline(h = hmark, lty = 2, col = "black", lwd=2)
if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n"){
# add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, lwd=2, lty=0, x.intersp=0.5)
}
# mtext("(a)", side = 3, cex = 2, at=-1.8, line=0.3)
}
# The normal QQ plot
if (show[2]) {
# rs.is.zero <- rs < (1e-9) #DW, 23/10/14: this seems to be an error - why would r near zero be a problem?
rstmp <- c(rs)
ylim <- range(max(abs(rstmp))*c(-1,1), na.rm = TRUE) #DW, 23/10/14: to make ylims symmetric about zero
# ylim[2] <- ylim[2] + diff(ylim) * 0.075 #DW, 23/10/14: I don't see any point for this line
qq <- do.call( "qqnorm", c(list(rstmp, main = main, ylab = ylab23, ylim=ylim, col=color, asp=1, cex.lab=1.5, cex=1.5, cex.axis=1.5, cex.main=1.5, lwd=2), dots))
if (qqline) qqline(rstmp, lty = 3, col = "gray50", lwd=2)
# Use vector built of transposed x bzw y in order to plot
# in the right colors.
if (one.fig) do.call( "title", c(list(sub = sub.caption), dots))
mtext(caption[2], 3, 0.25, col=colmain, cex=cex.caption) # the title
if (id.n > 0) # add id.n labels
text.id(qq$x[show.rs], qq$y[show.rs], (rep(labels.id,
each=n.vars))[show.rs], col=rep(col, each=id.n))
if(legend == TRUE & substr(legend.pos, 1,1)[1]!="n"){
# add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, x.intersp=0.5, lwd=2, lty=0)
}
}
# The scale vs. location plot
if (show[3]) {
sqrtabsr <- c(sqrt(abs(rs)))
sqrtabsr0 <- sqrtabsr[!yh.is.zero]
ylim <- c(0, max(sqrtabsr0, na.rm = TRUE))
yl <- as.expression(substitute(sqrt(abs(YL)),list(YL = as.name(ylab23))))
# yi.is.zero <- (yh[,1]<(-9))
# plot(t(yh[!yi.is.zero,1]), t(sqrtabsr[!yi.is.zero,1]),type="p",col=palette()[1], ylab = yl, xlab=l.fit, main = main, ylim=ylim, xlim=xlim, cex=1.5, cex.lab=1.5, cex.axis=1.5)
# for (i in 2:n.vars) {
# yi.is.zero <- (yh[,i] < (-9))
# points(t(yh[!yi.is.zero,i]), t(sqrtabsr[!yi.is.zero,i]),type="p",col=palette()[i], cex=1.5, lwd=2)
# }
plot(yh0, sqrtabsr0, xlab = l.fit, ylab=yl,
main = main, ylim = ylim, xlim=xlim, type = "n")
panel(yh0, sqrtabsr0, col=color, cex=cex, cex.lab=clab, cex.axis=caxis, lwd=lwd)
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
mtext(caption[3], 3, 0.25, col=colmain, cex=cex.caption)
if (id.n > 0)
text.id(yh[show.rs], sqrtabsr[show.rs], (rep(labels.id,
each=n.vars))[show.rs], col=rep(col, each=id.n))
# ncoll <- ceiling(n.vars/6)
if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n") # add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, x.intersp=0.5)
}
# The cook distance plot
if (show[4]) {
# stop("cook's distance for glm is not implemented.")
# ymx <- max(cook, na.rm = TRUE) # error here, what is cook?
# if (id.n > 0) {
# show.r <- matrix(ncol=n.vars, nrow=id.n)
# for (i in 1:n.vars)
# show.r[,i] <- (order(-cook[,i]))[iid] +(i-1)*n
# show.r <- c(show.r)
# ymx <- ymx * 1.075
# }
# obsno <- rep(1:n, each = n.vars) + rep.int((1:n.vars)/(2*n.vars),times =n)
# # Use vector built of transposed x bzw y in order to plot in the right colors.
# do.call( "plot", c(list(obsno, c(t(cook)), xlab = "Obs. number", ylab = "Cook's distance", main = main, ylim = c(0, ymx), type = "h", col=color), dots))
#
# if (one.fig) do.call( "title", c(list(sub = sub.caption), dots))
# mtext(caption[4], 3, 0.25, col=colmain, cex=cex.caption)
#
# if (id.n > 0) {
# txtxshow <- show.r + rep((1:n.vars)/(2*n.vars), each =id.n)-rep((0:(n.vars-1))*n, each =id.n)
# text.id(txtxshow, (c(cook))[show.r], (rep(labels.id,times=n.vars))[show.r], adj.x = FALSE, col=rep(col, each=id.n))
# }
#
# if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n") # add a legend
# legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl,inset=-0.15,xpd=NA)
}
if (legend==TRUE & legend.pos=="nextplot" ) legend("right", legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.5,xpd=NA)
# add a subcaption
if (!one.fig && !is.null(sub.caption) && dr)
mtext(sub.caption, outer = TRUE, cex = 1.1*par("cex.main"),col= par("col.main") )
if(n.vars < p) {
if (missing(var.subset) | is.null(var.subset) | !is.numeric(var.subset))
message("Only the variables ",paste(colnames(r), collapse = ", "), " were included in the plot", typeofvarsubset, ".")
}
return(invisible())
#######THIS IS SECTION IS FOR OVERLAY=FALSE #########
# creates a set of diagnostic plots for each spp #
#####################################################
} else {
nplots <- length(which)*n.vars
if (missing(ask))
ask <- ( dev.interactive() & ((mfrow < nplots )|(nonwindow & !is.null(dev)) ) )
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op), add=TRUE )
}
if (!one.fig && dr) {
# Define a function 'scaption' to draw the sub.caption and / or open a new
# window after mfrow plots.
# par("oma")[3]: the size of the outer margins of the top in lines of text.
scaption <- function(i) {
if (i==mfrow) {
mtext( sub.caption, outer = TRUE, cex =1.1*par("cex.main"),col= par("col.main"))
k <- 0
while(k<pw) k<-k+1
return(1)
} else return(i+1)
}
} else scaption<- function(i) {}
scapt <- 1
for (i in 1:n.vars){
# draw plots for all variables
if (show[1]) {
ri <- r[,i]
yhi <- yh[,i]
ylim <- range(ri, na.rm = TRUE)
if (id.n > 0)
# for compatibility with R 2.2.1
ylim <- ylim + c(-0.08, 0.08) * diff(ylim)
if (res.type=="pit") ylab="Random Quantile Residuals"
else ylab="Pearson residuals"
do.call( "plot", c(list(yhi, ri, xlab = l.fit, ylab=ylab, main = main, ylim = ylim, type = "n", asp=asp), dots))
do.call( "panel", c(list(yhi, ri, col=color[i]), dots))
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[1], sep="\n")
else capt <- caption[1]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
if (id.n > 0) {
# draw id.n labels in the plot
y.id <- ri[show.r[,i]]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id(yhi[show.r[,i]], y.id, labels.id[show.r[,i]])
}
# abline(h = 0, lty = 3, col = "grey")
scapt <- scaption(scapt)
}
if (show[2]) {
rsi <- rs[,i]
ylim <- range(rsi, na.rm = TRUE)
ylim[2] <- ylim[2] + diff(ylim) * 0.075
qq <- do.call( "qqnorm", c(list(rsi, main = main, ylab = ylab23, ylim = ylim, col=color[i], asp=asp), dots))
if (qqline)
qqline(rsi, lty = 3, col = "gray50")
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[2], sep="\n")
else capt <- caption[2]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
if (id.n > 0)
text.id(qq$x[show.rs[,i]], qq$y[show.rs[,i]],labels.id[show.rs[,i]])
scapt <- scaption(scapt)
}
if (show[3]) {
sqrtabsr <- sqrt(abs(rs[,i]))
ylim <- c(0, max(sqrtabsr, na.rm = TRUE))
yl <- as.expression( substitute(sqrt(abs(YL)),list(YL = as.name(ylab23))))
do.call( "plot", c(list(yh, sqrtabsr, xlab = l.fit, ylab = yl, main = main, ylim = ylim, type = "n", cex=1.5), dots))
do.call( "panel", c(list(yh, sqrtabsr, col=color[i]), dots) )
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[3], sep="\n")
else capt <- caption[3]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
# if (id.n > 0) # draw id.n labels in the plot
# text.id(yhn0[show.rs[,i]], sqrtabsr[show.rs[,i]],labels.id[show.rs[,i]] )
scapt <- scaption(scapt)
}
if (show[4]) {
stop("cook's distance for glm is not implemented.")
# if (id.n > 0) {
# show.r4 <- order(-cook[,i])[iid]
# ymx <- cook[show.r4[1],i] * 1.075
# } else ymx <- max(cook[,i], na.rm = TRUE)
#
# do.call( "plot", c(list(cook[,i], xlab = "Obs. number", ylab = "Cook's distance", main = main, ylim = c(0, ymx), type = "h", col=color[i]), dots))
# if (one.fig)
# do.call( "title", c(list(sub = sub.caption), dots))
#
# if (missing(caption))
# capt <- paste(var.names[i], caption[4], sep="\n")
# else capt <- caption[4]
#
# mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
#
# if (id.n > 0) # draw id.n labels in the plot
# text.id(show.r4, cook[show.r4,i], labels.id[show.r4], adj.x = FALSE)
# scapt <- scaption(scapt)
}
} # end for
if(n.vars < p) {
if(missing(var.subset)|is.null(var.subset)|!is.numeric(var.subset))
tmp <- " \n(the variables with highest total abundance)"
else tmp <- " (user selected)"
}
message("Only the variables ", paste(colnames(r), collapse = ", "), " were included in the plot", tmp, ".")
}
return(invisible())
}
| /R/default.plot.manyglm.R | no_license | eddelbuettel/mvabund | R | false | false | 28,218 | r | ################################################################################
# PLOT.MANYLM, PLOT.MANYGLM: #
# Plot for evaluation of goodness of fit for lm.mvabund objects #
################################################################################
default.plot.manyglm <- function(x, which = 1, res.type="pit.norm", caption = c("Residuals vs Fitted","Normal Q-Q", "Scale-Location", "Cook's distance"), overlay=TRUE, n.vars=Inf, var.subset=NULL, panel = if (add.smooth) panel.smooth else points, sub.caption = NULL, main = "", ask, ..., id.n = if (overlay) 0 else 3, labels.id=rownames(x$Pearson.residuals), cex.id = 0.75, qqline = TRUE, add.smooth = if(!is.null(getOption("add.smooth"))){ getOption("add.smooth") } else TRUE, label.pos = c(4, 2), cex.caption=1.5, asp = 1, legend.pos= "nextplot", mfrow= if(overlay) {length(which)+(legend.pos=="nextplot")} else if(write.plot=="show") c(min(n.vars,3),length(which)) else length(which), mfcol=NULL, write.plot="show", filename="plot.mvabund", keep.window= if(is.null(c(mfrow,mfcol))) TRUE else FALSE, legend=FALSE)
{
# Diagnostic plots for a manylm/manyglm fit:
#   which = 1  residuals vs linear predictor
#   which = 2  normal Q-Q plot
#   which = 3  scale-location plot
#   which = 4  Cook's distance (not implemented here; dropped with a warning)
# With overlay=TRUE all response variables are drawn in one panel per plot
# type (colour-coded); with overlay=FALSE each response gets its own panels.
# res.type selects the residual flavour: "pit.norm" (Dunn-Smyth),
# "pit.uniform" (PIT) or Pearson residuals.
# Pull selected graphical parameters out of '...', falling back to defaults.
allargs <- match.call(expand.dots = FALSE)
dots <- allargs$...
if ("cex" %in% names(dots)) cex <- dots$cex
else cex <- 1.5
if ("cex.lab" %in% names(dots)) clab <- dots$cex.lab
else clab <- 1.5
if ("col.lab" %in% names(dots)) colab <- dots$col.lab
else colab <- par("col.lab")
if ("lwd" %in% names(dots)) lwd <- dots$lwd
else lwd <- 2
if ("cex.axis" %in% names(dots)) caxis <- dots$cex.axis
else caxis <- 1.5
dev <- dev.list()
dev.name <- getOption("device")
# When not plotting to screen, open the requested file device now;
# it is closed again via on.exit().
if (write.plot!="show") {
if (write.plot=="eps" | write.plot=="postscript")
postscript(paste(filename,".eps", sep="") )
else if (write.plot=="pdf")
pdf(paste(filename,".pdf", sep="") )
else if (write.plot=="jpeg" )
jpeg(paste(filename,".jpeg", sep=""))
else if (write.plot=="bmp" )
bmp(paste(filename,".bmp", sep=""))
else if (write.plot=="png" )
png(paste(filename,".png", sep=""))
on.exit( dev.off() )
}
# Remove '...' arguments that this function sets itself, so they are not
# passed twice to the low-level plotting calls.
if (length(dots)>0)
{
# in the plot function.
deactive <- c("xlab", "ylab", "ylim", "sub", "type")
deactivate <- (1:length(dots))[names(dots) %in% deactive ]
for (i in length(deactivate):1)
dots[ deactivate[i] ]<-NULL #fixed up [[]], due to compile error (v2.10).
dots <- lapply( dots, eval, parent.frame() )
if( "col.main" %in% names(dots) ) colmain <- dots$col.main
else colmain <- par("col.main")
}
else colmain <- par("col.main")
if (!inherits(x, c("manylm", "manyglm"))) warning("use 'plot.manylm' only with \"manylm\" or \"manyglm\" objects")
if (!is.numeric(which) || any(which < 1) || any(which > 4))
stop("'which' must be in 1:4")
isGlm <- inherits(x, "manyglm")
# Logical mask of which of the four plot types to draw.
show <- rep.int(FALSE, times = 4)
show[which] <- TRUE
if(ncol(x$x) > 0 ) empty <- FALSE
else empty <- TRUE
# Cook's distance is not available for manyglm objects: drop plot 4.
# if(empty && show[4]) {
if(show[4])
{
if (length(which)==1) stop("Plot no. 4 cannot be drawn, as Cooks distance cannot be calculated for an empty model")
else
{
warning("Plot no. 4 cannot be drawn, as Cooks distance cannot be calculated for an empty model")
show[4] <- FALSE
which <- which[-which[which==4]]
}
}
# Residual matrix r: PIT-based (uniform or normalised Dunn-Smyth via
# residuals()) or Pearson, depending on res.type.
if (substr(res.type,1,3)=="pit")
{
r <- as.matrix(x$PIT.residuals)
if (res.type=="pit.norm") r <- residuals(x)
}
else r <- as.matrix(x$Pearson.residuals) # residuals(x)
yh <- as.matrix(x$linear.predictor)
# yh <- as.matrix(x$fitted.values)
# w <- x$sqrt.weight * x$sqrt.weight
w <- NULL
# Change logical var.subset to numerical var.subset, if necessary. Note that NA values are logical as well, but should be excluded here.
if(!is.null(var.subset) & !is.numeric(var.subset))
var.subset <- which(var.subset[!is.na(var.subset)])
# miss.varsubset<-!is.numeric(var.subset) # If this function is called within another, the missing function could be tricked out.
if(is.null(labels.id)) labels.id <- as.character(1:nrow(r))
# Drop observations with zero weight (dead code while w is NULL above).
if (!is.null(w)) {
wind <- w != 0
if (isGlm & is.matrix(w)){
wind <- rowSums( wind) != 0
w <- w[wind,, drop=FALSE]
} else w <- w[wind]
r <- r[wind,, drop=FALSE]
yh <- yh[wind,, drop=FALSE]
labels.id <- labels.id[wind]
}
n <- nrow(r)
p <- ncol(r)
######## BEGIN edit var.subset, n.vars and r & fitted values #########
# subset allows double variables
# Do some dimension checks for the subset.
if (missing(var.subset) | is.null(var.subset) | !is.numeric(var.subset)) {
# Plot the n.var variables with highest abundances
if ( p < n.vars ) {
# warning("You have passed an invalid number of variables 'n.vars' to be included in the plot. All variables will be included instead.")
n.vars <- p
}
y <- as.matrix(x$y)
if (!is.null(w))
sum.y <- t(y[wind,,drop=FALSE]) %*% matrix(1,ncol=1,nrow=n)
else sum.y <- t(y) %*% matrix(1,ncol=1,nrow=n)
# Find abundance ranks OF MVABUND.OBJECT.1.
var.subset <- order(sum.y, decreasing = TRUE)
typeofvarsubset <- " \n(the variables with highest total abundance)"
}
else { # if var.subset is specified
if ( p < max(var.subset) )
stop ("You have passed an invalid var.subset")
var.subset.dim <- length(var.subset)
if ( missing(n.vars) | n.vars != var.subset.dim ) {
n.vars <- var.subset.dim
warning("Number of variables 'n.var' is set to the length of 'var.subset'.")
}
typeofvarsubset <- " (user selected)"
}
############# Extract relevant data ###################
r <- r[,var.subset, drop=FALSE]
yh <- yh[,var.subset, drop=FALSE]
w <- w[,var.subset, drop=FALSE]
######### END edit var.subset, n.vars and r & fitted values ###########
var.names <- colnames(r)
if(is.null(var.names)) var.names <- as.character(1:n.vars)
### SET COLORS AND GET SOME GRAPHICS PARAMETERS
# Upon exiting the function, reset all graphical parameters to its value
# at the beginning.
if(!is.null(mfcol)) mfrow <- mfcol
# Get all the graphical parameters.
opp <- par("col.main","mfrow","mfcol","oma")
if( "col" %in% names(dots) )
{
col <- dots$col[var.subset]
dots$col=NULL
}
else
col = rainbow(n.vars+1)[2:(n.vars+1)]
if (write.plot=="show")
on.exit( par(opp), add=TRUE )
################# BEGIN get window dimensions #########################
if (length(mfrow)==1){
# i.e. mfrow is an integer either the default or a passed value,
# ie calc nrows & ncols
if ((overlay & write.plot=="show" & mfrow <5) | (mfrow <4)) {
if(write.plot=="show" & is.null(dev)) {
if (mfrow==1) {
height <- 14
width <- 12
} else {
width <- 10 #MODDED BY SW
height <- 8
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
par(mfrow=c(1, mfrow))
row <- 1
columns <- mfrow
} else {
columns <- ceiling(sqrt(mfrow))
row <- columns-1
if (row*columns<mfrow) row <- columns
if(write.plot=="show" & is.null(dev)) {
if (columns > row){
width <- 9.2
height <- max(row*width/columns * 1.2,5)
} else {
height <- 11
width <- max(height*columns/row * 0.83,4)
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
par(mfrow=c(row, columns))
}
pw <- row* columns - mfrow
nonwindow <- FALSE
} else { # if length(mfrow)==1)
if(!is.null(c(mfrow, mfcol))){
row <- mfrow[1]
columns <- mfrow[2]
nonwindow <- FALSE
} else {
nonwindow <- TRUE
row <- opp$mfrow[1]
columns <- opp$mfrow[2]
}
if(write.plot=="show" & is.null(dev)) {
if (columns > row){
width <- 16
height <- max(row*width/columns*1.2,5)
} else {
height <- 11
width <- max(height*columns/row*0.83,4)
}
#MODDED by SW - Add feature for single plot for non-overlay
if (length(which)==1){
width <-8
height <- 10
mfrow <- c(1,1)
}
dev.off()
do.call(dev.name, args=list(height=height,width=width))
}
if (any(mfrow!=par("mfrow"))) par(mfrow=mfrow)
if (!is.null(c(mfrow, mfcol))) mfrow <- row* columns
pw <- 0
}
if (!is.null(mfcol)) par(mfcol=c(row,columns))
else if (!is.null(mfrow)) par(mfrow=c(row,columns))
if (length(which)==1){
t <- ceiling(min(n.vars,12)/3)
# par(mfrow=c(t,3))
par(mfrow=c(1,1))
}
one.fig <- prod(par("mfrow")) == 1 # ie if mfrow=NULL
if (is.null(sub.caption) ) {
# construct the sub.caption
fktName <- "manyglm"
terms <- deparse(x$terms, width.cutoff = 70)[1]
nc <- nchar(terms)
if (length(x$terms)>1 | nc>60)
terms <- paste(substr(terms,1,min(60, nc)), "...")
sub.caption <- paste(fktName, "(", terms,")", sep="")
}
# Reserve space in the top outer margin for the sub.caption if needed.
if(!is.null(sub.caption) && !one.fig) {
oma <- par("oma")
if (oma[3] < 2 & (is.null(dev) | !is.null(mfrow) | !is.null(mfcol))) {
oma[3]<- 5
par(oma = oma)
}
}
dr <- par("oma")[3]!=0
# Ensure that mfrow = NULL for the last command.
if (is.null(mfrow)) {mfrow <- row* columns}
if(all(opp$mfrow == c(row,columns))) opp$mfrow <- opp$mfcol <- NULL
if(keep.window & write.plot=="show") opp$mfrow <- opp$mfcol <- opp$oma <- NULL
##################### END get window dimensions ##################
####################### BEGIN selection of colors ##################
# Reconcile the supplied colour vector with the chosen variable subset:
# one colour per plotted variable, or a single colour recycled.
lcols <- length(col)
if (lcols==p & lcols != n.vars) {
# Adjust the colors to the subset
col <- col[var.subset]
lcols <- n.vars
} else if (lcols>n.vars) {
col <- col[var.subset]
lcols <- n.vars
warning("Only the first ", n.vars, " colors will be used for plotting.")
} else if (lcols>1 & lcols<n.vars) {
col <- col[1]
lcols <- 1
warning("The vector of colors has inappropriate length.
Only the first color will be used")
}
color <- col
if (lcols == 1) color <- rep(color, times = n.vars)
####################### END selection of colors #################
# Residuals and y-axis label for the Q-Q and scale-location plots.
if (any(show[2:3])) {
if (df.residual(x)==0)
stop("Plot(s) ", c(2,3)[show[2:3]], " cannot be drawn: standardized residuals cannot be calculated, as there are no degrees of freedom")
else {
#############################
## Q: why weighed residuals?
############################
rs <- if (is.null(w)) { r } # weighed residuals
else sqrt(w) * r
if (substr(res.type,1,3)=="pit") {
if (res.type=="pit.norm") ylab23<-"Dunn-Smyth Residuals"
else ylab23 <- "PIT Residuals."
}
else ylab23 <- "Standard Pearson residuals."
rs[is.infinite(rs)] <- NaN
}
}
if (any(show[c(1, 3)]))
# l.fit <- "Fitted values"
l.fit <- "Linear predictor value"
# id.n: number of extreme points to label per variable; show.r / show.rs
# hold the (flattened) indices of the largest |residuals|.
if (is.null(id.n)) id.n <- 0
else {
id.n <- as.integer(id.n)
if (id.n < 0 || id.n > n)
stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA)
if (id.n > 0) {
if (is.null(labels.id)) labels.id <- paste(1:n)
iid <- 1:id.n
# Obtain vector of positions of the abs. highest values, use with a vector.
if (overlay) {
show.r <- matrix(ncol=n.vars, nrow=id.n)
for (i in 1:n.vars) show.r[,i] <- (order(abs(r[,i]),decreasing=TRUE))[iid] +(i-1)*n
show.r <- c(show.r)
} else {
show.r <- matrix(ncol=n.vars, nrow=n)
for (i in 1:n.vars) show.r[,i] <- (order(abs(r[,i]), decreasing = TRUE))
show.r <- show.r[iid,]
}
if (any(show[2:3])) {
if (overlay) {
show.rs <- matrix(ncol=n.vars, nrow=id.n)
for (i in 1:n.vars) show.rs[,i] <-(order(abs(rs[,i]), decreasing = TRUE))[iid] +(i-1)*n
show.rs <- c(show.rs)
} else {
show.rs <- matrix(ncol=n.vars, nrow=n)
for (i in 1:n.vars) show.rs[,i] <- (order(abs(rs[,i]), decreasing = TRUE))
show.rs <- show.rs[iid,]
}
}
##### what on earth is THIS? #####
# Helper: label the id.n most extreme points, placing text left/right of
# the point depending on which side of the x-range midpoint it lies.
text.id <- function(x, y, labels, adj.x = TRUE, col="black") {
# function to write a text at a plot at the position labpos
labpos <- if (adj.x)
label.pos[1 + as.numeric(x > mean(range(x)))] else 3
text(x, y, labels, cex = cex.id, xpd = TRUE,pos = labpos, offset = 0.25, col=col)
}
}
}
#######THIS IS SECTION IS FOR OVERLAY=TRUE #########
# this creates only one plot per diagnostics #
####################################################
if (overlay | n.vars==1) {
# plot all variables together
if (missing(ask))
ask <- ( dev.interactive() & ((prod(mfrow) < length(which)) | (nonwindow & !is.null(dev)) ) )
if (ask) {
op <- par(ask = TRUE) # if TRUE, this should be preserved
on.exit(par(op), add=TRUE )
}
if (substr(legend.pos, 1,1)!="none") { # add a legend
ncoll<- ceiling(n.vars/(50/(row+0.5*row^2)))
cexl<- 1.5 #0.9
if (ncoll>3) {
ncoll<-3
cexl <- 0.6
}
leg <- substr(var.names, 1,(8/ncoll)+1)
}
#SW - Reset mfrow to be approprite for one plot, set legend position
if (length(which)==1) {
# dev.off()
if (legend == TRUE) {
# dev.new(height=6, width=8) # added for smaller window size
par(mfrow = c(1,1), oma=c(.5,.5,.5,4.5), mar=c(6, 4.5, 2, 5))
legend.pos="right"
}
else {
# dev.new(height=6, width=6) # added for smaller window size
par(mfrow = c(1,1), oma=c(.5,.5,.5,.5), mar=c(6,4.5,2,.5))
}
}
else if (length(which)==2) {
# dev.off()
# dev.new(height=6, width=12)
par(mfrow=c(1,2),oma=c(0.5,0.5,1,10), mar=c(4, 4.5, 2, 2))
}
else if (length(which)==3) {
# dev.new(height=12, widht=12)
par(mfrow=c(2,2),oma=c(2,2,2,2), mar=c(4, 4, 3, 3))
}
# The residual vs. fitted value plot
# Observations with very small linear predictor (< -6 on the link scale)
# are excluded from the x-axis.
yhtmp <- c(yh)
yh.is.zero <- yhtmp < (-6)
# yh.is.zero <- yhtmp < max(-6,(-max(yhtmp)))#this line is wrong - it kicks out any value more negative than max(yh)
yh0 <- yhtmp[!yh.is.zero]
xlim <- range(yh0)
# NOTE(review): 'ylim' has not been defined at this point, so this line
# errors when id.n > 0; it appears to belong after the ylim assignment
# inside the show[1] branch below -- TODO confirm and fix upstream.
if (id.n > 0) # for compatibility with R2.2.1
ylim <- ylim + c(-0.08, 0.08) * diff(ylim)
# drop small values in the response
if (show[1]) {
# Use vector built of transposed x bzw y to plot in the right color
# yi.is.zero <- (yh[,1]<(-9)) # log(1e-4)
# plot(x=t(yh[!yi.is.zero,1]), y=t(r[!yi.is.zero,1]),type="p",col=palette()[1], ylab = "Pearson residuals", xlab=l.fit, main = main, ylim=ylim, xlim=xlim, cex.lab=clab, cex.axis=caxis, cex=cex, lwd=lwd, font.main=2)
# for (i in 2:n.vars) {
# yi.is.zero <- (yh[,i] < (-9)) # log(1e-4)
# points(x=t(yh[!yi.is.zero,i]), y=t(r[!yi.is.zero,i]),type="p",col=palette()[i], cex=cex, lwd=lwd)
# }
rtmp <- c(r)
r0 <- rtmp[!yh.is.zero]
# ylim <- range(r0, na.rm = TRUE)
ylim <- range(max(abs(r0))*c(-1,1), na.rm = TRUE) #DW, 10/02/15: to make ylims symmetric about zero
colortmp <- rep(color, each=n)
color0 <- colortmp[!yh.is.zero]
if (substr(res.type,1,3)=="pit") {
if (res.type=="pit.norm") ylab="Dunn-Smyth Residuals"
else ylab="PIT Residuals"
}
else ylab="Pearson residuals"
plot(yh0, r0, xlab = l.fit, ylab = ylab, main=main, ylim=ylim, xlim=xlim, font.main=2, col=color0, cex.lab=clab, cex.axis=caxis, cex=cex, lwd=lwd)
# Add sub.caption, e.g, manyglm(tasm.cop ~ treatment)
if (one.fig)
title(sub = sub.caption, cex.sub=cex.caption-0.1, line=4.5, font.sub=2)
# Add the title Residual vs Fitted
mtext(caption[1], 3, 0.25, col=colmain, cex=cex.caption, font=2)
if (id.n > 0) { # add id.n labels
y.id <- (c(r))[show.r]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id( (c(yh))[show.r], y.id, (rep(labels.id, times=n.vars))[show.r], col=rep(col, each=id.n))
}
# Reference line: PIT residuals are centred at 0.5, others at 0.
if (res.type=="pit.uniform") hmark=0.5 else hmark=0
abline(h = hmark, lty = 2, col = "black", lwd=2)
if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n"){
# add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, lwd=2, lty=0, x.intersp=0.5)
}
# mtext("(a)", side = 3, cex = 2, at=-1.8, line=0.3)
}
# The normal QQ plot
if (show[2]) {
# rs.is.zero <- rs < (1e-9) #DW, 23/10/14: this seems to be an error - why would r near zero be a problem?
rstmp <- c(rs)
ylim <- range(max(abs(rstmp))*c(-1,1), na.rm = TRUE) #DW, 23/10/14: to make ylims symmetric about zero
# ylim[2] <- ylim[2] + diff(ylim) * 0.075 #DW, 23/10/14: I don't see any point for this line
qq <- do.call( "qqnorm", c(list(rstmp, main = main, ylab = ylab23, ylim=ylim, col=color, asp=1, cex.lab=1.5, cex=1.5, cex.axis=1.5, cex.main=1.5, lwd=2), dots))
if (qqline) qqline(rstmp, lty = 3, col = "gray50", lwd=2)
# Use vector built of transposed x bzw y in order to plot
# in the right colors.
if (one.fig) do.call( "title", c(list(sub = sub.caption), dots))
mtext(caption[2], 3, 0.25, col=colmain, cex=cex.caption) # the title
if (id.n > 0) # add id.n labels
text.id(qq$x[show.rs], qq$y[show.rs], (rep(labels.id,
each=n.vars))[show.rs], col=rep(col, each=id.n))
if(legend == TRUE & substr(legend.pos, 1,1)[1]!="n"){
# add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, x.intersp=0.5, lwd=2, lty=0)
}
}
# The scale vs. location plot
if (show[3]) {
sqrtabsr <- c(sqrt(abs(rs)))
sqrtabsr0 <- sqrtabsr[!yh.is.zero]
ylim <- c(0, max(sqrtabsr0, na.rm = TRUE))
yl <- as.expression(substitute(sqrt(abs(YL)),list(YL = as.name(ylab23))))
# yi.is.zero <- (yh[,1]<(-9))
# plot(t(yh[!yi.is.zero,1]), t(sqrtabsr[!yi.is.zero,1]),type="p",col=palette()[1], ylab = yl, xlab=l.fit, main = main, ylim=ylim, xlim=xlim, cex=1.5, cex.lab=1.5, cex.axis=1.5)
# for (i in 2:n.vars) {
# yi.is.zero <- (yh[,i] < (-9))
# points(t(yh[!yi.is.zero,i]), t(sqrtabsr[!yi.is.zero,i]),type="p",col=palette()[i], cex=1.5, lwd=2)
# }
plot(yh0, sqrtabsr0, xlab = l.fit, ylab=yl,
main = main, ylim = ylim, xlim=xlim, type = "n")
panel(yh0, sqrtabsr0, col=color, cex=cex, cex.lab=clab, cex.axis=caxis, lwd=lwd)
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
mtext(caption[3], 3, 0.25, col=colmain, cex=cex.caption)
if (id.n > 0)
text.id(yh[show.rs], sqrtabsr[show.rs], (rep(labels.id,
each=n.vars))[show.rs], col=rep(col, each=id.n))
# ncoll <- ceiling(n.vars/6)
if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n") # add a legend
legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.35,xpd=NA, x.intersp=0.5)
}
# The cook distance plot
# (intentionally disabled: Cook's distance is not implemented for manyglm)
if (show[4]) {
# stop("cook's distance for glm is not implemented.")
# ymx <- max(cook, na.rm = TRUE) # error here, what is cook?
# if (id.n > 0) {
# show.r <- matrix(ncol=n.vars, nrow=id.n)
# for (i in 1:n.vars)
# show.r[,i] <- (order(-cook[,i]))[iid] +(i-1)*n
# show.r <- c(show.r)
# ymx <- ymx * 1.075
# }
# obsno <- rep(1:n, each = n.vars) + rep.int((1:n.vars)/(2*n.vars),times =n)
# # Use vector built of transposed x bzw y in order to plot in the right colors.
# do.call( "plot", c(list(obsno, c(t(cook)), xlab = "Obs. number", ylab = "Cook's distance", main = main, ylim = c(0, ymx), type = "h", col=color), dots))
#
# if (one.fig) do.call( "title", c(list(sub = sub.caption), dots))
# mtext(caption[4], 3, 0.25, col=colmain, cex=cex.caption)
#
# if (id.n > 0) {
# txtxshow <- show.r + rep((1:n.vars)/(2*n.vars), each =id.n)-rep((0:(n.vars-1))*n, each =id.n)
# text.id(txtxshow, (c(cook))[show.r], (rep(labels.id,times=n.vars))[show.r], adj.x = FALSE, col=rep(col, each=id.n))
# }
#
# if(legend==TRUE & substr(legend.pos, 1,1)[1]!="n") # add a legend
# legend(legend.pos, legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl,inset=-0.15,xpd=NA)
}
if (legend==TRUE & legend.pos=="nextplot" ) legend("right", legend=leg, col=color, pch=1, ncol=ncoll, cex=cexl-0.1,inset=-0.5,xpd=NA)
# add a subcaption
if (!one.fig && !is.null(sub.caption) && dr)
mtext(sub.caption, outer = TRUE, cex = 1.1*par("cex.main"),col= par("col.main") )
if(n.vars < p) {
if (missing(var.subset) | is.null(var.subset) | !is.numeric(var.subset))
message("Only the variables ",paste(colnames(r), collapse = ", "), " were included in the plot", typeofvarsubset, ".")
}
return(invisible())
#######THIS IS SECTION IS FOR OVERLAY=FALSE #########
# creates a set of diagnostic plots for each spp #
#####################################################
} else {
nplots <- length(which)*n.vars
if (missing(ask))
ask <- ( dev.interactive() & ((mfrow < nplots )|(nonwindow & !is.null(dev)) ) )
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op), add=TRUE )
}
if (!one.fig && dr) {
# Define a function 'scaption' to draw the sub.caption and / or open a new
# window after mfrow plots.
# par("oma")[3]: the size of the outer margins of the top in lines of text.
scaption <- function(i) {
if (i==mfrow) {
mtext( sub.caption, outer = TRUE, cex =1.1*par("cex.main"),col= par("col.main"))
k <- 0
while(k<pw) k<-k+1
return(1)
} else return(i+1)
}
} else scaption<- function(i) {}
scapt <- 1
# One set of panels per plotted response variable.
for (i in 1:n.vars){
# draw plots for all variables
if (show[1]) {
ri <- r[,i]
yhi <- yh[,i]
ylim <- range(ri, na.rm = TRUE)
if (id.n > 0)
# for compatibility with R 2.2.1
ylim <- ylim + c(-0.08, 0.08) * diff(ylim)
if (res.type=="pit") ylab="Random Quantile Residuals"
else ylab="Pearson residuals"
do.call( "plot", c(list(yhi, ri, xlab = l.fit, ylab=ylab, main = main, ylim = ylim, type = "n", asp=asp), dots))
do.call( "panel", c(list(yhi, ri, col=color[i]), dots))
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[1], sep="\n")
else capt <- caption[1]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
if (id.n > 0) {
# draw id.n labels in the plot
y.id <- ri[show.r[,i]]
y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3
text.id(yhi[show.r[,i]], y.id, labels.id[show.r[,i]])
}
# abline(h = 0, lty = 3, col = "grey")
scapt <- scaption(scapt)
}
if (show[2]) {
rsi <- rs[,i]
ylim <- range(rsi, na.rm = TRUE)
ylim[2] <- ylim[2] + diff(ylim) * 0.075
qq <- do.call( "qqnorm", c(list(rsi, main = main, ylab = ylab23, ylim = ylim, col=color[i], asp=asp), dots))
if (qqline)
qqline(rsi, lty = 3, col = "gray50")
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[2], sep="\n")
else capt <- caption[2]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
if (id.n > 0)
text.id(qq$x[show.rs[,i]], qq$y[show.rs[,i]],labels.id[show.rs[,i]])
scapt <- scaption(scapt)
}
if (show[3]) {
sqrtabsr <- sqrt(abs(rs[,i]))
ylim <- c(0, max(sqrtabsr, na.rm = TRUE))
yl <- as.expression( substitute(sqrt(abs(YL)),list(YL = as.name(ylab23))))
do.call( "plot", c(list(yh, sqrtabsr, xlab = l.fit, ylab = yl, main = main, ylim = ylim, type = "n", cex=1.5), dots))
do.call( "panel", c(list(yh, sqrtabsr, col=color[i]), dots) )
if (one.fig)
do.call( "title", c(list(sub = sub.caption), dots))
if (missing(caption))
capt <- paste(var.names[i], caption[3], sep="\n")
else capt <- caption[3]
mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
# if (id.n > 0) # draw id.n labels in the plot
# text.id(yhn0[show.rs[,i]], sqrtabsr[show.rs[,i]],labels.id[show.rs[,i]] )
scapt <- scaption(scapt)
}
if (show[4]) {
stop("cook's distance for glm is not implemented.")
# if (id.n > 0) {
# show.r4 <- order(-cook[,i])[iid]
# ymx <- cook[show.r4[1],i] * 1.075
# } else ymx <- max(cook[,i], na.rm = TRUE)
#
# do.call( "plot", c(list(cook[,i], xlab = "Obs. number", ylab = "Cook's distance", main = main, ylim = c(0, ymx), type = "h", col=color[i]), dots))
# if (one.fig)
# do.call( "title", c(list(sub = sub.caption), dots))
#
# if (missing(caption))
# capt <- paste(var.names[i], caption[4], sep="\n")
# else capt <- caption[4]
#
# mtext(capt, 3, 0.8, col=colmain, cex=cex.caption) # draw the title
#
# if (id.n > 0) # draw id.n labels in the plot
# text.id(show.r4, cook[show.r4,i], labels.id[show.r4], adj.x = FALSE)
# scapt <- scaption(scapt)
}
} # end for
if(n.vars < p) {
if(missing(var.subset)|is.null(var.subset)|!is.numeric(var.subset))
tmp <- " \n(the variables with highest total abundance)"
else tmp <- " (user selected)"
}
# NOTE(review): 'tmp' is only assigned when n.vars < p, so this message
# errors when all variables were plotted; the overlay branch guards the
# equivalent message inside the if -- TODO confirm and fix upstream.
message("Only the variables ", paste(colnames(r), collapse = ", "), " were included in the plot", tmp, ".")
}
return(invisible())
}
|
\name{arimaSSM}
\alias{arimaSSM}
\title{Create a State Space Representation of ARIMA Model}
\usage{
arimaSSM(y, arima, H = NULL, Q = NULL, u = NULL,
distribution = c("Gaussian", "Poisson", "Binomial"),
transform = c("none", "ldl", "augment"),
tolF = .Machine$double.eps^0.5,
tol0 = .Machine$double.eps^0.5)
}
\arguments{
\item{arima}{A list or a list of lists with components
\code{ar}, \code{ma} and \code{d}, giving the
autoregression and moving average coefficients, and the
degree of differencing for each series. If arima is a
single list, it is assumed that all \eqn{p} series have
same ARIMA structure. Otherwise first sublist gives the
ARIMA structure of the first series etc.}
\item{H}{A \eqn{p \times p}{p*p} covariance matrix (or
\eqn{p \times p \times n}{p*p*n} array in the time-varying
case) of the disturbance terms
\eqn{\epsilon_t}{\epsilon[t]} of the observation
equation. Default gives \eqn{p \times p}{p*p} zero matrix
ie. ordinary ARIMA model without additional noise.
Omitted in case of non-Gaussian distributions. Augment
the state vector if you want to add additional noise.}
\item{Q}{A \eqn{p \times p}{p*p} covariance matrix of the
disturbance terms \eqn{\eta_t}{\eta[t]} of the system
equation. Default is \eqn{p \times p}{p*p} identity
matrix ie. ordinary ARIMA model with disturbance terms
having unit variance.}
\item{y}{A time series object of class \code{ts}, or a
object that can be coerced to such.}
\item{u}{Only used with non-Gaussian distribution. See
details.}
\item{distribution}{Specify the distribution of the
observations. Default is "Gaussian".}
\item{transform}{The functions of \code{KFAS} require
diagonal covariance matrix \eqn{H_t}{H[t]}. If
\eqn{H_t}{H[t]} is not diagonal, model can be transformed
using one of the two options. Option \code{"ldl"}
performs LDL decomposition for covariance matrix
\eqn{H_t}{H[t]}, and multiplies the observation equation
with the \eqn{L_t^{-1}}{L[t]^{-1}}, so \eqn{\epsilon_t
\sim N(0,D_t)}{\epsilon[t] ~ N(0,D[t])}. Option
\code{"augment"} adds \eqn{\epsilon_t}{\epsilon[t]} to
the state vector, when \eqn{Q_t}{Q[t]} becomes block
diagonal with blocks \eqn{Q_t}{Q[t]} and \eqn{H_t}{H[t]}.
In case of univariate series, option \code{"ldl"} only
changes the \code{H_type} argument of the model to
\code{"Diagonal"}. Default is \code{"none"} which does no
transformation but checks if \eqn{H} is diagonal. If not,
\code{H_type} is set to \code{"Untransformed"}.}
\item{tolF}{Tolerance parameter for Finf. Smallest value
not counted for zero.}
\item{tol0}{Tolerance parameter for LDL decomposition,
determines which diagonal values are counted as zero.}
}
\description{
Function \code{arimaSSM} creates a state space
representation of ARIMA model.
}
\details{
The linear Gaussian state space model is given by
\deqn{y_t = Z_t \alpha_t + \epsilon_t,}{y[t] =
Z[t]\alpha[t] + \epsilon[t], (observation equation)}
\deqn{\alpha_{t+1} = T_t \alpha_t + R_t
\eta_t,}{\alpha[t+1] = T[t]\alpha[t] + R[t]\eta[t],
(transition equation)}
where \eqn{\epsilon_t ~ N(0,H_t)}{\epsilon[t] ~
N(0,H[t])}, \eqn{\eta_t ~ N(0,Q_t)}{\eta[t] ~ N(0,Q[t])}
and \eqn{\alpha_1 ~ N(a_1,P_1)}{\alpha[1] ~ N(a[1],P[1])}
independently of each other. In case of non-Gaussian
observations, the observation equation is of form
\eqn{p(y_t|\theta_t) =
p(y_t|Z_t\alpha_t)}{p(y[t]|\theta[t]) =
p(y[t]|Z[t]\alpha[t])}, with
\eqn{p(y_t|\theta_t)}{p(y[t]|\theta[t])} being one of the
following:
If observations are Poisson distributed, parameter of
Poisson distribution is
\eqn{u_t\lambda_t}{u[t]\lambda[t]} and \eqn{\theta_t =
log(\lambda_t)}{\theta[t]=log(\lambda[t])}.
If observations are from binomial distribution, \eqn{u}
is a vector specifying the number of trials at times
\eqn{1,\ldots,n}, and \eqn{\theta_t =
log[\pi_t/(1-\pi_t)]}{\theta[t] =
log(\pi[t]/(1-\pi[t]))}, where \eqn{\pi_t}{\pi[t]} is the
probability of success at time \eqn{t}.
For non-Gaussian models \eqn{u_t=1}{u[t]=1} as a default.
For Gaussian models, parameter is omitted.
Only univariate observations are supported when
observation equation is non-Gaussian.
}
\seealso{
\code{\link{regSSM}} for state space representation of a
regression model, \code{\link{structSSM}} for structural
time series model, and \code{\link{SSModel}} for custom
\code{SSModel} object.
}
| /man/arimaSSM.Rd | no_license | snowdj/KFAS | R | false | false | 4,480 | rd | \name{arimaSSM}
\alias{arimaSSM}
\title{Create a State Space Representation of ARIMA Model}
\usage{
arimaSSM(y, arima, H = NULL, Q = NULL, u = NULL,
distribution = c("Gaussian", "Poisson", "Binomial"),
transform = c("none", "ldl", "augment"),
tolF = .Machine$double.eps^0.5,
tol0 = .Machine$double.eps^0.5)
}
\arguments{
\item{arima}{A list or a list of lists with components
\code{ar}, \code{ma} and \code{d}, giving the
autoregression and moving average coefficients, and the
degree of differencing for each series. If arima is a
single list, it is assumed that all \eqn{p} series have
same ARIMA structure. Otherwise first sublist gives the
ARIMA structure of the first series etc.}
\item{H}{A \eqn{p \times p}{p*p} covariance matrix (or
\eqn{p \times p \times n}{p*p*n} array in the time-varying
case) of the disturbance terms
\eqn{\epsilon_t}{\epsilon[t]} of the observation
equation. Default gives \eqn{p \times p}{p*p} zero matrix
ie. ordinary ARIMA model without additional noise.
Omitted in case of non-Gaussian distributions. Augment
the state vector if you want to add additional noise.}
\item{Q}{A \eqn{p \times p}{p*p} covariance matrix of the
disturbance terms \eqn{\eta_t}{\eta[t]} of the system
equation. Default is \eqn{p \times p}{p*p} identity
matrix ie. ordinary ARIMA model with disturbance terms
having unit variance.}
\item{y}{A time series object of class \code{ts}, or a
object that can be coerced to such.}
\item{u}{Only used with non-Gaussian distribution. See
details.}
\item{distribution}{Specify the distribution of the
observations. Default is "Gaussian".}
\item{transform}{The functions of \code{KFAS} require
diagonal covariance matrix \eqn{H_t}{H[t]}. If
\eqn{H_t}{H[t]} is not diagonal, model can be transformed
using one of the two options. Option \code{"ldl"}
performs LDL decomposition for covariance matrix
\eqn{H_t}{H[t]}, and multiplies the observation equation
with the \eqn{L_t^{-1}}{L[t]^{-1}}, so \eqn{\epsilon_t
\sim N(0,D_t)}{\epsilon[t] ~ N(0,D[t])}. Option
\code{"augment"} adds \eqn{\epsilon_t}{\epsilon[t]} to
the state vector, when \eqn{Q_t}{Q[t]} becomes block
diagonal with blocks \eqn{Q_t}{Q[t]} and \eqn{H_t}{H[t]}.
In case of univariate series, option \code{"ldl"} only
changes the \code{H_type} argument of the model to
\code{"Diagonal"}. Default is \code{"none"} which does no
transformation but checks if \eqn{H} is diagonal. If not,
\code{H_type} is set to \code{"Untransformed"}.}
\item{tolF}{Tolerance parameter for Finf. Smallest value
not counted for zero.}
\item{tol0}{Tolerance parameter for LDL decomposition,
determines which diagonal values are counted as zero.}
}
\description{
Function \code{arimaSSM} creates a state space
representation of ARIMA model.
}
\details{
The linear Gaussian state space model is given by
\deqn{y_t = Z_t \alpha_t + \epsilon_t,}{y[t] =
Z[t]\alpha[t] + \epsilon[t], (observation equation)}
\deqn{\alpha_{t+1} = T_t \alpha_t + R_t
\eta_t,}{\alpha[t+1] = T[t]\alpha[t] + R[t]\eta[t],
(transition equation)}
where \eqn{\epsilon_t ~ N(0,H_t)}{\epsilon[t] ~
N(0,H[t])}, \eqn{\eta_t ~ N(0,Q_t)}{\eta[t] ~ N(0,Q[t])}
and \eqn{\alpha_1 ~ N(a_1,P_1)}{\alpha[1] ~ N(a[1],P[1])}
independently of each other. In case of non-Gaussian
observations, the observation equation is of form
\eqn{p(y_t|\theta_t) =
p(y_t|Z_t\alpha_t)}{p(y[t]|\theta[t]) =
p(y[t]|Z[t]\alpha[t])}, with
\eqn{p(y_t|\theta_t)}{p(y[t]|\theta[t])} being one of the
following:
If observations are Poisson distributed, parameter of
Poisson distribution is
\eqn{u_t\lambda_t}{u[t]\lambda[t]} and \eqn{\theta_t =
log(\lambda_t)}{\theta[t]=log(\lambda[t])}.
If observations are from binomial distribution, \eqn{u}
is a vector specifying the number of trials at times
\eqn{1,\ldots,n}, and \eqn{\theta_t =
log[\pi_t/(1-\pi_t)]}{\theta[t] =
log(\pi[t]/(1-\pi[t]))}, where \eqn{\pi_t}{\pi[t]} is the
probability of success at time \eqn{t}.
For non-Gaussian models \eqn{u_t=1}{u[t]=1} as a default.
For Gaussian models, parameter is omitted.
Only univariate observations are supported when
observation equation is non-Gaussian.
}
\seealso{
\code{\link{regSSM}} for state space representation of a
regression model, \code{\link{structSSM}} for structural
time series model, and \code{\link{SSModel}} for custom
\code{SSModel} object.
}
|
lba.mle.logit <- function(obj, A, B, K, cA, cB, logitA, logitB, omsk,
                          psitk, S, T, itmax.ide, trace.lba, ...) {
  # Maximum-likelihood latent budget analysis with multinomial-logit
  # constraints.  Dispatches on which logit design matrices were supplied:
  #   * both logitA and logitB -> constraints on the mixing parameters AND
  #     the latent components (lba.mle.logit.AB);
  #   * logitA only            -> constraints on the mixing parameters
  #     (lba.mle.logit.A);
  #   * otherwise              -> constraints on the latent components
  #     (lba.mle.logit.B).
  row.constrained <- !is.null(logitA)
  col.constrained <- !is.null(logitB)
  if (row.constrained && col.constrained) {
    results <- lba.mle.logit.AB(obj       = obj,
                                logitA    = logitA,
                                logitB    = logitB,
                                omsk      = omsk,
                                psitk     = psitk,
                                K         = K,
                                S         = S,
                                T         = T,
                                itmax.ide = itmax.ide,
                                trace.lba = trace.lba,
                                ...)
  } else if (row.constrained) {
    results <- lba.mle.logit.A(obj       = obj,
                               B         = B,
                               cB        = cB,
                               logitA    = logitA,  # design matrix for row covariates IxS
                               omsk      = omsk,
                               K         = K,
                               S         = S,
                               itmax.ide = itmax.ide,
                               trace.lba = trace.lba,
                               ...)
  } else {
    results <- lba.mle.logit.B(obj       = obj,
                               A         = A,
                               cA        = cA,
                               logitB    = logitB,  # design matrix for column covariates JxT
                               psitk     = psitk,
                               K         = K,
                               T         = T,
                               itmax.ide = itmax.ide,
                               trace.lba = trace.lba,
                               ...)
  }
}
######################## AUXILIAR FUNCTIONS ##############################
# MLE of the latent budget model with multinomial-logit constraints on BOTH
# the mixing parameters A (row covariates in logitA, an I x S design matrix)
# and the latent components B (column covariates in logitB, a J x T design
# matrix).  The free parameters are the logit coefficients omsk (S x K,
# first row fixed at 0 for identifiability) and psitk (T x K); they are
# estimated by unconstrained BFGS minimisation of the negative
# log-likelihood (see van der Ark, 1995, p. 80).
lba.mle.logit.AB <- function(obj      ,
                             logitA   ,
                             logitB   ,
                             omsk     ,
                             psitk    ,
                             K        ,
                             S        ,
                             T        ,
                             itmax.ide,
                             trace.lba,
                             ...)
{
  I <- nrow(obj)  # rows of the data matrix
  J <- ncol(obj)  # columns of the data matrix

  # Random starting values when none are supplied; the first row of omsk is
  # pinned at zero for identifiability.
  if (is.null(omsk)) {
    omsk <- matrix(c(rep(0, S), rnorm(S * (K - 1))), ncol = K)
  }
  if (is.null(psitk)) {
    psitk <- matrix(rnorm(T * K), ncol = K)
  }

  # Build the I x K mixing-parameter matrix A implied by omsk: a(k|i) is the
  # inverse of a sum over n of products of
  # exp(logitA[i,s] * (omsk[s,n] - omsk[s,k])) (a softmax in disguise).
  # Factors that overflow to Inf are capped at 1e6; rows are normalised so
  # each sums to exactly 1.
  make_A <- function(omsk) {
    A <- matrix(0, nrow = I, ncol = K)
    for (i in 1:I) {
      for (k in 1:K) {
        for (n in 1:K) {
          a <- 1
          for (s in 1:S) {
            e <- exp(logitA[i, s] * (omsk[s, n] - omsk[s, k]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          A[i, k] <- A[i, k] + a
        }
        A[i, k] <- 1 / A[i, k]
      }
    }
    A / rowSums(A)
  }

  # Build the J x K latent-component matrix B implied by psitk in the same
  # fashion; columns are normalised so each sums to exactly 1.
  make_B <- function(psitk) {
    B <- matrix(0, nrow = J, ncol = K)
    for (j in 1:J) {
      for (k in 1:K) {
        for (n in 1:J) {
          a <- 1
          for (t in 1:T) {
            e <- exp(psitk[t, k] * (logitB[n, t] - logitB[j, t]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          B[j, k] <- B[j, k] + a
        }
        B[j, k] <- 1 / B[j, k]
      }
    }
    t(t(B) / colSums(B))
  }

  # Negative log-likelihood of the packed vector xx = (vec(omsk), vec(psitk)).
  mw <- function(xx, obj, K, I, J, logitA, logitB, S, T) {
    omsk  <- matrix(xx[(1):(S * K)], ncol = K)
    psitk <- matrix(xx[(S * K + 1):(S * K + T * K)], ncol = K)
    A <- make_A(omsk)    # I x K
    B <- make_B(psitk)   # J x K
    # p(j,k|i) = a(k|i) * b(j|k); see van der Ark (1995), p. 80.
    pjki <- rep(0, I * J * K)
    m <- 0
    # Fill in column-major order: i runs fastest, then j, then k.
    for (k in 1:K) for (j in 1:J) for (i in 1:I) {
      m <- m + 1
      pjki[m] <- A[i, k] * B[j, k]
    }
    pip <- rowSums(obj) / sum(obj)
    pjki[pjki <= 0] <- 1e-7  # floor to keep log() finite
    mi   <- matrix(pjki, nrow = I)
    pijk <- mi * pip         # p(i,j,k)
    nijk <- as.vector(pijk * sum(obj))
    -sum(nijk * log(pjki))   # negative log-likelihood
  }

  x0 <- c(as.vector(omsk), as.vector(psitk))
  # Unconstrained maximum-likelihood estimation by BFGS.
  xab <- optim(par = x0,
               fn  = mw,
               obj = obj,
               K = K, I = I, J = J,
               logitA = logitA,
               logitB = logitB,
               S = S, T = T,
               method  = 'BFGS',
               control = list(trace = trace.lba,
                              maxit = itmax.ide))

  # Unpack the solution and rebuild A and B from the fitted coefficients.
  omsk  <- matrix(xab$par[1:(S * K)], ncol = K)
  psitk <- matrix(xab$par[(S * K + 1):(S * K + T * K)], ncol = K)
  A <- make_A(omsk)
  # FIX: the original normalised B twice in a row; the second pass divided
  # by column sums that were already 1 and has been removed.
  B <- make_B(psitk)

  colnames(A) <- colnames(B) <- colnames(omsk) <- colnames(psitk) <-
    paste('LB', 1:K, sep = '')
  pimais <- rowSums(obj) / sum(obj)
  aux_pk <- pimais %*% A  # budget proportions
  # Re-order the budgets by decreasing size (the matrix() calls drop the
  # dimnames, which are re-assigned below).
  ord <- order(aux_pk, decreasing = TRUE)
  pk <- matrix(aux_pk[ord], ncol = dim(aux_pk)[2])
  A  <- matrix(A[, ord], ncol = dim(aux_pk)[2])
  B  <- matrix(B[, ord], ncol = dim(aux_pk)[2])
  P <- obj / rowSums(obj)  # observed budgets
  rownames(A) <- rownames(P)
  rownames(B) <- colnames(P)
  colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB', 1:K, sep = '')
  pij <- A %*% t(B)        # expected budgets
  residual <- P - pij
  val_func <- xab$value
  iter_ide <- as.numeric(xab$counts[2])  # optim's gradient-evaluation count
  rescB <- rescaleB(obj, A, B)
  colnames(rescB) <- colnames(B)
  rownames(rescB) <- rownames(B)
  results <- list(P        = P,
                  pij      = pij,
                  residual = residual,
                  A        = A,
                  B        = B,
                  rescB    = rescB,
                  pk       = pk,
                  val_func = val_func,
                  iter_ide = iter_ide,
                  omsk     = omsk,
                  psitk    = psitk)
  class(results) <- c("lba.mle.logit",
                      "lba.mle")
  invisible(results)
}
# MLE of the latent budget model with a multinomial-logit constraint on the
# mixing parameters A only (row covariates in logitA, an I x S design
# matrix).  The latent components B may carry constraints through cB:
#   * values < 1 fix the corresponding beta(j|k);
#   * whole numbers >= 2 mark groups of equal parameters;
#   * NA fills the unconstrained cells.
# Column sums of B must not exceed 1.  Estimation uses the
# augmented-Lagrangian optimizer constrOptim.nl() (package 'alabama').
lba.mle.logit.A <- function(obj      ,
                            B        ,
                            cB       ,
                            logitA   ,
                            omsk     ,
                            K        ,
                            S        ,
                            itmax.ide,
                            trace.lba,
                            ...)
{
  I <- nrow(obj)  # rows of the data matrix
  J <- ncol(obj)  # columns of the data matrix

  # Random start for the logit coefficients; the first row of omsk is
  # pinned at zero for identifiability.
  if (is.null(omsk)) {
    omsk <- matrix(c(rep(0, S), rnorm(S * (K - 1))), ncol = K)
  }

  # Starting values for B: honour the constraints in cB when present,
  # otherwise draw column-wise Dirichlet values.
  if (!is.null(cB) & is.null(B)) {
    B <- t(constrainAB(t(cB)))
  } else {
    if (is.null(cB) & is.null(B)) {
      B <- t(rdirich(K, runif(J)))
    }
  }

  # Build the I x K mixing-parameter matrix A implied by omsk: a(k|i) is the
  # inverse of a sum over n of products of
  # exp(logitA[i,s] * (omsk[s,n] - omsk[s,k])) (a softmax in disguise).
  # Factors that overflow to Inf are capped at 1e6; rows are normalised so
  # each sums to exactly 1.
  make_A <- function(omsk) {
    A <- matrix(0, nrow = I, ncol = K)
    for (i in 1:I) {
      for (k in 1:K) {
        for (n in 1:K) {
          a <- 1
          for (s in 1:S) {
            e <- exp(logitA[i, s] * (omsk[s, n] - omsk[s, k]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          A[i, k] <- A[i, k] + a
        }
        A[i, k] <- 1 / A[i, k]
      }
    }
    A / rowSums(A)
  }

  # Negative log-likelihood of the packed parameter vector
  # xx = (equality dummies, vec(B), vec(omsk)).
  mw <- function(xx, obj, cB, K, I, J, logitA, S) {
    y    <- length(xx) - (J * K + S * K)
    omsk <- matrix(xx[(y + J * K + 1):(y + J * K + S * K)], ncol = K)
    B    <- matrix(xx[(y + 1):(y + J * K)], ncol = K)
    A    <- make_A(omsk)  # I x K
    # Re-impose the fixed-value constraints on B.
    if (!is.null(cB)) {
      mincb <- min(cB, na.rm = TRUE)
      if (mincb < 1) {
        posFb <- which(cB < 1, arr.ind = TRUE)
        B[posFb] <- cB[posFb]
      }
    }
    # p(j,k|i) = a(k|i) * b(j|k); see van der Ark (1995), p. 80.
    pjki <- rep(0, I * J * K)
    m <- 0
    # Fill in column-major order: i runs fastest, then j, then k.
    for (k in 1:K) for (j in 1:J) for (i in 1:I) {
      m <- m + 1
      pjki[m] <- A[i, k] * B[j, k]
    }
    pip <- rowSums(obj) / sum(obj)
    pjki[pjki <= 0] <- 1e-7  # floor to keep log() finite
    mi   <- matrix(pjki, nrow = I)
    pijk <- mi * pip         # p(i,j,k)
    nijk <- as.vector(pijk * sum(obj))
    -sum(nijk * log(pjki))   # negative log-likelihood
  }

  # Equality constraints handed to the optimizer:
  #   * each group of equality-constrained beta's matches its shared dummy;
  #   * fixed beta's equal their prescribed cB value;
  #   * every column of B sums to one;
  #   * the first column of omsk is pinned at zero (identifiability).
  # (The original also rebuilt A here; it was never used, so it is omitted.)
  heq <- function(xx, obj, cB, K, I, J, logitA, S) {
    y <- length(xx) - (J * K + S * K)
    B <- matrix(xx[(y + 1):(y + J * K)], ncol = K)
    if (!is.null(cB)) {
      mincb <- min(cB, na.rm = TRUE)
      maxcb <- max(cB, na.rm = TRUE)
      if (maxcb > 1) {
        # Positions of each group of equality-constrained parameters.
        bl <- list()
        for (i in 2:maxcb) {
          bl[[i - 1]] <- which(cB == i, arr.ind = TRUE)
          bl[[i - 1]] <- bl[[i - 1]][order(bl[[i - 1]][, 1],
                                           bl[[i - 1]][, 2]), ]
        }
        # Force the corresponding beta(j|k)'s to equal their dummy xx[j].
        h <- 0
        b <- 0
        e <- 0
        for (j in 1:(maxcb - 1)) {
          b[j] <- nrow(bl[[j]])
          for (i in 1:(b[j])) {
            e <- e + 1
            h[e] <- B[bl[[j]][i, 1], bl[[j]][i, 2]] - xx[j]
          }
        }
      }
      # Preserve the fixed-value constraints.
      if (mincb < 1) {
        posFb <- which(cB < 1, arr.ind = TRUE)
        if (maxcb > 1) {
          h[(length(h) + 1):(length(h) + nrow(posFb))] <-
            (B[posFb] - cB[posFb])
        } else {
          h <- 0
          h[1:nrow(posFb)] <- (B[posFb] - cB[posFb])
        }
      }
    }
    if (is.null(cB)) {
      h <- c(colSums(B) - rep(1, (K)), xx[(y + J * K + 1):(y + J * K + S)])
    } else {
      h[(length(h) + 1):(length(h) + K + S)] <-
        c((colSums(B) - rep(1, (K))), xx[(y + J * K + 1):(y + J * K + S)])
    }
    h
  }

  # Inequality constraints: every beta(j|k) must stay strictly positive.
  hin <- function(xx, obj, cB, K, I, J, logitA, S) {
    y <- length(xx) - (J * K + S * K)
    h <- xx[(y + 1):(y + J * K)] + 1e-7
    h
  }

  # One extra free parameter ("dummy") per group of equality-constrained
  # beta's; the dummies head the packed parameter vector.
  if (!is.null(cB)) {
    maxcb <- max(cB, na.rm = TRUE)
    if (maxcb > 1) {
      bl <- list()
      for (i in 2:maxcb) {
        bl[[i - 1]] <- which(cB == i, arr.ind = TRUE)
        bl[[i - 1]] <- bl[[i - 1]][order(bl[[i - 1]][, 1],
                                         bl[[i - 1]][, 2]), ]
      }
      m <- sum(sapply(bl, function(x) 1))  # number of equality groups
      a <- rep(0, m)
    } else {
      m <- 0
      a <- rep(0, m)
    }
  } else {
    m <- 0
    a <- rep(0, m)
  }
  x0 <- c(a, as.vector(B), as.vector(omsk))

  # Split the iteration budget between the outer augmented-Lagrangian loop
  # and the inner optim() calls.
  itmax.ala <- round(.1 * itmax.ide)
  itmax.opt <- round(.9 * itmax.ide)
  xab <- constrOptim.nl(par    = x0,
                        fn     = mw,
                        cB     = cB,
                        logitA = logitA,
                        obj    = obj,
                        K = K, I = I, J = J, S = S,
                        heq = heq,
                        hin = hin,
                        control.outer = list(trace = trace.lba,
                                             itmax = itmax.ala),
                        control.optim = list(maxit = itmax.opt))

  # Unpack the solution and rebuild A from the fitted logit coefficients.
  y    <- length(xab$par) - (J * K + S * K)
  omsk <- matrix(xab$par[(y + J * K + 1):(y + J * K + S * K)], ncol = K)
  B    <- matrix(xab$par[(y + 1):(y + J * K)], ncol = K)
  # FIX: the original skipped the row normalisation of the final A that is
  # applied everywhere else in this file (lba.mle.logit.AB and the local
  # objective); make_A() now normalises so each row sums to exactly 1.
  A <- make_A(omsk)

  pimais <- rowSums(obj) / sum(obj)
  P <- obj / rowSums(obj)  # observed budgets
  aux_pk <- pimais %*% A   # budget proportions
  # Re-order the budgets by decreasing size.
  ord <- order(aux_pk, decreasing = TRUE)
  pk <- matrix(aux_pk[ord], ncol = dim(aux_pk)[2])
  A  <- matrix(A[, ord], ncol = dim(aux_pk)[2])
  B  <- matrix(B[, ord], ncol = dim(aux_pk)[2])
  colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB', 1:K, sep = '')
  pij <- A %*% t(B)        # expected budgets
  rownames(A) <- rownames(P)
  rownames(B) <- colnames(P)
  residual <- P - pij
  val_func <- xab$value
  # Average inner iterations per outer loop plus the outer iterations.
  iter_ide <- round(as.numeric(xab$counts[2] / xab$outer.iterations)) +
    xab$outer.iterations
  rescB <- rescaleB(obj, A, B)
  colnames(rescB) <- colnames(B)
  rownames(rescB) <- rownames(B)
  results <- list(P        = P,
                  pij      = pij,
                  residual = residual,
                  A        = A,
                  B        = B,
                  rescB    = rescB,
                  pk       = pk,
                  val_func = val_func,
                  iter_ide = iter_ide,
                  omsk     = omsk)
  class(results) <- c("lba.mle.logit",
                      "lba.mle")
  invisible(results)
}
# MLE of the latent budget model with a multinomial-logit constraint on the
# latent components B only (column covariates in logitB, a J x T design
# matrix).  The mixing parameters A may carry constraints through cA:
#   * values < 1 fix the corresponding alpha(k|i);
#   * whole numbers >= 2 mark groups of equal parameters;
#   * NA fills the unconstrained cells.
# Row sums of A must not exceed 1.  Estimation uses the
# augmented-Lagrangian optimizer constrOptim.nl() (package 'alabama').
lba.mle.logit.B <- function(obj      ,
                            A        ,
                            cA       ,
                            logitB   ,
                            psitk    ,
                            K        ,
                            T        ,
                            itmax.ide,
                            trace.lba,
                            ...)
{
  I <- nrow(obj)  # rows of the data matrix
  J <- ncol(obj)  # columns of the data matrix

  # Random start for the logit coefficients of B when none are supplied.
  if (is.null(psitk)) {
    psitk <- matrix(rnorm(T * K), ncol = K)
  }

  # Starting values for A: honour the constraints in cA when present,
  # otherwise draw row-wise Dirichlet values.
  if (!is.null(cA) & is.null(A)) {
    A <- constrainAB(cA)
  } else {
    if (is.null(cA) & is.null(A)) {
      A <- rdirich(I, runif(K))
    }
  }

  # Build the J x K latent-component matrix B implied by psitk: b(j|k) is
  # the inverse of a sum over n of products of
  # exp(psitk[t,k] * (logitB[n,t] - logitB[j,t])).  Factors that overflow
  # to Inf are capped at 1e6; columns are normalised to sum to 1.
  build_B <- function(psitk) {
    out <- matrix(0, nrow = J, ncol = K)
    for (j in seq_len(J)) {
      for (k in seq_len(K)) {
        for (n in seq_len(J)) {
          fac <- 1
          for (t in seq_len(T)) {
            e <- exp(psitk[t, k] * (logitB[n, t] - logitB[j, t]))
            fac <- if (e == Inf) fac * 1e6 else fac * e
          }
          out[j, k] <- out[j, k] + fac
        }
        out[j, k] <- 1 / out[j, k]
      }
    }
    t(t(out) / colSums(out))
  }

  # Positions of each equality group encoded in cA (entries 2, 3, ...),
  # ordered by row then column.
  equality_groups <- function(cA) {
    grp <- list()
    for (i in 2:max(cA, na.rm = TRUE)) {
      pos <- which(cA == i, arr.ind = TRUE)
      grp[[i - 1]] <- pos[order(pos[, 1], pos[, 2]), ]
    }
    grp
  }

  # Negative log-likelihood of the packed parameter vector
  # xx = (equality dummies, vec(A), vec(psitk)).
  mw <- function(xx, obj, cA, K, I, J, logitB, T) {
    y     <- length(xx) - (T * K + I * K)
    A     <- matrix(xx[(y + 1):(y + I * K)], ncol = K)
    psitk <- matrix(xx[(y + I * K + 1):(y + I * K + T * K)], ncol = K)
    B     <- build_B(psitk)  # J x K
    # Re-impose the fixed-value constraints on A.
    if (!is.null(cA)) {
      if (min(cA, na.rm = TRUE) < 1) {
        fixed <- which(cA < 1, arr.ind = TRUE)
        A[fixed] <- cA[fixed]
      }
    }
    # p(j,k|i) = a(k|i) * b(j|k); see van der Ark (1995), p. 80.
    # Fill in column-major order: i runs fastest, then j, then k.
    pjki <- rep(0, I * J * K)
    m <- 0
    for (k in seq_len(K)) for (j in seq_len(J)) for (i in seq_len(I)) {
      m <- m + 1
      pjki[m] <- A[i, k] * B[j, k]
    }
    pip <- rowSums(obj) / sum(obj)
    pjki[pjki <= 0] <- 1e-7  # floor to keep log() finite
    pijk <- matrix(pjki, nrow = I) * pip  # p(i,j,k)
    nijk <- as.vector(pijk * sum(obj))
    -sum(nijk * log(pjki))   # negative log-likelihood
  }

  # Equality constraints handed to the optimizer:
  #   * each group of equality-constrained alpha's matches its shared dummy;
  #   * fixed alpha's equal their prescribed cA value;
  #   * every row of A sums to one.
  heq <- function(xx, obj, cA, K, I, J, logitB, T) {
    y     <- length(xx) - (T * K + I * K)
    A     <- matrix(xx[(y + 1):(y + I * K)], ncol = K)
    psitk <- matrix(xx[(y + I * K + 1):(y + I * K + T * K)], ncol = K)
    B     <- build_B(psitk)  # kept from the original; not used below
    if (!is.null(cA)) {
      minca <- min(cA, na.rm = TRUE)
      maxca <- max(cA, na.rm = TRUE)
      if (maxca > 1) {
        grp <- equality_groups(cA)
        # Force the corresponding alpha(k|i)'s to equal their dummy xx[g].
        h <- 0
        e <- 0
        for (g in 1:(maxca - 1)) {
          for (i in 1:nrow(grp[[g]])) {
            e <- e + 1
            h[e] <- A[grp[[g]][i, 1], grp[[g]][i, 2]] - xx[g]
          }
        }
      }
      # Preserve the fixed-value constraints.
      if (minca < 1) {
        fixed <- which(cA < 1, arr.ind = TRUE)
        if (maxca > 1) {
          h[(length(h) + 1):(length(h) + nrow(fixed))] <-
            A[fixed] - cA[fixed]
        } else {
          h <- A[fixed] - cA[fixed]
        }
      }
    }
    if (is.null(cA)) {
      h <- rowSums(A) - rep(1, I)
    } else {
      h[(length(h) + 1):(length(h) + I)] <- rowSums(A) - rep(1, I)
    }
    h
  }

  # Inequality constraints: every alpha(k|i) must stay strictly positive.
  hin <- function(xx, obj, cA, K, I, J, logitB, T) {
    y <- length(xx) - (I * K + T * K)
    xx[(y + 1):(y + I * K)] + 1e-7
  }

  # One free "dummy" parameter per equality group; the dummies head the
  # packed parameter vector.
  ndum <- 0
  if (!is.null(cA)) {
    if (max(cA, na.rm = TRUE) > 1) {
      ndum <- length(equality_groups(cA))
    }
  }
  start <- c(rep(0, ndum), as.vector(A), as.vector(psitk))

  # Split the iteration budget between the outer augmented-Lagrangian loop
  # and the inner optim() calls.
  itmax.ala <- round(.1 * itmax.ide)
  itmax.opt <- round(.9 * itmax.ide)
  fit <- constrOptim.nl(par    = start,
                        fn     = mw,
                        cA     = cA,
                        logitB = logitB,
                        obj    = obj,
                        K = K, I = I, J = J, T = T,
                        heq = heq,
                        hin = hin,
                        control.outer = list(trace = trace.lba,
                                             itmax = itmax.ala),
                        control.optim = list(maxit = itmax.opt))

  # Unpack the solution and rebuild B from the fitted logit coefficients.
  y     <- length(fit$par) - (T * K + I * K)
  psitk <- matrix(fit$par[(y + I * K + 1):(y + I * K + T * K)], ncol = K)
  A     <- matrix(fit$par[(y + 1):(y + I * K)], ncol = K)
  B     <- build_B(psitk)

  pimais <- rowSums(obj) / sum(obj)
  P      <- obj / rowSums(obj)  # observed budgets
  aux_pk <- pimais %*% A        # budget proportions
  # Re-order the budgets by decreasing size.
  ord <- order(aux_pk, decreasing = TRUE)
  pk  <- matrix(aux_pk[ord], ncol = dim(aux_pk)[2])
  A   <- matrix(A[, ord], ncol = dim(aux_pk)[2])
  B   <- matrix(B[, ord], ncol = dim(aux_pk)[2])
  rownames(A) <- rownames(P)
  rownames(B) <- colnames(P)
  colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB', 1:K, sep = '')
  pij      <- A %*% t(B)        # expected budgets
  residual <- P - pij
  val_func <- fit$value
  # Average inner iterations per outer loop plus the outer iterations.
  iter_ide <- round(as.numeric(fit$counts[2]) / fit$outer.iterations) +
    fit$outer.iterations
  rescB <- rescaleB(obj, A, B)
  colnames(rescB) <- colnames(B)
  rownames(rescB) <- rownames(B)
  results <- list(P        = P,
                  pij      = pij,
                  residual = residual,
                  A        = A,
                  B        = B,
                  rescB    = rescB,
                  pk       = pk,
                  val_func = val_func,
                  iter_ide = iter_ide,
                  psitk    = psitk)
  class(results) <- c("lba.mle.logit",
                      "lba.mle")
  invisible(results)
}
# Dispatch MLE estimation of the latent budget model under multinomial
# logit constraints.  Depending on which design matrices are supplied, the
# constraints apply to both A and B, to the mixing parameters A only, or
# to the latent components B only.
# (FIX: a stray dataset-metadata prefix fused onto this definition's first
# line has been removed.)
lba.mle.logit <- function(obj      ,
                          A        ,
                          B        ,
                          K        ,
                          cA       ,
                          cB       ,
                          logitA   ,
                          logitB   ,
                          omsk     ,
                          psitk    ,
                          S        ,
                          T        ,
                          itmax.ide,
                          trace.lba,
                          ...)
{
  if (!is.null(logitA) && !is.null(logitB)) {
    # Multinomial logit on both mixing parameters and latent components.
    results <- lba.mle.logit.AB(obj       = obj,
                                logitA    = logitA,
                                logitB    = logitB,
                                omsk      = omsk,
                                psitk     = psitk,
                                K         = K,
                                S         = S,
                                T         = T,
                                itmax.ide = itmax.ide,
                                trace.lba = trace.lba,
                                ...)
  } else if (!is.null(logitA)) {
    # Multinomial logit on the mixing parameters only.
    results <- lba.mle.logit.A(obj       = obj,
                               B         = B,
                               cB        = cB,
                               logitA    = logitA,  # row-covariate design, I x S
                               omsk      = omsk,
                               K         = K,
                               S         = S,
                               itmax.ide = itmax.ide,
                               trace.lba = trace.lba,
                               ...)
  } else {
    # Multinomial logit on the latent components only.
    results <- lba.mle.logit.B(obj       = obj,
                               A         = A,
                               cA        = cA,
                               logitB    = logitB,  # column-covariate design, J x T
                               psitk     = psitk,
                               K         = K,
                               T         = T,
                               itmax.ide = itmax.ide,
                               trace.lba = trace.lba,
                               ...)
  }
  invisible(results)
}
######################## AUXILIARY FUNCTIONS #############################
# MLE of the latent budget model with multinomial-logit constraints on BOTH
# the mixing parameters A (row covariates in logitA, an I x S design matrix)
# and the latent components B (column covariates in logitB, a J x T design
# matrix).  The free parameters are the logit coefficients omsk (S x K,
# first row fixed at 0 for identifiability) and psitk (T x K); they are
# estimated by unconstrained BFGS minimisation of the negative
# log-likelihood (see van der Ark, 1995, p. 80).
lba.mle.logit.AB <- function(obj      ,
                             logitA   ,
                             logitB   ,
                             omsk     ,
                             psitk    ,
                             K        ,
                             S        ,
                             T        ,
                             itmax.ide,
                             trace.lba,
                             ...)
{
  I <- nrow(obj)  # rows of the data matrix
  J <- ncol(obj)  # columns of the data matrix

  # Random starting values when none are supplied; the first row of omsk is
  # pinned at zero for identifiability.
  if (is.null(omsk)) {
    omsk <- matrix(c(rep(0, S), rnorm(S * (K - 1))), ncol = K)
  }
  if (is.null(psitk)) {
    psitk <- matrix(rnorm(T * K), ncol = K)
  }

  # Build the I x K mixing-parameter matrix A implied by omsk: a(k|i) is the
  # inverse of a sum over n of products of
  # exp(logitA[i,s] * (omsk[s,n] - omsk[s,k])) (a softmax in disguise).
  # Factors that overflow to Inf are capped at 1e6; rows are normalised so
  # each sums to exactly 1.
  make_A <- function(omsk) {
    A <- matrix(0, nrow = I, ncol = K)
    for (i in 1:I) {
      for (k in 1:K) {
        for (n in 1:K) {
          a <- 1
          for (s in 1:S) {
            e <- exp(logitA[i, s] * (omsk[s, n] - omsk[s, k]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          A[i, k] <- A[i, k] + a
        }
        A[i, k] <- 1 / A[i, k]
      }
    }
    A / rowSums(A)
  }

  # Build the J x K latent-component matrix B implied by psitk in the same
  # fashion; columns are normalised so each sums to exactly 1.
  make_B <- function(psitk) {
    B <- matrix(0, nrow = J, ncol = K)
    for (j in 1:J) {
      for (k in 1:K) {
        for (n in 1:J) {
          a <- 1
          for (t in 1:T) {
            e <- exp(psitk[t, k] * (logitB[n, t] - logitB[j, t]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          B[j, k] <- B[j, k] + a
        }
        B[j, k] <- 1 / B[j, k]
      }
    }
    t(t(B) / colSums(B))
  }

  # Negative log-likelihood of the packed vector xx = (vec(omsk), vec(psitk)).
  mw <- function(xx, obj, K, I, J, logitA, logitB, S, T) {
    omsk  <- matrix(xx[(1):(S * K)], ncol = K)
    psitk <- matrix(xx[(S * K + 1):(S * K + T * K)], ncol = K)
    A <- make_A(omsk)    # I x K
    B <- make_B(psitk)   # J x K
    # p(j,k|i) = a(k|i) * b(j|k); see van der Ark (1995), p. 80.
    pjki <- rep(0, I * J * K)
    m <- 0
    # Fill in column-major order: i runs fastest, then j, then k.
    for (k in 1:K) for (j in 1:J) for (i in 1:I) {
      m <- m + 1
      pjki[m] <- A[i, k] * B[j, k]
    }
    pip <- rowSums(obj) / sum(obj)
    pjki[pjki <= 0] <- 1e-7  # floor to keep log() finite
    mi   <- matrix(pjki, nrow = I)
    pijk <- mi * pip         # p(i,j,k)
    nijk <- as.vector(pijk * sum(obj))
    -sum(nijk * log(pjki))   # negative log-likelihood
  }

  x0 <- c(as.vector(omsk), as.vector(psitk))
  # Unconstrained maximum-likelihood estimation by BFGS.
  xab <- optim(par = x0,
               fn  = mw,
               obj = obj,
               K = K, I = I, J = J,
               logitA = logitA,
               logitB = logitB,
               S = S, T = T,
               method  = 'BFGS',
               control = list(trace = trace.lba,
                              maxit = itmax.ide))

  # Unpack the solution and rebuild A and B from the fitted coefficients.
  omsk  <- matrix(xab$par[1:(S * K)], ncol = K)
  psitk <- matrix(xab$par[(S * K + 1):(S * K + T * K)], ncol = K)
  A <- make_A(omsk)
  # FIX: the original normalised B twice in a row; the second pass divided
  # by column sums that were already 1 and has been removed.
  B <- make_B(psitk)

  colnames(A) <- colnames(B) <- colnames(omsk) <- colnames(psitk) <-
    paste('LB', 1:K, sep = '')
  pimais <- rowSums(obj) / sum(obj)
  aux_pk <- pimais %*% A  # budget proportions
  # Re-order the budgets by decreasing size (the matrix() calls drop the
  # dimnames, which are re-assigned below).
  ord <- order(aux_pk, decreasing = TRUE)
  pk <- matrix(aux_pk[ord], ncol = dim(aux_pk)[2])
  A  <- matrix(A[, ord], ncol = dim(aux_pk)[2])
  B  <- matrix(B[, ord], ncol = dim(aux_pk)[2])
  P <- obj / rowSums(obj)  # observed budgets
  rownames(A) <- rownames(P)
  rownames(B) <- colnames(P)
  colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB', 1:K, sep = '')
  pij <- A %*% t(B)        # expected budgets
  residual <- P - pij
  val_func <- xab$value
  iter_ide <- as.numeric(xab$counts[2])  # optim's gradient-evaluation count
  rescB <- rescaleB(obj, A, B)
  colnames(rescB) <- colnames(B)
  rownames(rescB) <- rownames(B)
  results <- list(P        = P,
                  pij      = pij,
                  residual = residual,
                  A        = A,
                  B        = B,
                  rescB    = rescB,
                  pk       = pk,
                  val_func = val_func,
                  iter_ide = iter_ide,
                  omsk     = omsk,
                  psitk    = psitk)
  class(results) <- c("lba.mle.logit",
                      "lba.mle")
  invisible(results)
}
# MLE of the latent budget model with a multinomial-logit constraint on the
# mixing parameters A only (row covariates in logitA, an I x S design
# matrix).  The latent components B may carry constraints through cB:
#   * values < 1 fix the corresponding beta(j|k);
#   * whole numbers >= 2 mark groups of equal parameters;
#   * NA fills the unconstrained cells.
# Column sums of B must not exceed 1.  Estimation uses the
# augmented-Lagrangian optimizer constrOptim.nl() (package 'alabama').
lba.mle.logit.A <- function(obj      ,
                            B        ,
                            cB       ,
                            logitA   ,
                            omsk     ,
                            K        ,
                            S        ,
                            itmax.ide,
                            trace.lba,
                            ...)
{
  I <- nrow(obj)  # rows of the data matrix
  J <- ncol(obj)  # columns of the data matrix

  # Random start for the logit coefficients; the first row of omsk is
  # pinned at zero for identifiability.
  if (is.null(omsk)) {
    omsk <- matrix(c(rep(0, S), rnorm(S * (K - 1))), ncol = K)
  }

  # Starting values for B: honour the constraints in cB when present,
  # otherwise draw column-wise Dirichlet values.
  if (!is.null(cB) & is.null(B)) {
    B <- t(constrainAB(t(cB)))
  } else {
    if (is.null(cB) & is.null(B)) {
      B <- t(rdirich(K, runif(J)))
    }
  }

  # Build the I x K mixing-parameter matrix A implied by omsk: a(k|i) is the
  # inverse of a sum over n of products of
  # exp(logitA[i,s] * (omsk[s,n] - omsk[s,k])) (a softmax in disguise).
  # Factors that overflow to Inf are capped at 1e6; rows are normalised so
  # each sums to exactly 1.
  make_A <- function(omsk) {
    A <- matrix(0, nrow = I, ncol = K)
    for (i in 1:I) {
      for (k in 1:K) {
        for (n in 1:K) {
          a <- 1
          for (s in 1:S) {
            e <- exp(logitA[i, s] * (omsk[s, n] - omsk[s, k]))
            if (e == Inf) {
              a <- a * 1e6
            } else {
              a <- a * e
            }
          }
          A[i, k] <- A[i, k] + a
        }
        A[i, k] <- 1 / A[i, k]
      }
    }
    A / rowSums(A)
  }

  # Negative log-likelihood of the packed parameter vector
  # xx = (equality dummies, vec(B), vec(omsk)).
  mw <- function(xx, obj, cB, K, I, J, logitA, S) {
    y    <- length(xx) - (J * K + S * K)
    omsk <- matrix(xx[(y + J * K + 1):(y + J * K + S * K)], ncol = K)
    B    <- matrix(xx[(y + 1):(y + J * K)], ncol = K)
    A    <- make_A(omsk)  # I x K
    # Re-impose the fixed-value constraints on B.
    if (!is.null(cB)) {
      mincb <- min(cB, na.rm = TRUE)
      if (mincb < 1) {
        posFb <- which(cB < 1, arr.ind = TRUE)
        B[posFb] <- cB[posFb]
      }
    }
    # p(j,k|i) = a(k|i) * b(j|k); see van der Ark (1995), p. 80.
    pjki <- rep(0, I * J * K)
    m <- 0
    # Fill in column-major order: i runs fastest, then j, then k.
    for (k in 1:K) for (j in 1:J) for (i in 1:I) {
      m <- m + 1
      pjki[m] <- A[i, k] * B[j, k]
    }
    pip <- rowSums(obj) / sum(obj)
    pjki[pjki <= 0] <- 1e-7  # floor to keep log() finite
    mi   <- matrix(pjki, nrow = I)
    pijk <- mi * pip         # p(i,j,k)
    nijk <- as.vector(pijk * sum(obj))
    -sum(nijk * log(pjki))   # negative log-likelihood
  }

  # Equality constraints handed to the optimizer:
  #   * each group of equality-constrained beta's matches its shared dummy;
  #   * fixed beta's equal their prescribed cB value;
  #   * every column of B sums to one;
  #   * the first column of omsk is pinned at zero (identifiability).
  # (The original also rebuilt A here; it was never used, so it is omitted.)
  heq <- function(xx, obj, cB, K, I, J, logitA, S) {
    y <- length(xx) - (J * K + S * K)
    B <- matrix(xx[(y + 1):(y + J * K)], ncol = K)
    if (!is.null(cB)) {
      mincb <- min(cB, na.rm = TRUE)
      maxcb <- max(cB, na.rm = TRUE)
      if (maxcb > 1) {
        # Positions of each group of equality-constrained parameters.
        bl <- list()
        for (i in 2:maxcb) {
          bl[[i - 1]] <- which(cB == i, arr.ind = TRUE)
          bl[[i - 1]] <- bl[[i - 1]][order(bl[[i - 1]][, 1],
                                           bl[[i - 1]][, 2]), ]
        }
        # Force the corresponding beta(j|k)'s to equal their dummy xx[j].
        h <- 0
        b <- 0
        e <- 0
        for (j in 1:(maxcb - 1)) {
          b[j] <- nrow(bl[[j]])
          for (i in 1:(b[j])) {
            e <- e + 1
            h[e] <- B[bl[[j]][i, 1], bl[[j]][i, 2]] - xx[j]
          }
        }
      }
      # Preserve the fixed-value constraints.
      if (mincb < 1) {
        posFb <- which(cB < 1, arr.ind = TRUE)
        if (maxcb > 1) {
          h[(length(h) + 1):(length(h) + nrow(posFb))] <-
            (B[posFb] - cB[posFb])
        } else {
          h <- 0
          h[1:nrow(posFb)] <- (B[posFb] - cB[posFb])
        }
      }
    }
    if (is.null(cB)) {
      h <- c(colSums(B) - rep(1, (K)), xx[(y + J * K + 1):(y + J * K + S)])
    } else {
      h[(length(h) + 1):(length(h) + K + S)] <-
        c((colSums(B) - rep(1, (K))), xx[(y + J * K + 1):(y + J * K + S)])
    }
    h
  }

  # Inequality constraints: every beta(j|k) must stay strictly positive.
  hin <- function(xx, obj, cB, K, I, J, logitA, S) {
    y <- length(xx) - (J * K + S * K)
    h <- xx[(y + 1):(y + J * K)] + 1e-7
    h
  }

  # One extra free parameter ("dummy") per group of equality-constrained
  # beta's; the dummies head the packed parameter vector.
  if (!is.null(cB)) {
    maxcb <- max(cB, na.rm = TRUE)
    if (maxcb > 1) {
      bl <- list()
      for (i in 2:maxcb) {
        bl[[i - 1]] <- which(cB == i, arr.ind = TRUE)
        bl[[i - 1]] <- bl[[i - 1]][order(bl[[i - 1]][, 1],
                                         bl[[i - 1]][, 2]), ]
      }
      m <- sum(sapply(bl, function(x) 1))  # number of equality groups
      a <- rep(0, m)
    } else {
      m <- 0
      a <- rep(0, m)
    }
  } else {
    m <- 0
    a <- rep(0, m)
  }
  x0 <- c(a, as.vector(B), as.vector(omsk))

  # Split the iteration budget between the outer augmented-Lagrangian loop
  # and the inner optim() calls.
  itmax.ala <- round(.1 * itmax.ide)
  itmax.opt <- round(.9 * itmax.ide)
  xab <- constrOptim.nl(par    = x0,
                        fn     = mw,
                        cB     = cB,
                        logitA = logitA,
                        obj    = obj,
                        K = K, I = I, J = J, S = S,
                        heq = heq,
                        hin = hin,
                        control.outer = list(trace = trace.lba,
                                             itmax = itmax.ala),
                        control.optim = list(maxit = itmax.opt))

  # Unpack the solution and rebuild A from the fitted logit coefficients.
  y    <- length(xab$par) - (J * K + S * K)
  omsk <- matrix(xab$par[(y + J * K + 1):(y + J * K + S * K)], ncol = K)
  B    <- matrix(xab$par[(y + 1):(y + J * K)], ncol = K)
  # FIX: the original skipped the row normalisation of the final A that is
  # applied everywhere else in this file (lba.mle.logit.AB and the local
  # objective); make_A() now normalises so each row sums to exactly 1.
  A <- make_A(omsk)

  pimais <- rowSums(obj) / sum(obj)
  P <- obj / rowSums(obj)  # observed budgets
  aux_pk <- pimais %*% A   # budget proportions
  # Re-order the budgets by decreasing size.
  ord <- order(aux_pk, decreasing = TRUE)
  pk <- matrix(aux_pk[ord], ncol = dim(aux_pk)[2])
  A  <- matrix(A[, ord], ncol = dim(aux_pk)[2])
  B  <- matrix(B[, ord], ncol = dim(aux_pk)[2])
  colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB', 1:K, sep = '')
  pij <- A %*% t(B)        # expected budgets
  rownames(A) <- rownames(P)
  rownames(B) <- colnames(P)
  residual <- P - pij
  val_func <- xab$value
  # Average inner iterations per outer loop plus the outer iterations.
  iter_ide <- round(as.numeric(xab$counts[2] / xab$outer.iterations)) +
    xab$outer.iterations
  rescB <- rescaleB(obj, A, B)
  colnames(rescB) <- colnames(B)
  rownames(rescB) <- rownames(B)
  results <- list(P        = P,
                  pij      = pij,
                  residual = residual,
                  A        = A,
                  B        = B,
                  rescB    = rescB,
                  pk       = pk,
                  val_func = val_func,
                  iter_ide = iter_ide,
                  omsk     = omsk)
  class(results) <- c("lba.mle.logit",
                      "lba.mle")
  invisible(results)
}
lba.mle.logit.B <- function(obj ,
A ,
cA ,
logitB ,
psitk ,
K ,
T ,
itmax.ide,
trace.lba,
...)
{
#The matrices caki and cbjk contain the constraint values of the mixing
#parameters and latent components respectively.
#For fixed value constraint use the values at respective location in the matrix.
#For aki, all row sums must be less or equal 1. For bjk all column sums must be
#less or equal 1.
#For equality value constraint use whole numbers starting form 2. Same numbers
#at diffferent locations of the matrix show equal parameters.
#USE NA TO FILL UP THE REST OF THE MATRICES.
I <- nrow(obj) # row numbers of data matrix
J <- ncol(obj) # column numbers of data matrix
#-----------------------------------------------------------------------------
if (is.null(psitk)) {
psitk <- matrix(rnorm(T*K), ncol = K) }
#BUILDING cA
if(!is.null(cA) & is.null(A)){
A <- constrainAB(cA)
} else { if(is.null(cA) & is.null(A)){
#creating random generated values for beta(j|k)
A <- rdirich(I, runif(K)) } }
#============================================================================
#----------------------------------------------------------------------------
#second case multinomial logit constraints on latent budgets, but not
#in the mixing parameters.
#---------------------------------------------------------------------------
mw <- function(xx,
obj,
cA,
K,
I,
J,
logitB,
T){
y <- length(xx)- (T*K+I*K)
A <- matrix(xx[(y+1):(y+I*K)], ncol = K)
psitk <- matrix(xx[(y+I*K+1):(y+I*K+T*K)], ncol = K)
#creating B from psitk
B <- matrix(0,nrow=J,ncol=K)
for(j in 1:J){
for(k in 1:K){
for(n in 1:J){
a <- 1
for(t in 1:T){
if(exp(psitk[t,k]*(logitB[n,t]-logitB[j,t]))==Inf) {
a <- a*1e6
}else{ a <- a*exp(psitk[t,k]*(logitB[n,t]-logitB[j,t])) } }
B[j,k] <- B[j,k] + a }
B[j,k] <- 1/B[j,k] } }
B <- t(t(B)/colSums(B))
# B is a JxK matrix
if(!is.null(cA)) {
minca <- min(cA, na.rm=TRUE)
if(minca < 1){
posFa <- which(cA<1, arr.ind = T)
A[posFa] <- cA[posFa] } }
pjki <- rep(0,I*J*K) #this will become pijk/pi+ see van der Ark page 80
m <- 0
# this makes i=1,j=1,k=1; i=2,j=1,k=1; i=3,j=1,k=1...; i=I,j=1,k=1 and so on.
for(k in 1:K) for(j in 1:J) for(i in 1:I) { m <- m +1
pjki[m] <- A[i,k]*B[j,k] }
pip <- rowSums(obj)/sum(obj)
pjki[pjki <=0] <- 1e-7
mi <- matrix(pjki, nrow=I)
pijk <- mi*pip #this is pijk see van der Ark page 80
nijk <- as.vector(pijk*sum(obj))
mw <- -sum(nijk * log(pjki)) #-loglikelihood function
}
#============================================================================
# heq function
#============================================================================
heq <- function(xx,
                obj,
                cA,
                K,
                I,
                J,
                logitB,
                T){
  # Equality-constraint function handed to the augmented-Lagrangian
  # optimiser (alabama::constrOptim.nl).  Returns the vector h whose
  # entries must all be zero at a feasible point:
  #   * one entry per cell of A that belongs to an equality group in cA
  #     (the shared group values sit at the head of xx),
  #   * one entry per cell of A fixed to a constant (cA < 1),
  #   * one row-sum-to-one entry per row of A.
  # xx packs c(group values, vec(A), vec(psitk)); obj is unused here but
  # kept so that mw(), heq() and hin() share one signature.
  #
  # Reconstruct A (I x K) and psitk (T x K) from the parameter vector.
  y <- length(xx) - (T*K + I*K)
  A <- matrix(xx[(y+1):(y+I*K)], ncol = K)
  psitk <- matrix(xx[(y+I*K+1):(y+I*K+T*K)], ncol = K)
  # Rebuild B (J x K) from the logit parametrisation; overflowing exp()
  # terms are clamped at 1e6 so the running product stays finite.
  B <- matrix(0, nrow = J, ncol = K)
  for(j in 1:J){
    for(k in 1:K){
      for(n in 1:J){
        a <- 1
        for(t in 1:T){
          ex <- exp(psitk[t,k]*(logitB[n,t]-logitB[j,t]))
          if(ex == Inf) {
            a <- a*1e6
          } else { a <- a*ex } }
        B[j,k] <- B[j,k] + a }
      B[j,k] <- 1/B[j,k] } }
  B <- t(t(B)/colSums(B))
  # B is a JxK matrix with unit column sums
  if(!is.null(cA)) {
    minca <- min(cA, na.rm=TRUE)
    maxca <- max(cA, na.rm=TRUE)
    if(maxca > 1){
      # al[[g-1]] holds the (row, col) positions of the cells of A in
      # equality group g; drop = FALSE keeps a one-row result a matrix
      # so nrow() below stays valid for single-cell groups.
      al <- list()
      for(i in 2:maxca){
        al[[i-1]] <- which(cA == i, arr.ind = TRUE)
        al[[i-1]] <- al[[i-1]][
          order(al[[i-1]][,1], al[[i-1]][,2]), , drop = FALSE] }
      # Force the corresponding alpha_ik's of each group to equal the
      # shared value stored at the head of xx.
      h <- 0
      a <- 0
      e <- 0
      for(j in 1:(maxca-1)) {
        a[j] <- nrow(al[[j]])
        for(i in 1:(a[j])){ e <- e+1
          h[e] <-
            A[al[[j]][i,1], al[[j]][i,2]] - xx[j] } } }
    # Force the fixed-value constraints (cells with cA < 1) to hold.
    # arr.ind = TRUE is spelled out: the bare T would resolve to the
    # numeric function argument T (number of time points), not TRUE.
    if(minca < 1) {
      posFa <- which(cA < 1, arr.ind = TRUE)
      if(maxca > 1){
        h[(length(h)+1):(length(h)+ nrow(posFa))] <-
          (A[posFa] - cA[posFa])
      }else{
        h <- 0
        h[1:nrow(posFa)] <- (A[posFa] - cA[posFa]) } } }
  # NOTE(review): if cA is non-NULL but contains neither fixed (< 1) nor
  # grouped (> 1) entries, h is never initialised before the append below
  # (as in the original code) -- confirm callers never pass such a cA.
  if(is.null(cA)){
    h <- c(rowSums(A) - rep(1,I))
  }else{
    h[(length(h)+1):(length(h)+ I)] <- c(rowSums(A) - rep(1,I)) }
  h
}
#=========================================================================
# hin function
#=========================================================================
hin <- function(xx,
                obj,
                cA,
                K,
                I,
                J,
                logitB,
                T){
  # Inequality constraints for constrOptim.nl(): every entry of A
  # (stored in xx immediately after the free equality-group values)
  # must stay strictly positive; the 1e-7 offset keeps the optimiser
  # away from the exact zero boundary.  The remaining arguments are
  # unused but keep the signature identical to mw() and heq().
  n.free <- length(xx) - (I*K + T*K)
  alpha.idx <- seq.int(n.free + 1, n.free + I*K)
  xx[alpha.idx] + 1e-7
}
#===========================================================================
#===========================================================================
if(!is.null(cA)) { #there are equality parameters in cA
#list containing in each element the positions of the equality parameters
#of matrix A (A) that are equal among them.
maxca <- max(cA, na.rm=TRUE)
if(maxca > 1){
al <- list()
for(i in 2:max(cA, na.rm=TRUE)){
al[[i-1]] <- which(cA==i, arr.ind=TRUE)
al[[i-1]] <- al[[i-1]][order(al[[i-1]][,1],al[[i-1]][,2]),] }
m <- sum(sapply(al, function(x) 1))
a <- rep(0,m)
} else { m <- 0
a <- rep(0,m) }
} else { m <- 0
a <- rep(0,m) }
x0 <- c(a, as.vector(A), as.vector(psitk))
# finding the ls estimates
itmax.ala <- round(.1*itmax.ide)
itmax.opt <- round(.9*itmax.ide)
xab <- constrOptim.nl(par = x0,
fn = mw,
cA = cA,
logitB = logitB,
obj = obj,
K = K,
I = I,
J = J,
T = T,
heq = heq,
hin = hin,
control.outer=list(trace=trace.lba,
itmax=itmax.ala),
control.optim=list(maxit=itmax.opt))
#
y <- length(xab$par)- (T*K+I*K)
psitk <- matrix(xab$par[(y+I*K+1):(y+I*K+T*K)], ncol = K)
A <- matrix(xab$par[(y+1):(y+I*K)], ncol = K)
B <- matrix(0,nrow=J,ncol=K)
for(j in 1:J){
for(k in 1:K){
for(n in 1:J){
a <- 1
for(t in 1:T){
if(exp(psitk[t,k]*(logitB[n,t]-logitB[j,t]))==Inf) {
a <- a*1e6
}else{ a <- a*exp(psitk[t,k]*(logitB[n,t]-logitB[j,t])) } }
B[j,k] <- B[j,k] + a }
B[j,k] <- 1/B[j,k] } }
B <- t(t(B)/colSums(B))
# B is a JxK matrix
pimais <- rowSums(obj)/sum(obj)
P <- obj/rowSums(obj)
aux_pk <- pimais %*% A # budget proportions
pk <- matrix(aux_pk[order(aux_pk,
decreasing = TRUE)],
ncol = dim(aux_pk)[2])
A <- matrix(A[,order(aux_pk,
decreasing = TRUE)],
ncol = dim(aux_pk)[2])
B <- matrix(B[,order(aux_pk,
decreasing = TRUE)],
ncol = dim(aux_pk)[2])
rownames(A) <- rownames(P)
rownames(B) <- colnames(P)
colnames(pk) <- colnames(A) <- colnames(B) <- paste('LB',1:K,sep='')
pij <- A %*% t(B) # expected budget
residual <- P - pij
val_func <- xab$value
iter_ide <- round(as.numeric(xab$counts[2])/xab$outer.iterations) + xab$outer.iterations
rescB <- rescaleB(obj,
A,
B)
colnames(rescB) <- colnames(B)
rownames(rescB) <- rownames(B)
results <- list(P,
pij,
residual,
A,
B,
rescB,
pk,
val_func,
iter_ide,
psitk)
names(results) <- c('P',
'pij',
'residual',
'A',
'B',
'rescB',
'pk',
'val_func',
'iter_ide',
'psitk')
class(results) <- c("lba.mle.logit",
"lba.mle")
invisible(results)
}
|
#' Multivariate Normal Order-statistics Model.
#'
#' Using MCMC methods to fit the MVNOS model. Please install JAGS 3.X (\url{http://mcmc-jags.sourceforge.net}) and rjags (\url{https://cran.r-project.org/package=rjags}) at first.
#'
#' @param y :an n*k matrix, observed data, each row is an individual's rank of items
#' @param p :number of parameters in MVNOS model
#' @param Z :a n*k*p array of covariates associated with all judges
#' @param beta0 :a 1*p matrix, prior normal distribution mean parameters
#' @param A0 :a p*p matrix, prior normal distribution variance-covariance matrix
#' @param alpha :scalar, prior Wishart distribution degree of freedom
#' @param P :a (k-1)*(k-1) matrix, prior Wishart distribution scale matrix
#' @param BURN_IN_ITERATIONS :number of iterations to burn-in at first
#' @param MAX_ITERATIONS :full sample iterations
#' @param DRAW_CYCLE :reduce the full sample by draw-cycle(e.g. draw every 20th draw from the full sample)
#' @return A list of Gibbs sampling traces
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' # APA data application
#' # It will take about 10 minutes to run the demo.
#' data(APA)
#' y = freq2case(APA, freq.col = 1)
#' y = 6 - y
#' # number of observed judges
#' n = dim(y)[1]
#' # number of items
#' k = dim(y)[2]
#' # number of parameters of beta
#' p = k
#' beta0 = rep(0, p)
#' alpha = k + 1
#' A0 = diag(100, ncol = p, nrow = p)
#' P = diag(k + 1, ncol = k - 1, nrow = k - 1)
#' # Construct Z
#' Z = array(0, dim = c(n, k, p))
#' for (j in 1:n)
#' {
#' Z[j, , ] = diag(1, nrow= k, ncol = p)
#' }
#' # Total iterations of Gibbs sampling
#' MAX_ITERATIONS = 10000
#' # Number of iterations to be reduced(burnt in)
#' BURN_IN_ITERATIONS = 1000
#' # Run the model, time consuming
#' # output_list = mvnos.model(y, p, Z, beta0, A0, alpha, P,
#' # MAX_ITERATIONS = MAX_ITERATIONS, BURN_IN_ITERATIONS = BURN_IN_ITERATIONS)
#' @references Yu, P. L. H. (2000). Bayesian analysis of order-statistics models for ranking data. Psychometrika, 65(3):281-299.
mvnos.model <- function(y, p, Z, beta0 = NULL, A0 = NULL, alpha = NULL, P = NULL,
                        BURN_IN_ITERATIONS = 1000, MAX_ITERATIONS = 10000, DRAW_CYCLE = 20)
{
    # Fit the multivariate-normal order-statistics (MVNOS) model of
    # Yu (2000) by Gibbs sampling through JAGS (via rjags).
    # Input:
    #   y     : n*k matrix, each row an individual's ranking of the k items
    #   p     : number of regression parameters beta
    #   Z     : n*k*p array of covariates associated with all judges
    #   beta0 : length-p prior normal mean (default: zeros)
    #   A0    : p*p prior normal variance-covariance matrix (default diag(100))
    #   alpha : scalar prior Wishart degrees of freedom (default k + 1)
    #   P     : (k-1)*(k-1) prior Wishart scale matrix (default diag(k + 1))
    #   BURN_IN_ITERATIONS, MAX_ITERATIONS, DRAW_CYCLE : MCMC control
    # Output:
    #   the list produced by summary_trace() on the thinned, rescaled
    #   posterior draws of beta and Sigma
    item_name = colnames(y)
    y = as.matrix(y)
    names(y) = NULL
    n = dim(y)[1] # number of individuals (judges)
    k = dim(y)[2] # number of items
    # Default priors.  The dimension checks only emit a message and do
    # not abort (preserved behaviour); JAGS fails later if they are
    # truly wrong.
    if (is.null(beta0)) beta0 = rep(0, p)
    if (is.null(A0)) A0 = diag(100, ncol = p, nrow = p)
    if (any(dim(A0) != c(p, p))) message("A0 should be a p * p matrix.")
    if (is.null(alpha)) alpha = k + 1
    if (is.null(P)) P = diag(k + 1, ncol = k - 1, nrow = k - 1)
    if (any(dim(P) != c(k - 1, k - 1))) message("P should be a (k-1) * (k-1) matrix.")
    # Difference the covariates against item k (the reference item):
    # X[j, i, ] = Z[j, i, ] - Z[j, k, ] for i = 1..k-1.
    X = array(0, dim = c(n, k - 1, p))
    for (j in 1:n)
    {
        X[j, , ] = t(t(Z[j, 1:(k-1), ]) - Z[j, k, ])
    }
    # Starting value of w (standardized rank scores relative to item k)
    yk = matrix(rep(y[, k], k), ncol = k)
    w = (y - yk) / sqrt((k ^ 2 - 1) / 12)
    ##################################################################
    # Run the Gibbs sampler through JAGS
    data <- list(X = X, y = y, n = n, p = p, k = k, alpha = alpha, beta0 = beta0, A0 = A0, P = P)
    init <- list(w = w)
    init$w[, k] = NA  # w[, k] is fixed to 0 inside the model
    # When p == 1, beta is scalar, so the model uses dnorm and "*";
    # otherwise dmnorm and "%*%".  The two strings are otherwise identical.
    if (p == 1)
    {
        strModelCode = "
        var X[n, k - 1, p], bounds[n, k - 1, 2], beta[p]
        data
        {
            for(i in 1:n)
            {
                for(j in 1:(k-1))
                {
                    ones[i, j] <- 1
                }
            }
            lower <- -1e+5
            upper <- 1e+5
        }
        model
        {
            for (i in 1:n)
            {
                for (j in 1:(k-1))
                {
                    bounds[i, j, 1] <- equals(y[i,j], 1) * lower + inprod(w[i, ], equals(y[i, ], y[i, j] - 1))
                    bounds[i, j, 2] <- equals(y[i,j], k) * upper + inprod(w[i, ], equals(y[i, ], y[i, j] + 1))
                    ones[i, j] ~ dinterval(w[i, j], bounds[i, j, ])
                }
                w[i, 1:(k-1)] ~ dmnorm(X[i, , ] * beta, G)
                w[i, k] <- 0
            }
            beta ~ dnorm(beta0, A0)
            G ~ dwish(P, alpha)
            Sigma <- inverse(G)
        }"
    } else
    {
        strModelCode = "
        var X[n, k - 1, p], bounds[n, k - 1, 2], beta[p]
        data
        {
            for(i in 1:n)
            {
                for(j in 1:(k-1))
                {
                    ones[i, j] <- 1
                }
            }
            lower <- -1e+5
            upper <- 1e+5
        }
        model
        {
            for (i in 1:n)
            {
                for (j in 1:(k-1))
                {
                    bounds[i, j, 1] <- equals(y[i,j], 1) * lower + inprod(w[i, ], equals(y[i, ], y[i, j] - 1))
                    bounds[i, j, 2] <- equals(y[i,j], k) * upper + inprod(w[i, ], equals(y[i, ], y[i, j] + 1))
                    ones[i, j] ~ dinterval(w[i, j], bounds[i, j, ])
                }
                w[i, 1:(k-1)] ~ dmnorm(X[i, , ] %*% beta, G)
                w[i, k] <- 0
            }
            beta ~ dmnorm(beta0, A0)
            G ~ dwish(P, alpha)
            Sigma <- inverse(G)
        }"
    }
    jags <- rjags::jags.model(textConnection(strModelCode), data, init)
    update(jags, BURN_IN_ITERATIONS)
    samp <- rjags::coda.samples(jags, c("beta", "Sigma"), MAX_ITERATIONS)
    # End of running JAGS
    #################################################################
    posterior_data = as.matrix(samp)
    # Thin the full sample: keep every DRAW_CYCLE-th draw
    draw_index = seq(DRAW_CYCLE, MAX_ITERATIONS, DRAW_CYCLE)
    posterior_trace = posterior_data[draw_index, ]
    # drop = FALSE keeps one-column selections as matrices (k = 2 / p = 1),
    # so the Sigma_trace[, 1] extraction below cannot fail.
    Sigma_trace = posterior_trace[, 1:(k-1)^2, drop = FALSE]
    beta_trace = posterior_trace[, ((k - 1)^2 + 1):((k - 1)^2 + p), drop = FALSE]
    # Identify the scale by dividing through by Sigma[1, 1]
    Sigma11 = Sigma_trace[, 1]
    beta_trace = as.matrix(beta_trace / sqrt(Sigma11))
    Sigma_trace = Sigma_trace / Sigma11
    # Create an output list
    output_list = summary_trace(beta_trace, Sigma_trace, item_name = item_name)
    return(output_list)
} | /R/mvnos.model.R | no_license | cran/StatMethRank | R | false | false | 6,393 | r | #' Multivariate Normal Order-statistics Model.
#'
#' Using MCMC methods to fit the MVNOS model. Please install JAGS 3.X (\url{http://mcmc-jags.sourceforge.net}) and rjags (\url{https://cran.r-project.org/package=rjags}) at first.
#'
#' @param y :an n*k matrix, observed data, each row is an individual's rank of items
#' @param p :number of parameters in MVNOS model
#' @param Z :a n*k*p array of covariates associated with all judges
#' @param beta0 :a 1*p matrix, prior normal distribution mean parameters
#' @param A0 :a p*p matrix, prior normal distribution variance-covariance matrix
#' @param alpha :scalar, prior Wishart distribution degree of freedom
#' @param P :a (k-1)*(k-1) matrix, prior Wishart distribution scale matrix
#' @param BURN_IN_ITERATIONS :number of iterations to burn-in at first
#' @param MAX_ITERATIONS :full sample iterations
#' @param DRAW_CYCLE :reduce the full sample by draw-cycle(e.g. draw every 20th draw from the full sample)
#' @return A list of Gibbs sampling traces
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' # APA data application
#' # It will take about 10 minutes to run the demo.
#' data(APA)
#' y = freq2case(APA, freq.col = 1)
#' y = 6 - y
#' # number of observed judges
#' n = dim(y)[1]
#' # number of items
#' k = dim(y)[2]
#' # number of parameters of beta
#' p = k
#' beta0 = rep(0, p)
#' alpha = k + 1
#' A0 = diag(100, ncol = p, nrow = p)
#' P = diag(k + 1, ncol = k - 1, nrow = k - 1)
#' # Construct Z
#' Z = array(0, dim = c(n, k, p))
#' for (j in 1:n)
#' {
#' Z[j, , ] = diag(1, nrow= k, ncol = p)
#' }
#' # Total iterations of Gibbs sampling
#' MAX_ITERATIONS = 10000
#' # Number of iterations to be reduced(burnt in)
#' BURN_IN_ITERATIONS = 1000
#' # Run the model, time consuming
#' # output_list = mvnos.model(y, p, Z, beta0, A0, alpha, P,
#' # MAX_ITERATIONS = MAX_ITERATIONS, BURN_IN_ITERATIONS = BURN_IN_ITERATIONS)
#' @references Yu, P. L. H. (2000). Bayesian analysis of order-statistics models for ranking data. Psychometrika, 65(3):281-299.
mvnos.model <- function(y, p, Z, beta0 = NULL, A0 = NULL, alpha = NULL, P = NULL,
BURN_IN_ITERATIONS = 1000, MAX_ITERATIONS = 10000, DRAW_CYCLE = 20)
{
# Author:Li Qinglong
# Input:
# y :an n*k matrix, observed data, each row is an individual's rank of items
# p :number of parameters in MVNOS model
# Z :a n*k*p array of covariates associated with all judges
# beta0 :a 1*p matrix, prior normal distribution mean parameters
# A0 :a p*p matrix, prior normal distribution variance-covariance matrix
# alpha :singular, prior Wishart distribution degree of freedom
# P :a (k-1)*(k-1) matrix, prior Wishart distribution scale matrix
# Output:
# A list of Gibbs sampling trace
# require(rjags)
# Initialization
# Prior distribution parameters
# Default ones
item_name = colnames(y)
y = as.matrix(y)
names(y) = NULL
n = dim(y)[1] # number of individuls
k = dim(y)[2] # number of items
if (is.null(beta0)) beta0 = rep(0, p)
if (is.null(A0)) A0 = diag(100, ncol = p, nrow = p)
if (any(dim(A0) != c(p, p))) message("A0 shouble a p * p matrix.")
if (is.null(alpha)) alpha = k + 1
if (is.null(P)) P = diag(k + 1, ncol = k - 1, nrow = k - 1)
if (any(dim(P) != c(k - 1, k - 1))) message("P shouble a (k-1) * (k-1) matrix.")
X = array(0, dim = c(n, k - 1, p))
for (j in 1:n)
{
Zk = Z[j, k,]
X[j, , ] = t(t(Z[j, 1:(k-1), ]) - Z[j, k, ])
}
#Starting value of w(Standardized rank score)
yk = matrix(rep(y[, k], k), ncol = k)
w = (y - yk) / sqrt((k ^ 2 - 1) / 12)
##################################################################
# Start to using JAGS
# Total iterates of Gibbs sampling
MAX_ITERATIONS = MAX_ITERATIONS
# Number of iterates to be reduced(burnt in)
BURN_IN_ITERATIONS = BURN_IN_ITERATIONS
data <- list(X = X, y = y, n = n, p = p, k = k, alpha = alpha, beta0 = beta0, A0 = A0, P = P)
init <- list(w = w)
init$w[, k] = NA
# JAGS code
# When p=1, beta ~ dnorm(beta0, A0)
if (p == 1)
{
strModelCode = "
var X[n, k - 1, p], bounds[n, k - 1, 2], beta[p]
data
{
for(i in 1:n)
{
for(j in 1:(k-1))
{
ones[i, j] <- 1
}
}
lower <- -1e+5
upper <- 1e+5
}
model
{
for (i in 1:n)
{
for (j in 1:(k-1))
{
bounds[i, j, 1] <- equals(y[i,j], 1) * lower + inprod(w[i, ], equals(y[i, ], y[i, j] - 1))
bounds[i, j, 2] <- equals(y[i,j], k) * upper + inprod(w[i, ], equals(y[i, ], y[i, j] + 1))
ones[i, j] ~ dinterval(w[i, j], bounds[i, j, ])
}
w[i, 1:(k-1)] ~ dmnorm(X[i, , ] * beta, G)
w[i, k] <- 0
}
beta ~ dnorm(beta0, A0)
G ~ dwish(P, alpha)
Sigma <- inverse(G)
}"
} else
{
strModelCode = "
var X[n, k - 1, p], bounds[n, k - 1, 2], beta[p]
data
{
for(i in 1:n)
{
for(j in 1:(k-1))
{
ones[i, j] <- 1
}
}
lower <- -1e+5
upper <- 1e+5
}
model
{
for (i in 1:n)
{
for (j in 1:(k-1))
{
bounds[i, j, 1] <- equals(y[i,j], 1) * lower + inprod(w[i, ], equals(y[i, ], y[i, j] - 1))
bounds[i, j, 2] <- equals(y[i,j], k) * upper + inprod(w[i, ], equals(y[i, ], y[i, j] + 1))
ones[i, j] ~ dinterval(w[i, j], bounds[i, j, ])
}
w[i, 1:(k-1)] ~ dmnorm(X[i, , ] %*% beta, G)
w[i, k] <- 0
}
beta ~ dmnorm(beta0, A0)
G ~ dwish(P, alpha)
Sigma <- inverse(G)
}"
}
jags <- rjags::jags.model(textConnection(strModelCode), data, init)
update(jags, BURN_IN_ITERATIONS)
samp <- rjags::coda.samples(jags, c("beta", "Sigma"), MAX_ITERATIONS)
# End of running JAGS
#################################################################
posterior_data = as.matrix(samp)
draw_index = seq(DRAW_CYCLE, MAX_ITERATIONS, DRAW_CYCLE)
posterior_trace = posterior_data[draw_index, ]
Sigma_trace = posterior_trace[, 1:(k-1)^2]
beta_trace = posterior_trace[, ((k - 1)^2 + 1):((k - 1)^2 + p)]
# Scaled by Sigma11
Sigma11 = Sigma_trace[, 1]
beta_trace = as.matrix(beta_trace / sqrt(Sigma11))
Sigma_trace = Sigma_trace / Sigma11
# Create an output list
output_list = summary_trace(beta_trace, Sigma_trace, item_name = item_name)
return(output_list)
} |
#----------------------------------------------------------------------------------------------------
#' @import methods
#' @import TrenaProject
#' @importFrom AnnotationDbi select
#' @import org.Hs.eg.db
#'
#' @title TrenaProjectLymphocyte-class
#'
#' @name TrenaProjectLymphocyte-class
#' @rdname TrenaProjectLymphocyte-class
#' @aliases TrenaProjectLymphocyte
#' @exportClass TrenaProjectLymphocyte
#'
# Class generator for TrenaProjectLymphocyte: a thin S4 subclass of
# TrenaProjectHG38 that adds no slots of its own -- all behaviour comes
# from the hg38 parent; the subclass only carries the project identity
# supplied by the constructor below.
.TrenaProjectLymphocyte <- setClass("TrenaProjectLymphocyte",
                                    contains="TrenaProjectHG38")
#----------------------------------------------------------------------------------------------------
#' Define an object of class TrenaProjectLymphocyte
#'
#' @description
#' Expression, variant and covariate data for the genes of interest (perhaps unbounded) for pre-term birth studies
#'
#' @rdname TrenaProjectLymphocyte-class
#'
#' @export
#'
#' @return An object of the TrenaProjectLymphocyte class
#'
TrenaProjectLymphocyte <- function(quiet=TRUE)
{
   # Construct a TrenaProjectLymphocyte object.
   #
   # Reads every gene-set file shipped in extdata/geneSets (one gene per
   # tab/newline-separated token, '#' comments allowed) into a named
   # list, then delegates initialisation to the TrenaProjectHG38
   # constructor and wraps the result in the S4 subclass.
   directory <- system.file(package="TrenaProjectLymphocyte", "extdata", "geneSets")
   geneSet.files <- list.files(directory)
   geneSets <- list()
   for(file in geneSet.files){
      full.path <- file.path(directory, file)
      genes <- scan(full.path, sep="\t", what=character(0), quiet=TRUE, comment.char="#")
      # Anchored "\\.txt$" strips only a literal trailing extension; the
      # old pattern ".txt" matched any character followed by "txt"
      # anywhere in the file name.
      geneSet.name <- sub("\\.txt$", "", file)
      geneSets[[geneSet.name]] <- genes
      }
   footprintDatabaseNames <- c("lymphoblast_hint_16", "lymphoblast_hint_20", "lymphoblast_wellington_16", "lymphoblast_wellington_20")
   dataDirectory <- system.file(package="TrenaProjectLymphocyte", "extdata")
   footprintDatabaseHost <- "khaleesi.systemsbiology.net"
   footprintDatabasePort <- 5432
   stopifnot(file.exists(dataDirectory))
   # TODO: this should be hidden by, provided by TrenaProjectHG38:
   geneInfoTable.path <- system.file(package="TrenaProjectHG38", "extdata", "geneInfoTable.RData")
   # NOTE(review): geneSets[[2]] relies on the listing order of the files
   # in extdata/geneSets -- confirm which set is intended and consider
   # selecting it by name instead.
   .TrenaProjectLymphocyte(TrenaProjectHG38(projectName="TrenaProject Lymphocyte",
                                            supportedGenes=geneSets[[2]],
                                            footprintDatabaseHost=footprintDatabaseHost,
                                            footprintDatabasePort=footprintDatabasePort,
                                            footprintDatabaseNames=footprintDatabaseNames,
                                            packageDataDirectory=dataDirectory,
                                            quiet=quiet
                                            ))
} # TrenaProjectLymphocyte, the constructor
#----------------------------------------------------------------------------------------------------
| /R/TrenaProjectLymphocyte.R | permissive | PriceLab/TrenaProjectLymphocyte | R | false | false | 2,734 | r | #----------------------------------------------------------------------------------------------------
#' @import methods
#' @import TrenaProject
#' @importFrom AnnotationDbi select
#' @import org.Hs.eg.db
#'
#' @title TrenaProjectLymphocyte-class
#'
#' @name TrenaProjectLymphocyte-class
#' @rdname TrenaProjectLymphocyte-class
#' @aliases TrenaProjectLymphocyte
#' @exportClass TrenaProjectLymphocyte
#'
.TrenaProjectLymphocyte <- setClass("TrenaProjectLymphocyte",
contains="TrenaProjectHG38")
#----------------------------------------------------------------------------------------------------
#' Define an object of class TrenaProjectLymphocyte
#'
#' @description
#' Expression, variant and covariate data for the genes of interest (perhaps unbounded) for pre-term birth studies
#'
#' @rdname TrenaProjectLymphocyte-class
#'
#' @export
#'
#' @return An object of the TrenaProjectLymphocyte class
#'
TrenaProjectLymphocyte <- function(quiet=TRUE)
{
genomeName <- "hg38"
directory <- system.file(package="TrenaProjectLymphocyte", "extdata", "geneSets")
geneSet.files <- list.files(directory)
geneSets <- list()
for(file in geneSet.files){
full.path <- file.path(directory, file)
genes <- scan(full.path, sep="\t", what=character(0), quiet=TRUE, comment.char="#")
geneSet.name <- sub(".txt", "", file)
geneSets[[geneSet.name]] <- genes
}
footprintDatabaseNames <- c("lymphoblast_hint_16", "lymphoblast_hint_20", "lymphoblast_wellington_16", "lymphoblast_wellington_20")
dataDirectory <- system.file(package="TrenaProjectLymphocyte", "extdata")
footprintDatabaseHost <- "khaleesi.systemsbiology.net"
footprintDatabasePort <- 5432
covariatesFile <- NA_character_;
stopifnot(file.exists(dataDirectory))
# TODO: this should be hidden by, provided by TrenaProjectHG38:
geneInfoTable.path <- system.file(package="TrenaProjectHG38", "extdata", "geneInfoTable.RData")
.TrenaProjectLymphocyte(TrenaProjectHG38(projectName="TrenaProject Lymphocyte",
supportedGenes=geneSets[[2]],
footprintDatabaseHost=footprintDatabaseHost,
footprintDatabasePort=footprintDatabasePort,
footprintDatabaseNames=footprintDatabaseNames,
packageDataDirectory=dataDirectory,
quiet=quiet
))
} # TrenaProjectLymphocyte, the constructor
#----------------------------------------------------------------------------------------------------
|
#' @export
#' @rdname kernelfun
#'
#' @examples
#' kernel_properties("gaussian")
#'
kernel_properties <-
  function(name,
           derivative = FALSE)
{
  # Look up a fixed table of analytic properties of the smoothing kernel
  # `name` (lower-cased, partially matched against .kernelsList()).
  # Returns a list with elements: canonical_bandwidth, continuity,
  # differentiability, fac, integral_K, integral_K2, name, derivative.
  # When derivative = TRUE, the values returned are the *_deriv
  # constants below, i.e. the properties of the kernel's derivative;
  # canonical_bandwidth and fac are then NA because they are not
  # tabulated for derivatives.
  # NOTE(review): the numeric constants are hard-coded closed forms
  # (e.g. gaussian integral_K2 = 1/(2*sqrt(pi)), and 0.1410474 for the
  # gaussian integral_K2_deriv) -- verify against the kernel literature
  # before changing any of them.
  name <- match.arg(tolower(name), .kernelsList())
  # Canonical bandwidth constant, per kernel (NA where not tabulated).
  canonical_bandwidth <- switch(name,
                                biweight = 5 * sqrt(7)/49,
                                chernoff = NA_real_,
                                cosine = 3/4 * sqrt(1/3 - 2/pi^2),
                                eddy = NA_real_,
                                epanechnikov = 3/(5 * sqrt(5)),
                                gaussian = 1/(2 * sqrt(pi)),
                                optcosine = sqrt(1 - 8/pi^2) * pi^2/16,
                                rectangular = sqrt(3)/6,
                                triangular = sqrt(6)/9,
                                uniform = NA_real_)
  canonical_bandwidth_deriv <- NA_real_
  # Bandwidth rescaling factor, per kernel (NA where not tabulated).
  fac <- switch(name,
                biweight = 2 * sqrt(7),
                chernoff = NA,
                cosine = 2/sqrt(1/3 - 2/pi^2),
                eddy = NA,
                epanechnikov = 2 * sqrt(5),
                gaussian = 4,
                optcosine = 2/sqrt(1 - 8/pi^2),
                rectangular = 2 * sqrt(3),
                triangular = 2 * sqrt(6),
                uniform = NA)
  fac_deriv <- NA_real_
  # Integral of K over its support: all kernels here integrate to one.
  integral_K <- switch(name,
                       biweight = 1,
                       chernoff = 1,
                       cosine = 1,
                       eddy = 1,
                       epanechnikov = 1,
                       gaussian = 1,
                       optcosine = 1,
                       rectangular = 1,
                       triangular = 1,
                       uniform = 1)
  # Integral of the derivative K' (0 for all kernels except biweight,
  # which is left NA here -- review the asymmetry before relying on it).
  integral_K_deriv <- switch(name,
                             biweight = NA,
                             chernoff = 0,
                             cosine = 0,
                             eddy = 0,
                             epanechnikov = 0,
                             gaussian = 0,
                             optcosine = 0,
                             rectangular = 0,
                             triangular = 0,
                             uniform = 0)
  # Integral of K(x)^2 (the roughness constant), per kernel.
  integral_K2 <- switch(name,
                        biweight = 1/2,
                        chernoff = 1/2,
                        cosine = (3/4)*sqrt(1/3 - 2/pi^2),
                        eddy = 1.25,
                        epanechnikov = 3/5,
                        gaussian = 1/(2*sqrt(pi)),
                        optcosine = (pi^2/16)*sqrt(1 - 8/pi^2),
                        rectangular = 1/2,
                        triangular = 2/3,
                        uniform = 1/2)
  # Integral of K'(x)^2, per kernel.
  integral_K2_deriv <- switch(name,
                              biweight = 15/(49*sqrt(7)),
                              chernoff = 0,
                              cosine = (pi^2/4)*(sqrt(1/3 - 2/pi^2))^3,
                              eddy = 9.375,
                              epanechnikov = 3/2,
                              gaussian = 0.1410474,
                              optcosine = (pi^4/64)*(sqrt(1 - 8/pi^2))^3,
                              rectangular = 0,
                              triangular = 2,
                              uniform = 0)
  # Smoothness indicators (Inf for infinitely smooth kernels such as the
  # gaussian; exact order semantics to be confirmed against the package
  # documentation).
  continuity <- switch(name,
                       biweight = Inf,
                       chernoff = 0,
                       cosine = Inf,
                       eddy = 1,
                       epanechnikov = 1,
                       gaussian = Inf,
                       optcosine = 1,
                       rectangular = 0,
                       triangular = 1,
                       uniform = 0)
  continuity_deriv <- switch(name,
                             biweight = Inf,
                             chernoff = Inf,
                             cosine = Inf,
                             eddy = 0,
                             epanechnikov = 0,
                             gaussian = Inf,
                             optcosine = 0,
                             rectangular = Inf,
                             triangular = 0,
                             uniform = Inf)
  differentiability <- switch(name,
                              biweight = Inf,
                              chernoff = 0,
                              cosine = Inf,
                              eddy = 0,
                              epanechnikov = 0,
                              gaussian = Inf,
                              optcosine = 0,
                              rectangular = 0,
                              triangular = 0,
                              uniform = 0)
  differentiability_deriv <- switch(name,
                                    biweight = Inf,
                                    chernoff = Inf,
                                    cosine = Inf,
                                    eddy = 0,
                                    epanechnikov = 0,
                                    gaussian = Inf,
                                    optcosine = 0,
                                    rectangular = Inf,
                                    triangular = 0,
                                    uniform = Inf)
  # Both branches return the same field names; derivative = TRUE simply
  # substitutes the *_deriv constants.
  if (derivative) {
    list(canonical_bandwidth = canonical_bandwidth_deriv,
         continuity = continuity_deriv,
         differentiability = differentiability_deriv,
         fac = fac_deriv,
         integral_K = integral_K_deriv,
         integral_K2 = integral_K2_deriv,
         name = name,
         derivative = derivative)
  } else {
    list(canonical_bandwidth = canonical_bandwidth,
         continuity = continuity,
         differentiability = differentiability,
         fac = fac,
         integral_K = integral_K,
         integral_K2 = integral_K2,
         name = name,
         derivative = derivative)
  }
}
| /R/kernel_properties.R | no_license | paulponcet/statip | R | false | false | 5,939 | r |
#' @export
#' @rdname kernelfun
#'
#' @examples
#' kernel_properties("gaussian")
#'
kernel_properties <-
function(name,
derivative = FALSE)
{
name <- match.arg(tolower(name), .kernelsList())
canonical_bandwidth <- switch(name,
biweight = 5 * sqrt(7)/49,
chernoff = NA_real_,
cosine = 3/4 * sqrt(1/3 - 2/pi^2),
eddy = NA_real_,
epanechnikov = 3/(5 * sqrt(5)),
gaussian = 1/(2 * sqrt(pi)),
optcosine = sqrt(1 - 8/pi^2) * pi^2/16,
rectangular = sqrt(3)/6,
triangular = sqrt(6)/9,
uniform = NA_real_)
canonical_bandwidth_deriv <- NA_real_
fac <- switch(name,
biweight = 2 * sqrt(7),
chernoff = NA,
cosine = 2/sqrt(1/3 - 2/pi^2),
eddy = NA,
epanechnikov = 2 * sqrt(5),
gaussian = 4,
optcosine = 2/sqrt(1 - 8/pi^2),
rectangular = 2 * sqrt(3),
triangular = 2 * sqrt(6),
uniform = NA)
fac_deriv <- NA_real_
integral_K <- switch(name,
biweight = 1,
chernoff = 1,
cosine = 1,
eddy = 1,
epanechnikov = 1,
gaussian = 1,
optcosine = 1,
rectangular = 1,
triangular = 1,
uniform = 1)
integral_K_deriv <- switch(name,
biweight = NA,
chernoff = 0,
cosine = 0,
eddy = 0,
epanechnikov = 0,
gaussian = 0,
optcosine = 0,
rectangular = 0,
triangular = 0,
uniform = 0)
integral_K2 <- switch(name,
biweight = 1/2,
chernoff = 1/2,
cosine = (3/4)*sqrt(1/3 - 2/pi^2),
eddy = 1.25,
epanechnikov = 3/5,
gaussian = 1/(2*sqrt(pi)),
optcosine = (pi^2/16)*sqrt(1 - 8/pi^2),
rectangular = 1/2,
triangular = 2/3,
uniform = 1/2)
integral_K2_deriv <- switch(name,
biweight = 15/(49*sqrt(7)),
chernoff = 0,
cosine = (pi^2/4)*(sqrt(1/3 - 2/pi^2))^3,
eddy = 9.375,
epanechnikov = 3/2,
gaussian = 0.1410474,
optcosine = (pi^4/64)*(sqrt(1 - 8/pi^2))^3,
rectangular = 0,
triangular = 2,
uniform = 0)
continuity <- switch(name,
biweight = Inf,
chernoff = 0,
cosine = Inf,
eddy = 1,
epanechnikov = 1,
gaussian = Inf,
optcosine = 1,
rectangular = 0,
triangular = 1,
uniform = 0)
continuity_deriv <- switch(name,
biweight = Inf,
chernoff = Inf,
cosine = Inf,
eddy = 0,
epanechnikov = 0,
gaussian = Inf,
optcosine = 0,
rectangular = Inf,
triangular = 0,
uniform = Inf)
differentiability <- switch(name,
biweight = Inf,
chernoff = 0,
cosine = Inf,
eddy = 0,
epanechnikov = 0,
gaussian = Inf,
optcosine = 0,
rectangular = 0,
triangular = 0,
uniform = 0)
differentiability_deriv <- switch(name,
biweight = Inf,
chernoff = Inf,
cosine = Inf,
eddy = 0,
epanechnikov = 0,
gaussian = Inf,
optcosine = 0,
rectangular = Inf,
triangular = 0,
uniform = Inf)
if (derivative) {
list(canonical_bandwidth = canonical_bandwidth_deriv,
continuity = continuity_deriv,
differentiability = differentiability_deriv,
fac = fac_deriv,
integral_K = integral_K_deriv,
integral_K2 = integral_K2_deriv,
name = name,
derivative = derivative)
} else {
list(canonical_bandwidth = canonical_bandwidth,
continuity = continuity,
differentiability = differentiability,
fac = fac,
integral_K = integral_K,
integral_K2 = integral_K2,
name = name,
derivative = derivative)
}
}
|
#' @title Backslash to Forward Slash
#'
#' @description This function is primary for windows users. When copying file paths on windows they will contain backslashes, "\", which will throw an error in R because "\" is used for escape characters. Due to how R reads "\", this function reads the filepath directly from the clipboard. The user can either run *back_to_forward()* in the console to get the path with forward slashes, or put *back_to_forward()* on a hotkey for instant posting within the script.
#'
#' @details Primarily for Windows users. When copying file paths in Windows it will use a backslash to seperate files, this will throw an error in R. The process of manually changing the backslashe to forward slashes can get highly repetitive. The `back_to_forward()` function will take what is in the users clipboard and paste the path with the wanted forward slashes. There are two approaches to this; 1) run `back_to_forward()` in the console to retreive the string version with forward slashes of the path, or 2) Assign a hotkey to the addin `Back to Forward Slash`. Shout out to the `reprex` package for testing `clipr` methods.
#'
#' @param text A String. Default uses the text in your clipboard. This should not be altered from the default. Exists primarily for the sake of internal testing.
#' @param render A Logical. Defaults to `TRUE`. Exists primarily for the sake of internal testing.
#'
#' @return A string. A file path that is compatible with R.
#' @export
#'
#' @examples
#' \dontrun{
#' # Example path in clipboard:
#' # C:\Documents\Newsletters\Summer2018.csv
#' back_to_forward()
#' }
back_to_forward <- function(text = clipr::read_clip(), render = TRUE) {
  # Convert Windows-style backslashes in `text` (default: the current
  # clipboard contents) to forward slashes so the path is usable in R.
  #
  # Args:
  #   text   : character string, typically a copied Windows file path.
  #   render : if TRUE, insert the converted path at the cursor via
  #            rstudioapi; if FALSE, skip the editor insertion.
  # Returns the converted string, invisibly, in both cases.
  #
  # gsub() with fixed = TRUE treats "\\" as a literal backslash, which
  # avoids the quadruple-escaped regex, and is already a no-op when the
  # string contains no backslashes -- so the previous
  # stringr::str_detect() pre-check (and the stringr dependency) is
  # unnecessary.
  text <- gsub("\\", "/", text, fixed = TRUE)
  if (render) {
    rstudioapi::insertText(text)
  }
  return(invisible(text))
}
| /R/back_to_forward.R | permissive | KoderKow/kowr | R | false | false | 1,923 | r | #' @title Backslash to Forward Slash
#'
#' @description This function is primary for windows users. When copying file paths on windows they will contain backslashes, "\", which will throw an error in R because "\" is used for escape characters. Due to how R reads "\", this function reads the filepath directly from the clipboard. The user can either run *back_to_forward()* in the console to get the path with forward slashes, or put *back_to_forward()* on a hotkey for instant posting within the script.
#'
#' @details Primarily for Windows users. When copying file paths in Windows it will use a backslash to seperate files, this will throw an error in R. The process of manually changing the backslashe to forward slashes can get highly repetitive. The `back_to_forward()` function will take what is in the users clipboard and paste the path with the wanted forward slashes. There are two approaches to this; 1) run `back_to_forward()` in the console to retreive the string version with forward slashes of the path, or 2) Assign a hotkey to the addin `Back to Forward Slash`. Shout out to the `reprex` package for testing `clipr` methods.
#'
#' @param text A String. Default uses the text in your clipboard. This should not be altered from the default. Exists primarily for the sake of internal testing.
#' @param render A Logical. Defaults to `TRUE`. Exists primarily for the sake of internal testing.
#'
#' @return A string. A file path that is compatible with R.
#' @export
#'
#' @examples
#' \dontrun{
#' # Example path in clipboard:
#' # C:\Documents\Newsletters\Summer2018.csv
#' back_to_forward()
#' }
back_to_forward <- function(text = clipr::read_clip(), render = TRUE) {
if (stringr::str_detect(text, "\\\\")) {
text <- gsub(
pattern = "\\\\",
replacement = "/",
x = text
)
}
if (render) {
rstudioapi::insertText(text)
} else {
text
}
return(invisible(text))
}
|
# Course project plot 4: PM2.5 emissions from coal-combustion sources
# (1999-2008), drawn from the EPA National Emissions Inventory.
library("data.table")
library("ggplot2")
# NOTE(review): machine-specific working directory -- adjust before reuse.
setwd("C:/Users/Home/Desktop/coursera")
path <- getwd()
# Download and unpack the NEI data archive into the working directory.
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
              , destfile = paste(path, "dataFiles.zip", sep = "/"))
unzip(zipfile = "dataFiles.zip")
# Load the NEI & SCC data frames.
NEI <- data.table::as.data.table(x = readRDS("summarySCC_PM25.rds"))
SCC <- data.table::as.data.table(x = readRDS("Source_Classification_Code.rds"))
# Subset coal combustion related NEI data: keep SCC codes whose level-one
# description mentions "comb" AND whose level-four mentions "coal".
combustionRelated <- grepl("comb", SCC[, SCC.Level.One], ignore.case=TRUE)
coalRelated <- grepl("coal", SCC[, SCC.Level.Four], ignore.case=TRUE)
combustionSCC <- SCC[combustionRelated & coalRelated, SCC]
combustionNEI <- NEI[NEI[,SCC] %in% combustionSCC]
# Bar chart of total emissions (in 10^5 tons) per measurement year,
# written to plot4.png; the dev.off() on the next line closes the device.
png("plot4.png")
ggplot(combustionNEI,aes(x = factor(year),y = Emissions/10^5)) +
geom_bar(stat="identity", fill ="#FF9999", width=0.75) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
labs(title=expression("PM"[2.5]*" Coal Combustion Source Emissions Across US from 1999-2008"))
dev.off() | /Plot4.R | no_license | Jadson-Correa/Exploratory | R | false | false | 1,122 | r | library("data.table")
library("ggplot2")
setwd("C:/Users/Home/Desktop/coursera")
path <- getwd()
download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
, destfile = paste(path, "dataFiles.zip", sep = "/"))
unzip(zipfile = "dataFiles.zip")
# Load the NEI & SCC data frames.
NEI <- data.table::as.data.table(x = readRDS("summarySCC_PM25.rds"))
SCC <- data.table::as.data.table(x = readRDS("Source_Classification_Code.rds"))
# Subset coal combustion related NEI data
combustionRelated <- grepl("comb", SCC[, SCC.Level.One], ignore.case=TRUE)
coalRelated <- grepl("coal", SCC[, SCC.Level.Four], ignore.case=TRUE)
combustionSCC <- SCC[combustionRelated & coalRelated, SCC]
combustionNEI <- NEI[NEI[,SCC] %in% combustionSCC]
png("plot4.png")
ggplot(combustionNEI,aes(x = factor(year),y = Emissions/10^5)) +
geom_bar(stat="identity", fill ="#FF9999", width=0.75) +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) +
labs(title=expression("PM"[2.5]*" Coal Combustion Source Emissions Across US from 1999-2008"))
dev.off() |
library(phytools)
# load treespace and packages for plotting:
library(treespace)
library(phylogram)
library(phangorn)
library(seqinr)
library(adegraphics)
library(adegenet)
library(apTreeshape)
library(ggtree)
library(ape)
library(ggplot2)
library(tidyverse)
# Set seed for reproducibility
set.seed(23)
# Load metadata file
# Compare E. coli SD phylogenies produced by four SNP pipelines
# (Lyve-SET, kSNP3, CFSAN, Enterobase): harmonise tips, draw per-pipeline and
# combined tree figures, compute tree-distance statistics, and run treespace.

# Load sample metadata (comma-separated; keep strings as factors for plotting).
SRA_metadata <- read.table("Pipeline_results/Ecoli_SD_geography/exported_trees/Ecoli_SD_metadata.csv",
                           header = TRUE, fill = FALSE, sep = ",",
                           stringsAsFactors = TRUE)
# Read the neighbour-joining tree exported by each pipeline.
tree_dir <- "Pipeline_results/Ecoli_SD_geography/exported_trees"
lyve_tree <- read.tree(file = file.path(tree_dir, "lyveset_NJ.newick"))
# kSNP3 also produces tree.NJ.tre, tree.ML.tre, tree.core.tre, tree.parsimony.tre
ksnp_tree <- read.tree(file = file.path(tree_dir, "ksnp3_NJ.newick"))
cfsan_tree <- read.tree(file = file.path(tree_dir, "cfsan_NJ.newick"))
enterobase_tree <- read.tree(file = file.path(tree_dir, "enterobase_NJ.newick"))
# Keep an untouched copy of the four trees before any pruning.
combined_trees <- c(lyve_tree, ksnp_tree, cfsan_tree, enterobase_tree)
dataset1_tree_vector <- c(as.phylo(lyve_tree), as.phylo(ksnp_tree),
                          as.phylo(cfsan_tree), as.phylo(enterobase_tree))
## Subset trees so all four share the same tip set ----------------------------
# Quick interactive check of tip-label differences between two of the trees.
setdiff(cfsan_tree$tip.label, ksnp_tree$tip.label)
# Collect every accession that is absent from at least one of the pairwise
# comparisons below; these tips cannot be compared across trees.
all_SRA_to_drop <- unique(c(
  setdiff(enterobase_tree$tip.label, ksnp_tree$tip.label),
  setdiff(cfsan_tree$tip.label, enterobase_tree$tip.label)
))
# Prune the unmatched tips from every tree.
lyve_tree <- drop.tip(combined_trees[[1]], all_SRA_to_drop)
ksnp_tree <- drop.tip(combined_trees[[2]], all_SRA_to_drop)
cfsan_tree <- drop.tip(combined_trees[[3]], all_SRA_to_drop)
enterobase_tree <- drop.tip(combined_trees[[4]], all_SRA_to_drop)
# Root each pruned tree on its first tip (r = resolve.root).
lyve_tree_rooted <- root(lyve_tree, 1, r = TRUE)
ksnp_tree_rooted <- root(ksnp_tree, 1, r = TRUE)
cfsan_tree_rooted <- root(cfsan_tree, 1, r = TRUE)
enterobase_tree_rooted <- root(enterobase_tree, 1, r = TRUE)
## Combine rooted and cleaned trees -------------------------------------------
combined_rooted_trees <- c(ksnp_tree_rooted, enterobase_tree_rooted,
                           cfsan_tree_rooted, lyve_tree_rooted)
combined_cleaned_trees <- c(ksnp_tree, enterobase_tree, cfsan_tree, lyve_tree)
names(combined_cleaned_trees) <- c("ksnp", "enterobase", "cfsan", "lyveset")
densityTree(combined_rooted_trees, type = "cladogram", nodes = "intermediate")
densityTree(combined_cleaned_trees, type = "cladogram", nodes = "intermediate")
densityTree(combined_rooted_trees, use.edge.length = FALSE, type = "phylogram",
            nodes = "inner", alpha = 0.3)
## ggtree figures --------------------------------------------------------------
# Flag the outbreak SNP cluster so it can be highlighted in every tree figure.
SRA_metadata <- as_tibble(SRA_metadata)
SRA_metadata <- SRA_metadata %>%
  mutate(Group = ifelse(SNP.cluster == "PDS000046273.15", "Outbreak", "Other"))
# Draw one metadata-annotated tree and save it to `out_path`.
# `use_lengths = FALSE` draws the tree as a cladogram (no branch lengths).
save_tree_plot <- function(tree, metadata, out_path, use_lengths = TRUE) {
  base <- if (use_lengths) ggtree(tree) else ggtree(tree, branch.length = "none")
  p <- base %<+% metadata
  p <- p +
    geom_tiplab(offset = .6, hjust = .5) +
    geom_tippoint(aes(shape = Group, color = Group), size = 2) +
    theme(legend.position = "right") +
    scale_size_continuous(range = c(3, 10))
  print(p)
  ggsave(out_path, plot = p, width = 50, height = 80, units = "cm")
  dev.off()
}
out_dir <- "Pipeline_results/Ecoli_SD_geography"
save_tree_plot(lyve_tree, SRA_metadata, file.path(out_dir, "lyve_tree_Ecoli_SD.png"))
save_tree_plot(lyve_tree, SRA_metadata, file.path(out_dir, "lyve_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(cfsan_tree, SRA_metadata, file.path(out_dir, "cfsan_tree_Ecoli_SD.png"))
save_tree_plot(cfsan_tree, SRA_metadata, file.path(out_dir, "cfsan_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(enterobase_tree, SRA_metadata, file.path(out_dir, "enterobase_tree_Ecoli_SD.png"))
save_tree_plot(enterobase_tree, SRA_metadata, file.path(out_dir, "enterobase_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(ksnp_tree, SRA_metadata, file.path(out_dir, "ksnp_tree_Ecoli_SD.png"))
save_tree_plot(ksnp_tree, SRA_metadata, file.path(out_dir, "ksnp_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
## ggtree combined tree figures ------------------------------------------------
pfacet_tree <- ggtree(combined_cleaned_trees) + facet_wrap(~.id, scale = "free") + theme_tree2()
pfacet_tree
ggsave(file.path(out_dir, "combined_trees_wblengths_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
# Same facets drawn without branch lengths.
pfacet_tree2 <- ggtree(combined_cleaned_trees, branch.length = "none") + facet_wrap(~.id, scale = "free") + theme_tree2()
pfacet_tree2
ggsave(file.path(out_dir, "combined_trees_nolengths_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
# All four trees overlaid as a single density tree.
pdense_tree <- ggdensitree(combined_cleaned_trees, alpha = .3, colour = "steelblue") +
  geom_tiplab(size = 3) + xlim(0, 45)
pdense_tree
ggsave(file.path(out_dir, "combined_trees_dense_tree_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
## Per-tree summary statistics -------------------------------------------------
summary(lyve_tree)
sum(lyve_tree$edge.length)
summary(cfsan_tree)
sum(cfsan_tree$edge.length)
summary(enterobase_tree)
sum(enterobase_tree$edge.length)
summary(ksnp_tree)
sum(ksnp_tree$edge.length)
## Pairwise tree-distance statistics --------------------------------------------
# TODO: wrap these pairwise comparisons in a helper and collect results in a table.
# Co-speciation tests (RF and SPR distances) for every pair of pipelines.
cospeciation(ksnp_tree, cfsan_tree, distance = c("RF", "SPR"))
cospeciation(ksnp_tree, enterobase_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, ksnp_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, cfsan_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, enterobase_tree, distance = c("RF", "SPR"))
plot(cospeciation(ksnp_tree, enterobase_tree, distance = c("RF")))
cospeciation(cfsan_tree, enterobase_tree, distance = c("RF", "SPR"))
# Topology equality checks for every pair.
all.equal.phylo(lyve_tree, ksnp_tree)
all.equal.phylo(lyve_tree, cfsan_tree)
all.equal.phylo(lyve_tree, enterobase_tree)
all.equal.phylo(ksnp_tree, cfsan_tree)
all.equal.phylo(ksnp_tree, enterobase_tree)
all.equal.phylo(cfsan_tree, enterobase_tree)
# Side-by-side comparison plots.
comparePhylo(lyve_tree, ksnp_tree, plot = TRUE)
comparePhylo(lyve_tree, cfsan_tree, plot = TRUE)
comparePhylo(lyve_tree, enterobase_tree, plot = TRUE)
comparePhylo(ksnp_tree, cfsan_tree, plot = TRUE)
comparePhylo(ksnp_tree, enterobase_tree, plot = TRUE)
comparePhylo(cfsan_tree, enterobase_tree, plot = TRUE)
# Tanglegrams (see http://phytools.org/mexico2018/ex/12/Plotting-methods.html).
plot(cophylo(lyve_tree, ksnp_tree))
plot(cophylo(lyve_tree, cfsan_tree))
plot(cophylo(lyve_tree, enterobase_tree))
plot(cophylo(ksnp_tree, cfsan_tree))
plot(cophylo(ksnp_tree, enterobase_tree))
plot(cophylo(cfsan_tree, enterobase_tree))
# Detailed kSNP vs CFSAN tanglegram with labelled internal nodes.
obj <- cophylo(ksnp_tree, cfsan_tree, print = TRUE)
plot(obj, link.type = "curved", link.lwd = 3, link.lty = "solid",
     link.col = "grey", fsize = 0.8)
nodelabels.cophylo(which = "left", frame = "circle", cex = 0.8)
nodelabels.cophylo(which = "right", frame = "circle", cex = 0.8)
## Treespace -------------------------------------------------------------------
# https://cran.r-project.org/web/packages/treespace/vignettes/introduction.html
combined_treespace <- treespace(combined_rooted_trees, nf = 3)
table.image(combined_treespace$D)
table.value(combined_treespace$D, nclass = 5, method = "color",
            symbol = "circle", col = redpal(6))
plotGroves(combined_treespace$pco, lab.show = TRUE, lab.cex = 1.5)
combined_treespace_groves <- findGroves(combined_treespace)
plotGrovesD3(combined_treespace_groves)
# NOTE(review): apTreeshape's colless.test()/likelihood.test() normally take
# treeshape-style input -- confirm these calls accept treespace objects.
colless.test(combined_treespace_groves, alternative = "greater")
likelihood.test(combined_treespace, alternative = "greater")
## Test OTU grouping -----------------------------------------------------------
# NOTE(review): this filter uses SNP cluster PDS000000366.382 while the Group
# column above uses PDS000046273.15 -- confirm which cluster is intended.
outbreak_group <- SRA_metadata %>%
  filter(SNP.cluster == "PDS000000366.382") %>%
  select(Newick_label)
# Colour the Lyve-SET tree by membership in the outbreak group.
lyve_tree_w_meta <- groupOTU(lyve_tree, outbreak_group, group_name = "Outbreak")
p <- ggtree(lyve_tree_w_meta, aes(color = Outbreak)) +
  scale_color_manual(values = c("#efad29", "#63bbd4")) +
  geom_nodepoint(color = "black", size = 0.1) +
  geom_tiplab(size = 2, color = "black")
p
## Extra code ------------------------------------------------------------------
# Branch lengths recomputed from topology.
plot(compute.brlen(lyve_tree))
# Branch lengths derived from branching times (node ages/heights).
plot(compute.brtime(lyve_tree))
# Pairwise topological distances across the cleaned trees.
dist.topo(combined_cleaned_trees)
dnd1 <- as.dendrogram(lyve_tree)
dnd2 <- as.dendrogram(ksnp_tree)
plotTree(enterobase_tree)
| /scripts/2_Ecoli_SD_phylogenetic_tree_analysis.R | no_license | TheNoyesLab/FMPRE_WGS_project | R | false | false | 12,720 | r |
# Compare E. coli SD phylogenies produced by four SNP pipelines
# (Lyve-SET, kSNP3, CFSAN, Enterobase): harmonise tips, draw per-pipeline and
# combined tree figures, compute tree-distance statistics, and run treespace.
library(phytools)
# load treespace and packages for plotting:
library(treespace)
library(phylogram)
library(phangorn)
library(seqinr)
library(adegraphics)
library(adegenet)
library(apTreeshape)
library(ggtree)
library(ape)
library(ggplot2)
library(tidyverse)
# Set seed for reproducibility
set.seed(23)
# Load sample metadata (comma-separated; keep strings as factors for plotting).
SRA_metadata <- read.table("Pipeline_results/Ecoli_SD_geography/exported_trees/Ecoli_SD_metadata.csv",
                           header = TRUE, fill = FALSE, sep = ",",
                           stringsAsFactors = TRUE)
# Read the neighbour-joining tree exported by each pipeline.
tree_dir <- "Pipeline_results/Ecoli_SD_geography/exported_trees"
lyve_tree <- read.tree(file = file.path(tree_dir, "lyveset_NJ.newick"))
# kSNP3 also produces tree.NJ.tre, tree.ML.tre, tree.core.tre, tree.parsimony.tre
ksnp_tree <- read.tree(file = file.path(tree_dir, "ksnp3_NJ.newick"))
cfsan_tree <- read.tree(file = file.path(tree_dir, "cfsan_NJ.newick"))
enterobase_tree <- read.tree(file = file.path(tree_dir, "enterobase_NJ.newick"))
# Keep an untouched copy of the four trees before any pruning.
combined_trees <- c(lyve_tree, ksnp_tree, cfsan_tree, enterobase_tree)
dataset1_tree_vector <- c(as.phylo(lyve_tree), as.phylo(ksnp_tree),
                          as.phylo(cfsan_tree), as.phylo(enterobase_tree))
## Subset trees so all four share the same tip set ----------------------------
# Quick interactive check of tip-label differences between two of the trees.
setdiff(cfsan_tree$tip.label, ksnp_tree$tip.label)
# Collect every accession that is absent from at least one of the pairwise
# comparisons below; these tips cannot be compared across trees.
all_SRA_to_drop <- unique(c(
  setdiff(enterobase_tree$tip.label, ksnp_tree$tip.label),
  setdiff(cfsan_tree$tip.label, enterobase_tree$tip.label)
))
# Prune the unmatched tips from every tree.
lyve_tree <- drop.tip(combined_trees[[1]], all_SRA_to_drop)
ksnp_tree <- drop.tip(combined_trees[[2]], all_SRA_to_drop)
cfsan_tree <- drop.tip(combined_trees[[3]], all_SRA_to_drop)
enterobase_tree <- drop.tip(combined_trees[[4]], all_SRA_to_drop)
# Root each pruned tree on its first tip (r = resolve.root).
lyve_tree_rooted <- root(lyve_tree, 1, r = TRUE)
ksnp_tree_rooted <- root(ksnp_tree, 1, r = TRUE)
cfsan_tree_rooted <- root(cfsan_tree, 1, r = TRUE)
enterobase_tree_rooted <- root(enterobase_tree, 1, r = TRUE)
## Combine rooted and cleaned trees -------------------------------------------
combined_rooted_trees <- c(ksnp_tree_rooted, enterobase_tree_rooted,
                           cfsan_tree_rooted, lyve_tree_rooted)
combined_cleaned_trees <- c(ksnp_tree, enterobase_tree, cfsan_tree, lyve_tree)
names(combined_cleaned_trees) <- c("ksnp", "enterobase", "cfsan", "lyveset")
densityTree(combined_rooted_trees, type = "cladogram", nodes = "intermediate")
densityTree(combined_cleaned_trees, type = "cladogram", nodes = "intermediate")
densityTree(combined_rooted_trees, use.edge.length = FALSE, type = "phylogram",
            nodes = "inner", alpha = 0.3)
## ggtree figures --------------------------------------------------------------
# Flag the outbreak SNP cluster so it can be highlighted in every tree figure.
SRA_metadata <- as_tibble(SRA_metadata)
SRA_metadata <- SRA_metadata %>%
  mutate(Group = ifelse(SNP.cluster == "PDS000046273.15", "Outbreak", "Other"))
# Draw one metadata-annotated tree and save it to `out_path`.
# `use_lengths = FALSE` draws the tree as a cladogram (no branch lengths).
save_tree_plot <- function(tree, metadata, out_path, use_lengths = TRUE) {
  base <- if (use_lengths) ggtree(tree) else ggtree(tree, branch.length = "none")
  p <- base %<+% metadata
  p <- p +
    geom_tiplab(offset = .6, hjust = .5) +
    geom_tippoint(aes(shape = Group, color = Group), size = 2) +
    theme(legend.position = "right") +
    scale_size_continuous(range = c(3, 10))
  print(p)
  ggsave(out_path, plot = p, width = 50, height = 80, units = "cm")
  dev.off()
}
out_dir <- "Pipeline_results/Ecoli_SD_geography"
save_tree_plot(lyve_tree, SRA_metadata, file.path(out_dir, "lyve_tree_Ecoli_SD.png"))
save_tree_plot(lyve_tree, SRA_metadata, file.path(out_dir, "lyve_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(cfsan_tree, SRA_metadata, file.path(out_dir, "cfsan_tree_Ecoli_SD.png"))
save_tree_plot(cfsan_tree, SRA_metadata, file.path(out_dir, "cfsan_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(enterobase_tree, SRA_metadata, file.path(out_dir, "enterobase_tree_Ecoli_SD.png"))
save_tree_plot(enterobase_tree, SRA_metadata, file.path(out_dir, "enterobase_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
save_tree_plot(ksnp_tree, SRA_metadata, file.path(out_dir, "ksnp_tree_Ecoli_SD.png"))
save_tree_plot(ksnp_tree, SRA_metadata, file.path(out_dir, "ksnp_tree_nolengths_Ecoli_SD.png"), use_lengths = FALSE)
## ggtree combined tree figures ------------------------------------------------
pfacet_tree <- ggtree(combined_cleaned_trees) + facet_wrap(~.id, scale = "free") + theme_tree2()
pfacet_tree
ggsave(file.path(out_dir, "combined_trees_wblengths_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
# Same facets drawn without branch lengths.
pfacet_tree2 <- ggtree(combined_cleaned_trees, branch.length = "none") + facet_wrap(~.id, scale = "free") + theme_tree2()
pfacet_tree2
ggsave(file.path(out_dir, "combined_trees_nolengths_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
# All four trees overlaid as a single density tree.
pdense_tree <- ggdensitree(combined_cleaned_trees, alpha = .3, colour = "steelblue") +
  geom_tiplab(size = 3) + xlim(0, 45)
pdense_tree
ggsave(file.path(out_dir, "combined_trees_dense_tree_Ecoli_SD.png"), width = 50, height = 80, units = "cm")
dev.off()
## Per-tree summary statistics -------------------------------------------------
summary(lyve_tree)
sum(lyve_tree$edge.length)
summary(cfsan_tree)
sum(cfsan_tree$edge.length)
summary(enterobase_tree)
sum(enterobase_tree$edge.length)
summary(ksnp_tree)
sum(ksnp_tree$edge.length)
## Pairwise tree-distance statistics --------------------------------------------
# TODO: wrap these pairwise comparisons in a helper and collect results in a table.
# Co-speciation tests (RF and SPR distances) for every pair of pipelines.
cospeciation(ksnp_tree, cfsan_tree, distance = c("RF", "SPR"))
cospeciation(ksnp_tree, enterobase_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, ksnp_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, cfsan_tree, distance = c("RF", "SPR"))
cospeciation(lyve_tree, enterobase_tree, distance = c("RF", "SPR"))
plot(cospeciation(ksnp_tree, enterobase_tree, distance = c("RF")))
cospeciation(cfsan_tree, enterobase_tree, distance = c("RF", "SPR"))
# Topology equality checks for every pair.
all.equal.phylo(lyve_tree, ksnp_tree)
all.equal.phylo(lyve_tree, cfsan_tree)
all.equal.phylo(lyve_tree, enterobase_tree)
all.equal.phylo(ksnp_tree, cfsan_tree)
all.equal.phylo(ksnp_tree, enterobase_tree)
all.equal.phylo(cfsan_tree, enterobase_tree)
# Side-by-side comparison plots.
comparePhylo(lyve_tree, ksnp_tree, plot = TRUE)
comparePhylo(lyve_tree, cfsan_tree, plot = TRUE)
comparePhylo(lyve_tree, enterobase_tree, plot = TRUE)
comparePhylo(ksnp_tree, cfsan_tree, plot = TRUE)
comparePhylo(ksnp_tree, enterobase_tree, plot = TRUE)
comparePhylo(cfsan_tree, enterobase_tree, plot = TRUE)
# Tanglegrams (see http://phytools.org/mexico2018/ex/12/Plotting-methods.html).
plot(cophylo(lyve_tree, ksnp_tree))
plot(cophylo(lyve_tree, cfsan_tree))
plot(cophylo(lyve_tree, enterobase_tree))
plot(cophylo(ksnp_tree, cfsan_tree))
plot(cophylo(ksnp_tree, enterobase_tree))
plot(cophylo(cfsan_tree, enterobase_tree))
# Detailed kSNP vs CFSAN tanglegram with labelled internal nodes.
obj <- cophylo(ksnp_tree, cfsan_tree, print = TRUE)
plot(obj, link.type = "curved", link.lwd = 3, link.lty = "solid",
     link.col = "grey", fsize = 0.8)
nodelabels.cophylo(which = "left", frame = "circle", cex = 0.8)
nodelabels.cophylo(which = "right", frame = "circle", cex = 0.8)
## Treespace -------------------------------------------------------------------
# https://cran.r-project.org/web/packages/treespace/vignettes/introduction.html
combined_treespace <- treespace(combined_rooted_trees, nf = 3)
table.image(combined_treespace$D)
table.value(combined_treespace$D, nclass = 5, method = "color",
            symbol = "circle", col = redpal(6))
plotGroves(combined_treespace$pco, lab.show = TRUE, lab.cex = 1.5)
combined_treespace_groves <- findGroves(combined_treespace)
plotGrovesD3(combined_treespace_groves)
# NOTE(review): apTreeshape's colless.test()/likelihood.test() normally take
# treeshape-style input -- confirm these calls accept treespace objects.
colless.test(combined_treespace_groves, alternative = "greater")
likelihood.test(combined_treespace, alternative = "greater")
## Test OTU grouping -----------------------------------------------------------
# NOTE(review): this filter uses SNP cluster PDS000000366.382 while the Group
# column above uses PDS000046273.15 -- confirm which cluster is intended.
outbreak_group <- SRA_metadata %>%
  filter(SNP.cluster == "PDS000000366.382") %>%
  select(Newick_label)
# Colour the Lyve-SET tree by membership in the outbreak group.
lyve_tree_w_meta <- groupOTU(lyve_tree, outbreak_group, group_name = "Outbreak")
p <- ggtree(lyve_tree_w_meta, aes(color = Outbreak)) +
  scale_color_manual(values = c("#efad29", "#63bbd4")) +
  geom_nodepoint(color = "black", size = 0.1) +
  geom_tiplab(size = 2, color = "black")
p
## Extra code ------------------------------------------------------------------
# Branch lengths recomputed from topology.
plot(compute.brlen(lyve_tree))
# Branch lengths derived from branching times (node ages/heights).
plot(compute.brtime(lyve_tree))
# Pairwise topological distances across the cleaned trees.
dist.topo(combined_cleaned_trees)
dnd1 <- as.dendrogram(lyve_tree)
dnd2 <- as.dendrogram(ksnp_tree)
plotTree(enterobase_tree)
|
# Course assignment plot 3: energy sub-metering over two days of the UCI
# household power-consumption data.
# Extract the raw data file from the downloaded archive.
unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip")
# Read the semicolon-separated measurement file.
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Parse the Date column and keep only the two days of interest.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
# Fuse Date and Time into a single timestamp, then drop the Time column.
power$Date <- strptime(paste(power$Date, power$Time), format = "%Y-%m-%d %H:%M:%S")
power <- power[, -2]
# The measurement columns were read as text; coerce each one to numeric.
for (col in c("Global_active_power", "Sub_metering_1",
              "Sub_metering_2", "Sub_metering_3")) {
  power[[col]] <- as.numeric(as.character(power[[col]]))
}
# Draw the three sub-metering series on a single set of axes.
plot(power$Date, power$Sub_metering_1, type = "n",
     xlab = "", ylab = "Energy sub metering")
lines(power$Date, power$Sub_metering_1, col = "black")
lines(power$Date, power$Sub_metering_2, col = "red")
lines(power$Date, power$Sub_metering_3, col = "blue")
legend("topright", lty = 1, cex = 0.75, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to plot3.png.
dev.copy(png, file = "plot3.png")
# Close png device
dev.off() | /plot3.R | no_license | cleversonsch/ExData_Plotting1 | R | false | false | 1,515 | r | #Unzip File
unzip("exdata%2Fdata%2Fhousehold_power_consumption.zip")  # extract raw data file
# Load the full semicolon-delimited household power-consumption file.
hpc <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
# Parse dates and restrict to the two target days (2007-02-01, 2007-02-02).
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
hpc <- subset(hpc, Date == "2007-02-01" | Date == "2007-02-02")
# Build a full timestamp from Date + Time, then drop the separate Time column.
hpc$Date <- strptime(with(hpc, paste(Date, Time)), format = "%Y-%m-%d %H:%M:%S")
hpc <- hpc[, -2]
# Measurement columns arrive as text; convert each to numeric.
num_cols <- c("Global_active_power", "Sub_metering_1",
              "Sub_metering_2", "Sub_metering_3")
hpc[num_cols] <- lapply(hpc[num_cols], function(v) as.numeric(as.character(v)))
# Plot the three sub-metering series over time on one set of axes.
with(hpc, {
  plot(Date, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
  lines(Date, Sub_metering_1, col = "black")
  lines(Date, Sub_metering_2, col = "red")
  lines(Date, Sub_metering_3, col = "blue")
})
legend("topright", lty = 1, cex = 0.75, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to plot3.png.
dev.copy(png, file = "plot3.png")
# Close png device
dev.off() |
# Auto-generated example script for eba::inclusion.rule (extracted from the
# package's Rd examples).
library(eba)
### Name: inclusion.rule
### Title: Inclusion Rule
### Aliases: inclusion.rule
### Keywords: models
### ** Examples
# Each list element is the aspect set of one alternative; aspects 5 and 6 are
# shared among pairs of alternatives, giving a tree-structured family.
A <- list(c(1, 5), c(2, 5), c(3, 6), c(4, 6)) # tree
inclusion.rule(A)
# Here the second alternative carries both shared aspects (5 and 6), so the
# family forms a lattice rather than a tree.
B <- list(c(1, 5), c(2, 5, 6), c(3, 6), c(4, 6)) # lattice
inclusion.rule(B)
| /data/genthat_extracted_code/eba/examples/inclusion.rule.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 287 | r | library(eba)
### Name: inclusion.rule
### Title: Inclusion Rule
### Aliases: inclusion.rule
### Keywords: models
### ** Examples
# Each list element is the aspect set of one alternative; aspects 5 and 6 are
# shared among pairs of alternatives, giving a tree-structured family.
A <- list(c(1, 5), c(2, 5), c(3, 6), c(4, 6)) # tree
inclusion.rule(A)
# Here the second alternative carries both shared aspects (5 and 6), so the
# family forms a lattice rather than a tree.
B <- list(c(1, 5), c(2, 5, 6), c(3, 6), c(4, 6)) # lattice
inclusion.rule(B)
|
# Topic: frequency tables built from the iris data set.
# 1. Load the example data
data(iris)
# 2. Explore the column names
names(iris)
# 3. Explore the dimensions of the data
dim(iris)
# 4. Identify the variable types
str(iris)
# 5. Check for missing values
anyNA(iris)
# 6. Explore individual variables
iris$Species
iris$Sepal.Length
# 7. Check the storage type of the data set
typeof(iris)
# 8. Tabulate Petal.Length and keep the absolute frequencies.
#    BUG FIX: the original read iris$Petal.Lenght (misspelled), which is NULL,
#    so table() had nothing to tabulate.
tabla_PL <- as.data.frame(table(PL = iris$Petal.Length))
# 9. Show the contingency table with its absolute frequencies
tabla_PL
# 10. Build the full frequency table, rounding relative frequencies to 3 decimals
transform(tabla_PL,
          freqAc = cumsum(Freq),
          Rel = round(prop.table(Freq), 3),
          RelAc = round(cumsum(prop.table(Freq)), 3))
# 11. Group Petal.Length into classes and count each class.
#     NOTE(review): the original comment said 10 classes but breaks = 9 is
#     used; kept at 9 to preserve the original output.
tabla_clases <- as.data.frame(table(
  Petal.lenght = factor(cut(iris$Petal.Length,
                            breaks = 9))))
# 12. Display the class table
tabla_clases | /Tablas de frecuencia.R | no_license | Vale9908/Probabilidad-y-estadistica | R | false | false | 1,293 | r |
# Topic: frequency tables built from the iris data set.
# 1. Load the example data
data(iris)
# 2. Explore the column names
names(iris)
# 3. Explore the dimensions of the data
dim(iris)
# 4. Identify the variable types
str(iris)
# 5. Check for missing values
anyNA(iris)
# 6. Explore individual variables
iris$Species
iris$Sepal.Length
# 7. Check the storage type of the data set
typeof(iris)
# 8. Tabulate Petal.Length and keep the absolute frequencies.
#    BUG FIX: the original read iris$Petal.Lenght (misspelled), which is NULL,
#    so table() had nothing to tabulate.
tabla_PL <- as.data.frame(table(PL = iris$Petal.Length))
# 9. Show the contingency table with its absolute frequencies
tabla_PL
# 10. Build the full frequency table, rounding relative frequencies to 3 decimals
transform(tabla_PL,
          freqAc = cumsum(Freq),
          Rel = round(prop.table(Freq), 3),
          RelAc = round(cumsum(prop.table(Freq)), 3))
# 11. Group Petal.Length into classes and count each class.
#     NOTE(review): the original comment said 10 classes but breaks = 9 is
#     used; kept at 9 to preserve the original output.
tabla_clases <- as.data.frame(table(
  Petal.lenght = factor(cut(iris$Petal.Length,
                            breaks = 9))))
# 12. Display the class table
tabla_clases |
#' Compare the frequency of observed edges of a specific type and the expected frequency of these edges, given the presence of a different edge type
#'
#' @param GO A vector of Strings, equal to the length of all the nodes. The names of the vector should be the names of the nodes. The values should either be the functional annotations, or a concatenation of the functional annotations, separated by a "_" symbol.
#' @param edges A matrix of Strings, with at least two columns. Each row will represent an edge, linking the node in the first column to the node in the second column. Please make sure the node names are the same as those in "GO"
#' @param GOtypes This is a vector that contains the functional annotations or GO terms that are of interest
#' @return A matrix that has the same number of rows and columns as length(GOtypes)*length(GOtypes). The value at position [i,j] will contain ratio between the observed and expected frequency of edges of type i, based on the frequency of edges of type j.
#' @export
#' @examples
#' GO=c(rep('A,C', 5), rep('A', 5), rep('C', 5), rep('B,D', 5), rep('B', 5), rep('D', 5))
#' names(GO)=paste("node", 1:length(GO))
#' edges=cbind(names(GO)[1:(length(GO)/2)], names(GO)[(length(GO)/2+1):length(GO)])
#' GOtypes=c('A', "B", "C", "D")
#' pafway_meta(GO, edges, GOtypes)
pafway_meta <- function(GO, edges, GOtypes) {
  # Restrict the annotation vector to nodes that actually occur in the edge list.
  GOinNetwork = GO[unique(c(edges[, 1], edges[, 2]))]
  # freq_together[i, j]: number of network nodes annotated with BOTH term i and
  # term j (the diagonal is the per-term node count).
  # NOTE(review): grepl() matches substrings, so a term that is a substring of
  # another term (e.g. "A" vs "AB") will cross-match -- confirm that the
  # GOtypes labels are unambiguous.
  freq_together=sapply(GOtypes, function(i) { #how often a gene has both terms
    sapply(GOtypes, function(j) {
      length(which(grepl(i, GOinNetwork) & grepl(j, GOinNetwork)))
    })
  })
  # freq_from[i] / freq_to[i]: number of edges whose source / target node
  # carries term i.
  freq_from=sapply(GOtypes, function(i){
    length(which(grepl(i, GOinNetwork[edges[, 1]])))
  })
  freq_to=sapply(GOtypes, function(i){
    length(which(grepl(i, GOinNetwork[edges[, 2]])))
  })
  # freq_mat[i, j]: observed number of edges from a term-i node to a term-j
  # node (transposed so that rows index the source term).
  freq_mat=t(sapply(GOtypes, function(i) { #how often an edge is found
    sapply(GOtypes, function(j) {
      length(which(grepl(i, GOinNetwork[edges[, 1]]) & grepl(j, GOinNetwork[edges[, 2]])))
    })
  }))
  # All ordered pairs of terms, each representing one edge "type"; stored as
  # length-2 character vectors c(source_term, target_term).
  possible_edges=strsplit(sapply(GOtypes, function(i) {
    sapply(GOtypes, function(j) {
      paste(i,j,sep=",")
    })}), ",")
  names_possible_edges=sapply(possible_edges, function(i){paste(i[1], i[2], sep="->")})
  # expected_freq_mat: expected count of one edge type given the observed
  # counts of another.  Each of the four source/target partitions of the
  # conditioning edge type a->b is weighted by the conditional probabilities
  # that its endpoints also carry terms c and d.
  expected_freq_mat=sapply(possible_edges, function(i){
    sapply(possible_edges, function(j){
      a=i[1]
      b=i[2]
      c=j[1]
      d=j[2]
      # Partition all edges by whether the source carries a and the target carries b.
      a_to_b=freq_mat[a,b]
      a_to_nb=(freq_from[a]-freq_mat[a,b])
      na_to_b=(freq_to[b]-freq_mat[a,b])
      na_to_nb=(length(edges[,1])-freq_from[a]-freq_to[b]+freq_mat[a,b])
      # P(node also carries c | node carries a), P(node carries c | node lacks a),
      # and the analogous probabilities for b/d on the target side.
      p_a_and_c=freq_together[a,c]/freq_together[a,a]
      p_na_and_c=(freq_together[c,c]-freq_together[a,c])/(length(GOinNetwork)-freq_together[a,a])
      p_b_and_d=freq_together[b,d]/freq_together[b,b]
      p_nb_and_d=(freq_together[d,d]-freq_together[b,d])/(length(GOinNetwork)-freq_together[b,b])
      # Expected number of c->d edges, summed over the four partitions.
      a_to_b*p_a_and_c*p_b_and_d+
        a_to_nb*p_a_and_c*p_nb_and_d+
        na_to_b*p_na_and_c*p_b_and_d+
        na_to_nb*p_na_and_c*p_nb_and_d
    })
  })
  rownames(expected_freq_mat)= names_possible_edges
  colnames(expected_freq_mat)= names_possible_edges
  # true_freq_mat: the observed count of each edge type, repeated across every
  # column so it can be divided elementwise by the expected counts.
  true_freq_mat=sapply(possible_edges, function(i){
    sapply(possible_edges, function(j){
      c=j[1]
      d=j[2]
      freq_mat[c,d]
    })
  })
  # Observed / expected ratio; > 1 means the edge type occurs more often than
  # the conditioning edge type predicts.
  true_freq_mat/expected_freq_mat
}
| /R/metapafway.R | no_license | cran/PAFway | R | false | false | 3,694 | r | #' Compare the frequency of observed edges of a specific type and the expected frequency of these edges, given the presence of a different edge type
#'
#' @param GO A vector of Strings, equal to the length of all the nodes. The names of the vector should be the names of the nodes. The values should either be the functional annotations, or a concatenation of the functional annotations, separated by a "_" symbol.
#' @param edges A matrix of Strings, with at least two columns. Each row will represent an edge, linking the node in the first column to the node in the second column. Please make sure the node names are the same as those in "GO"
#' @param GOtypes This is a vector that contains the functional annotations or GO terms that are of interest
#' @return A matrix that has the same number of rows and columns as length(GOtypes)*length(GOtypes). The value at position [i,j] will contain ratio between the observed and expected frequency of edges of type i, based on the frequency of edges of type j.
#' @export
#' @examples
#' GO=c(rep('A,C', 5), rep('A', 5), rep('C', 5), rep('B,D', 5), rep('B', 5), rep('D', 5))
#' names(GO)=paste("node", 1:length(GO))
#' edges=cbind(names(GO)[1:(length(GO)/2)], names(GO)[(length(GO)/2+1):length(GO)])
#' GOtypes=c('A', "B", "C", "D")
#' pafway_meta(GO, edges, GOtypes)
pafway_meta <- function(GO, edges, GOtypes) {
  # Restrict the annotation vector to nodes that actually occur in the edge list.
  GOinNetwork = GO[unique(c(edges[, 1], edges[, 2]))]
  # freq_together[i, j]: number of network nodes annotated with BOTH term i and
  # term j (the diagonal is the per-term node count).
  # NOTE(review): grepl() matches substrings, so a term that is a substring of
  # another term (e.g. "A" vs "AB") will cross-match -- confirm that the
  # GOtypes labels are unambiguous.
  freq_together=sapply(GOtypes, function(i) { #how often a gene has both terms
    sapply(GOtypes, function(j) {
      length(which(grepl(i, GOinNetwork) & grepl(j, GOinNetwork)))
    })
  })
  # freq_from[i] / freq_to[i]: number of edges whose source / target node
  # carries term i.
  freq_from=sapply(GOtypes, function(i){
    length(which(grepl(i, GOinNetwork[edges[, 1]])))
  })
  freq_to=sapply(GOtypes, function(i){
    length(which(grepl(i, GOinNetwork[edges[, 2]])))
  })
  # freq_mat[i, j]: observed number of edges from a term-i node to a term-j
  # node (transposed so that rows index the source term).
  freq_mat=t(sapply(GOtypes, function(i) { #how often an edge is found
    sapply(GOtypes, function(j) {
      length(which(grepl(i, GOinNetwork[edges[, 1]]) & grepl(j, GOinNetwork[edges[, 2]])))
    })
  }))
  # All ordered pairs of terms, each representing one edge "type"; stored as
  # length-2 character vectors c(source_term, target_term).
  possible_edges=strsplit(sapply(GOtypes, function(i) {
    sapply(GOtypes, function(j) {
      paste(i,j,sep=",")
    })}), ",")
  names_possible_edges=sapply(possible_edges, function(i){paste(i[1], i[2], sep="->")})
  # expected_freq_mat: expected count of one edge type given the observed
  # counts of another.  Each of the four source/target partitions of the
  # conditioning edge type a->b is weighted by the conditional probabilities
  # that its endpoints also carry terms c and d.
  expected_freq_mat=sapply(possible_edges, function(i){
    sapply(possible_edges, function(j){
      a=i[1]
      b=i[2]
      c=j[1]
      d=j[2]
      # Partition all edges by whether the source carries a and the target carries b.
      a_to_b=freq_mat[a,b]
      a_to_nb=(freq_from[a]-freq_mat[a,b])
      na_to_b=(freq_to[b]-freq_mat[a,b])
      na_to_nb=(length(edges[,1])-freq_from[a]-freq_to[b]+freq_mat[a,b])
      # P(node also carries c | node carries a), P(node carries c | node lacks a),
      # and the analogous probabilities for b/d on the target side.
      p_a_and_c=freq_together[a,c]/freq_together[a,a]
      p_na_and_c=(freq_together[c,c]-freq_together[a,c])/(length(GOinNetwork)-freq_together[a,a])
      p_b_and_d=freq_together[b,d]/freq_together[b,b]
      p_nb_and_d=(freq_together[d,d]-freq_together[b,d])/(length(GOinNetwork)-freq_together[b,b])
      # Expected number of c->d edges, summed over the four partitions.
      a_to_b*p_a_and_c*p_b_and_d+
        a_to_nb*p_a_and_c*p_nb_and_d+
        na_to_b*p_na_and_c*p_b_and_d+
        na_to_nb*p_na_and_c*p_nb_and_d
    })
  })
  rownames(expected_freq_mat)= names_possible_edges
  colnames(expected_freq_mat)= names_possible_edges
  # true_freq_mat: the observed count of each edge type, repeated across every
  # column so it can be divided elementwise by the expected counts.
  true_freq_mat=sapply(possible_edges, function(i){
    sapply(possible_edges, function(j){
      c=j[1]
      d=j[2]
      freq_mat[c,d]
    })
  })
  # Observed / expected ratio; > 1 means the edge type occurs more often than
  # the conditioning edge type predicts.
  true_freq_mat/expected_freq_mat
}
|
#' Objects, features
#'
#' The objects are the front end entities, the usual "GIS contract" objects,
#' the features.
#'
#' @seealso `sc_coord` for the coordinates part of the model, `sc_path` for
#' the central part of the model, and `PATH` for the full model.
#' @name sc_object
#' @importFrom tibble as_tibble
#' @export
#' @examples
#' #library(sf)
#' #nc <- st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
#' #sc_object(nc)
sc_object.sf <- function(x, ...) {
  # Drop the geometry list-column, then convert the remaining attribute
  # columns (the feature "objects") to a tibble.
  attribute_table <- .st_set_geometry(x)
  faster_as_tibble(attribute_table)
}
## Drop the geometry column from an sf-like object, returning a plain
## data.frame (a dependency-free stand-in for sf::st_set_geometry(x, NULL)).
.st_set_geometry <- function(x, value = NULL) {
  # The geometry column's name is recorded in the "sf_column" attribute.
  geometry_column <- attr(x, "sf_column")
  x[[geometry_column]] <- NULL
  as.data.frame(x)
}
.st_get_geometry <- function(x) {
x[[attr(x, "sf_column")]]
} | /R/sf-object.r | no_license | MilesMcBain/silicate | R | false | false | 788 | r | #' Objects, features
#'
#' The objects are the front end entities, the usual "GIS contract" objects,
#' the features.
#'
#' @seealso `sc_coord` for the coordinates part of the model, `sc_path` for
#' the central part of the model, and `PATH` for the full model.
#' @name sc_object
#' @importFrom tibble as_tibble
#' @export
#' @examples
#' #library(sf)
#' #nc <- st_read(system.file("shape/nc.shp", package="sf"), quiet = TRUE)
#' #sc_object(nc)
sc_object.sf <- function(x, ...) {
  # Strip the sf geometry column, then convert the remaining attribute
  # columns (the feature "objects") to a tibble.
  faster_as_tibble(.st_set_geometry(x))
}
## Drop the geometry column from an sf-like object, returning a plain
## data.frame (a dependency-free stand-in for sf::st_set_geometry(x, NULL)).
.st_set_geometry <- function(x, value = NULL) {
  # The geometry column's name is recorded in the "sf_column" attribute.
  geometry_column <- attr(x, "sf_column")
  x[[geometry_column]] <- NULL
  as.data.frame(x)
}
## Return the geometry column of an sf-like object, located via the
## "sf_column" attribute.
.st_get_geometry <- function(x) {
  geometry_column <- attr(x, "sf_column")
  x[[geometry_column]]
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.misc.R
\name{model.laws}
\alias{model.laws}
\title{Laws et al Model}
\usage{
model.laws(NPP, SST, chl = NULL)
}
\description{
Laws et al Model
}
\author{
Thomas Bryce Kelly
}
| /man/model.laws.Rd | no_license | tbrycekelly/TheSource | R | false | true | 259 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.misc.R
\name{model.laws}
\alias{model.laws}
\title{Laws et al Model}
\usage{
model.laws(NPP, SST, chl = NULL)
}
\description{
Laws et al Model
}
\author{
Thomas Bryce Kelly
}
|
#' Wait until a PIC-SURE query result becomes AVAILABLE.
#'
#' Polls the result-status endpoint every 0.2 seconds until the service
#' reports AVAILABLE, printing a reassurance message roughly every 2 seconds;
#' stops with an error if the service reports ERROR.
#'
#' @author Gregoire Versmee, Laura Versmee
available.result <- function(env, resultID, token, verbose = FALSE) {
  # Build the status URL once instead of re-pasting it on every poll.
  status_url <- paste0(env, "/rest/v1/resultService/resultStatus/", resultID)
  message("\nWaiting for PIC-SURE to return the query")
  polls <- 0L
  repeat {
    status <- content.get(status_url, token)$status
    if (status == 'AVAILABLE') break
    if (status == 'ERROR') stop("Query Failed", call. = FALSE)
    Sys.sleep(0.2)
    polls <- polls + 1L
    if (polls %% 10 == 0) message("   ...still waiting")
  }
  if (verbose) message("  Result available \\o/")
}
| /R/available.result.R | no_license | gversmee/picsuRe | R | false | false | 685 | r | #' @author Gregoire Versmee, Laura Versmee
#' Wait until a PIC-SURE query result becomes AVAILABLE.
#'
#' Polls the result-status endpoint every 0.2 seconds until the service
#' reports AVAILABLE; stops with an error if it reports ERROR.
#'
#' @param env Base URL of the PIC-SURE environment.
#' @param resultID Identifier of the query result to wait for.
#' @param token Authentication token forwarded to content.get().
#' @param verbose If TRUE, print a confirmation message once the result is ready.
available.result <- function(env, resultID, token, verbose = FALSE) {
  # Number of completed 0.2 s polling sleeps; used to rate-limit the
  # "...still waiting" message to roughly once every 2 seconds.
  count <- 0L
  message("\nWaiting for PIC-SURE to return the query")
  status <- content.get(paste0(env, "/rest/v1/resultService/resultStatus/", resultID), token)$status
  while (status != 'AVAILABLE') {
    if (status == 'ERROR') {
      stop("Query Failed", call. = FALSE)
    } else {
      # Pause briefly before re-polling the status endpoint.
      Sys.sleep(0.2)
      count <- count + 1L
      if (count%%10 == 0) message("   ...still waiting")
      status <- content.get(paste0(env, "/rest/v1/resultService/resultStatus/", resultID), token)$status
    }
  }
  if (verbose) message("  Result available \\o/")
}
|
##remove all R objects from memory
## NOTE(review): rm(list = ls()) wipes the user's entire workspace and is
## generally discouraged in shared scripts; kept because the authors intend a
## clean-slate interactive run ("highlight all and click run").
rm(list = ls())
##Simply highlight all and click run
##Jeremy Koelmel - 04/22/2019
########################Other Parameters, can manually change########################
#which inclusion list format are you using?
# NOTE(review): exactly one of waters / thermoQE / thermoLumos should be TRUE;
# the export code below checks waters first and falls through to the Thermo
# formats -- confirm before running.
waters=TRUE #are you using waters (may not be applicable to all instruments)
thermoQE=FALSE #thermo? (formatted for Q-Exactive, some instruments require different formats)
thermoLumos=FALSE #thermo? (formatted for Lumos Fusion, some instruments require different formats)
#the percentile of peak areas will be calculated for samples, and used to determine is the samples > average(blanks)+c*stdev(blanks)
PERCENTILE<-0.75
#CAMERA: do you do adduct searching? This is an extra step and is not necessary for inclusion list generation
CameraAdductSearch<-FALSE
# True or false for adduct searching, does this adduct appear in your chromatography? (eg. metDNA library)
HCO2<-TRUE
CH3COO<-TRUE
NH4<-TRUE
#Exact Mass Search? To reduce the number of included ion only to those in your MS/MS library
ExactMass_LibrarySearch<-TRUE
#How narrow a window do you want for inclusion (minutes)
RTWindow<-0.2
#How to split you data files in the case of multiple inclusion lists? (ONLY CHOOSE ONE)
#Have the first list be the top most abundant ions, second list second top most, etc.
SplitByAbundance<-FALSE
#Have each list evenly distributed throughout the retention time
#eg. RT 1,2,3,4,5,6 if split into two lists would be: 1,3,5 and 2,4,6
SplitByRT<-TRUE
#Parameters that can either be changed manually or using the pop-boxes after selecting all and running
# PPM: mass accuracy for peak picking; PeakMin/PeakMax: peak width bounds in
# seconds (per the interactive prompts below); MINFRAC: fraction of samples
# in which a feature must be detected.
PPM<-15
SNTHR <-10
MINFRAC <-.1
PeakMin<-5
PeakMax<-15
#spell this exactly as such "positive" or "negative"
POLARITY<-"positive"
# Multiplier c in the blank filter: keep a feature when
# percentile(samples) > average(blanks) + c * stdev(blanks).
blankFilterConstant<-5
maxInclusionListSize <-100
# Minimum extracted-chromatogram intensity for a feature to be kept.
Threshold <- 2000
###########################################Inclusion Step##################################################
##Step 1: XCMS Peak picking
##Step 2: Filter by blanks and intensity threshold
##Step 3: Filter by exact mass matches to metabolite library / lipid library (if desired)
##Step 4: Generates inclusion lists
# Install the GUI/data-wrangling dependencies only when absent, then attach them.
# NOTE(review): gWidgets/gWidgetstcltk appear to have been archived from CRAN --
# confirm these installs still succeed on a current R.
if("gWidgets" %in% rownames(installed.packages()) == FALSE) {install.packages("gWidgets")}
if("gWidgetstcltk" %in% rownames(installed.packages()) == FALSE) {install.packages("gWidgetstcltk")}
if("dplyr" %in% rownames(installed.packages()) == FALSE) {install.packages("dplyr")}
# NOTE(review): require() fails silently (returns FALSE) when a package is
# missing; library() would error immediately and is usually preferable here.
require(gWidgets)
require(gWidgetstcltk)
options(guiToolkit="tcltk")
Install = ginput(message="first time running? \nShould we attempt to install all packages?\n (software will check if packages exist before installing) \ninput: y or n", title="Install Packages?",icon="question")
if (Install == "y") {
  install.packages("installr")
  library(installr)
  updateR()
  ## Install packages (more than are loaded...)
  #install.packages("colorspace")
  #install.packages("ggplot2")
  # NOTE(review): the biocLite.R bootstrap was retired by Bioconductor
  # (BiocManager::install() replaced it from Bioconductor 3.8); this source()
  # call is expected to fail on current installations -- confirm and migrate.
  source("http://bioconductor.org/biocLite.R")
  if(!requireNamespace("xcms")){biocLite("xcms")}
  #if(!requireNamespace("pandaR")){biocLite("pandaR", version = "3.8")}
  #if(!requireNamespace("faahKO")){biocLite("faahKO")}
  #if(!requireNamespace("MSnbase")){biocLite("MSnbase")}
  #if(!requireNamespace("pander")){biocLite("pander")}
  if(!requireNamespace("CAMERA")){biocLite("CAMERA")}
  #if(!requireNamespace("limma")){biocLite("limma")}
}
## load packages
library(xcms)
# library(faahKO)
# library(RColorBrewer)
# library(pander)
# library(magrittr)
library(MSnbase)
library(CAMERA)
library(dplyr)
#inputs: Import directory with mzML files
d.in <- choose.dir(caption="Directory with mzML files (do not select a file)\nshould contain atleast 1 file ending in neg.mzML or pos.mzML\nas well as 1-3 files'blank' somewhere in the name \n and ending in _neg or _pos.mzML")
if (CameraAdductSearch==TRUE){
AdductFile<-choose.files(caption="import .csv file with adducts to search against \nto be used in CAMERA annotation",multi=FALSE)
}
if (ExactMass_LibrarySearch==TRUE) {
DirExactMassLibrary<-choose.files(caption="import .csv file with metabolites/lipids to search against \nto be used in exact mass matching \nand to reduce the size of the inclusion list",multi=FALSE)
}
ExportName<-ginput(message="What should the export file be called? \nDo not include extension (.csv), periods, or special characters", title="default",icon="question")
setwd(d.in)
POLARITY = ginput(message="What is the polarity? \ninput example: positive OR negative, spell exactly, case sensitive", title="polarity (positve OR negative)",icon="question")
default = ginput(message="use default parameters? \ninput: y or n", title="default",icon="question")
######################interface for parameters#######################
if (default=="n"){
PPM = ginput(message="What is mass accuracy in ppm for peak picking? \ninput: numeric, example 15", title="mass accuracy (ppm)",icon="question")
PeakMin = ginput(message="What is your minimum peak width (seconds)? \ninput: numeric, example 2", title="min Peak Width (seconds)",icon="question")
PeakMax = ginput(message="What is your maximum peak width (seconds)? \ninput: numeric, example 30", title="max Peak Width (seconds)",icon="question")
SNTHR = ginput(message="What do you want for the signal to noise thresold? \nDefault 10, input: numeric", title="signal to noise threshold",icon="question")
MINFRAC = ginput(message="What fraction of samples must have peaks? \ninput example: 0.5", title="signal to noise threshold",icon="question")
blankFilterConstant = ginput(message="What is the number (c) to multiply to the blank standard deviation \nfor which average(sample)-(average(blank)+c*stdev(blank)) must be > 0? \ndefault = 10", title="Blank Subtraction",icon="question")
Threshold = ginput(message="What is the maximum extracted chromatogram intensity to be consider a peak \nall peaks below this intensity will be removed", title="Threshold Peak Filter",icon="question")
maxInclusionListSize = ginput(message="What is the maximum number of ions contained on each inclusion list?", title="Exclusion List Max Size",icon="question")
PPM = as.numeric(PPM)
PeakMin = as.numeric(PeakMin)
PeakMax = as.numeric(PeakMax)
SNTHR = as.numeric(SNTHR)
MINFRAC = as.numeric(MINFRAC)
blankFilterConstant = as.numeric(blankFilterConstant)
maxInclusionListSize = as.numeric(maxInclusionListSize)
}
# ------------------xcms OLD------------------
# 1. peak detection
# 1. Peak detection ----
# Collect every mzML file below the chosen directory (case-insensitive match,
# searching sub-folders too).
files <- dir(d.in, pattern = '(?i)mzml', recursive = TRUE, full.names = TRUE)
# Count sample files (NEG/POS in the name) and blank files ("blank" in the name).
numOfSamples <- length(list.files(path=d.in, pattern="(NEG)+|(POS)+", ignore.case=FALSE))
# BUG FIX: the original pattern "+blank" is an invalid regular expression (a
# leading repetition operator with nothing to repeat) and makes list.files()
# error; "blank" is the intended literal match.
numOfBlanks <- length(list.files(path=d.in, pattern="blank", ignore.case=FALSE))
# Validate the inputs up front.  The original script showed the error dialog
# but then continued and crashed later in xcmsSet(); stop() now halts the run.
if((numOfSamples-numOfBlanks)<1){
  tkmessageBox(title = "An error has occured!",
               message = "Please have atleast one sample (should have 'POS' or 'NEG' in the name\n and not have 'blank' in the name", icon = "error", type = "ok")
  stop("No non-blank sample files found in the selected directory", call. = FALSE)
}
if ((numOfSamples<2)||(length(files)<2)){
  tkmessageBox(title = "An error has occured!",
               message = "Please have 2 or more .mzML files in the input folder(s)\nMake sure files have 'pos' or 'neg' in the name\nMake sure blanks have 'blank' in the name", icon = "error", type = "ok")
  stop("Fewer than two usable .mzML files found", call. = FALSE)
}
# XCMS centWave peak picking with the user-selected parameters.
xset <- xcmsSet(files, method = 'centWave', ppm = PPM, snthr = SNTHR, peakwidth = c(PeakMin,PeakMax))
# 2. Initial grouping across samples.
xset <- group(xset, minfrac = MINFRAC)
# Retention-time correction is intentionally skipped: both retcor() variants in
# the original script errored (per the author's own notes) and their results
# were discarded by the final `xset2 <- xset` anyway, so the dead calls are
# removed here.
#xset2 <- retcor(xset, method = 'obiwarp')
#xset2 <- retcor(xset, method = 'peakgroups', plottype = 'deviation')
xset2<-xset
# 3. Regroup after the (skipped) retention-time correction.
xset2 <- group(xset2, bw = 10, mzwid = 0.015, minfrac = MINFRAC)
# 4. Gap filling is intentionally disabled (kept for reference):
#xset3 <- fillPeaks(xset2)
# groupmat <- xcms::groups(xset3)
# 6. --------------- CAMERA annotation -----------------
# Group co-eluting features by peak shape, then annotate isotopes and
# (optionally) adducts.
xa <- xsAnnotate(xset2, polarity= POLARITY, nSlaves = 1)
xa <- groupFWHM(xa)
xa <- findIsotopes(xa)
if (CameraAdductSearch==TRUE) {
  rules.camera <- read.csv(AdductFile)
  # BUG FIX: the original passed the undefined lowercase `polarity`; the
  # script-level setting is POLARITY (would error whenever this branch ran).
  xa <- findAdducts(xa, rules = rules.camera, polarity = POLARITY)
  # NOTE(review): this second call re-annotates with CAMERA's default rules
  # (rules = NULL), discarding the custom-rule annotation above; it looks like
  # leftover debris -- confirm whether both calls are intended.
  xa <- findAdducts(xa, rules = NULL, polarity = POLARITY)
}
# Combine feature names with the annotated peak list.  "into" = peak area
# without baseline correction ("intb" produced NA here; "maxo" would be apex
# intensity).
peaklist.anno <- cbind('name' = groupnames(xset2), #use xset2 directly since gap filling is skipped
                       getPeaklist(xa, intval = "into"))
colnames(peaklist.anno)[c(2, 5)] <- c('mzmed', 'rtmed')
# Working matrix: the peak table plus 4 appended columns --
#   ncol-3: mean blank area,  ncol-2: blank spread (sd scaled by n),
#   ncol-1: mean sample area, ncol  : blank-subtraction score
#           percentile(samples) - (blank mean + c * blank spread);
# a positive score means the feature survives blank filtering.
PeakList<-matrix(0,nrow(peaklist.anno),ncol(peaklist.anno)+4)
PeakList[1:nrow(peaklist.anno),1:ncol(peaklist.anno)]<-as.matrix(peaklist.anno)
#select columns whose names contain "blank"
Blanks<-select(peaklist.anno,contains("blank"))
#select sample columns (POS/NEG in the name, excluding blanks)
Samples<-select(peaklist.anno,matches("(POS)|(NEG)"),-contains("blank"))
#compute blank statistics and the blank-filter score per feature
if (ncol(Blanks)!=0) {
  if (ncol(Blanks)>2) {
    # Three or more blanks: use mean + c * (sd / n) of the blank areas.
    for (i in seq_len(nrow(Blanks))) {
      Blanks[i,is.na(Blanks[i,])]<-0
      # sd()/quantile() require a numeric vector; a 1-row data.frame slice
      # cannot be coerced and errors in modern R.
      blank_row<-as.numeric(Blanks[i,])
      PeakList[i,ncol(PeakList)-3]<-sum(blank_row)/length(blank_row) #average blank
      PeakList[i,ncol(PeakList)-2]<-sd(blank_row)/length(blank_row)  #blank spread (sd scaled by n, as in the original formula)
      Samples[i,is.na(Samples[i,])]<-0
      sample_row<-as.numeric(Samples[i,])
      PeakList[i,ncol(PeakList)-1]<-(sum(sample_row)/length(sample_row)) #average sample
      SamplePercentile<-as.numeric(quantile(sample_row, PERCENTILE))
      #percentile(samples)-(average(blanks)+c*standardDeviation(blanks))
      PeakList[i,ncol(PeakList)]<-SamplePercentile-(as.numeric(PeakList[i,(ncol(PeakList)-3)])+blankFilterConstant*as.numeric(PeakList[i,(ncol(PeakList)-2)]))
    }
  } else {
    # One or two blanks: no usable standard deviation, so the blank mean is
    # used in place of the spread (i.e. require a fold-change over the blanks).
    for (i in seq_len(nrow(Blanks))) {
      Blanks[i,is.na(Blanks[i,])]<-0
      blank_row<-as.numeric(Blanks[i,])
      PeakList[i,ncol(PeakList)-3]<-sum(blank_row)/length(blank_row) #average blank
      Samples[i,is.na(Samples[i,])]<-0
      sample_row<-as.numeric(Samples[i,])
      PeakList[i,ncol(PeakList)-1]<-(sum(sample_row)/length(sample_row)) #average sample
      SamplePercentile<-as.numeric(quantile(sample_row, PERCENTILE))
      # BUG FIX: the original used as.numeric(ncol(PeakList)-3) -- the column
      # INDEX -- instead of the stored blank mean value.
      blank_mean<-as.numeric(PeakList[i,ncol(PeakList)-3])
      PeakList[i,ncol(PeakList)]<-SamplePercentile-(blank_mean+blankFilterConstant*blank_mean)
    }
  }
} else {
  # No blanks at all: just record the mean sample area; the blank score
  # column stays 0 and blank filtering is skipped downstream.
  for (i in seq_len(nrow(Samples))) {
    Samples[i,is.na(Samples[i,])]<-0
    sample_row<-as.numeric(Samples[i,])
    PeakList[i,ncol(PeakList)-1]<-(sum(sample_row)/length(sample_row))
  }
}
# Append the mean sample area to the annotated peak list.
peaklist.anno<-cbind(peaklist.anno,PeakList[,ncol(PeakList)-1])
names(peaklist.anno)[ncol(peaklist.anno)]<-"Avg_Samples"
###########################subset data by threshold & Blanks#####################
if (ncol(Blanks)!=0) {
#Maybe issue, changed PeakList[,20] to PeakList[,ncol(PeakList)]
peaklist.anno.filtered<-peaklist.anno[as.logical(as.numeric(as.numeric(PeakList[,ncol(PeakList)-1])>as.numeric(Threshold))*as.numeric(as.numeric(PeakList[,ncol(PeakList)])>0)),]
} else {
peaklist.anno.filtered<-peaklist.anno[as.logical(as.numeric(as.numeric(PeakList[,ncol(PeakList)-1])>as.numeric(Threshold))),]
}
#############subset by exact mass hits##############################
if (ExactMass_LibrarySearch==TRUE) {
ExactMassLibrary<-read.csv(DirExactMassLibrary)
# Reduce the library to certain adducts and a given polarity
if (POLARITY=="negative") {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,4]=="negative",]
if (HCO2==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+HCO2]-",]
}
if (CH3COO==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+CH3COO]-",]
}
}
if (POLARITY=="positive") {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,4]=="positive",]
if (NH4==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+NH4]+",]
}
}
ExactMassLibrary <- as.matrix(ExactMassLibrary)
#Exact mass matching for Precursor ExactMassLibraryrary to feature table m/z's
#Need to create a constant to multiply to mass to get a search tolerance in Da's per mass
PPM_CONST <- (10^6 + PPM) / 10^6
NumExactMassLibraryMZ <- as.numeric(ExactMassLibrary[,3])
peaklist.anno.filtered<-cbind(peaklist.anno.filtered,0)
for (i in 1:nrow(peaklist.anno.filtered)) {
NumData <- as.numeric(as.character(peaklist.anno.filtered[i,2]))
DaTolerance<-NumData*PPM_CONST - NumData
TempID <- ExactMassLibrary[(NumData-DaTolerance < NumExactMassLibraryMZ) & (NumExactMassLibraryMZ < NumData+DaTolerance), 5]
TempID<-as.character(paste(TempID,collapse=" & "))
peaklist.anno.filtered[i,ncol(peaklist.anno.filtered)]<-TempID
}
names(peaklist.anno.filtered)[ncol(peaklist.anno.filtered)]<-"Metabolite_ID"
peaklist.anno.filtered.anno<-peaklist.anno.filtered[peaklist.anno.filtered[,ncol(peaklist.anno.filtered)]!="",]
#sort the filtered and annotated table by most abundant ions or RT
if (SplitByAbundance==TRUE) {
peaklist.anno.filtered.anno$Avg_Samples<-as.numeric(as.character(peaklist.anno.filtered.anno$Avg_Samples))
peaklist.anno.filtered.anno<-arrange(peaklist.anno.filtered.anno, desc(Avg_Samples))
} else {
peaklist.anno.filtered.anno$rtmed<-as.numeric(as.character(peaklist.anno.filtered.anno$rtmed))
peaklist.anno.filtered.anno<-arrange(peaklist.anno.filtered.anno, rtmed)
}
}
#sort the filtered table by most abundant ions or RT
if (SplitByAbundance==TRUE) {
peaklist.anno.filtered$Avg_Samples<-as.numeric(as.character(peaklist.anno.filtered$Avg_Samples))
peaklist.anno.filtered<-arrange(peaklist.anno.filtered, desc(Avg_Samples))
} else {
peaklist.anno.filtered$rtmed<-as.numeric(as.character(peaklist.anno.filtered$rtmed))
peaklist.anno.filtered<-arrange(peaklist.anno.filtered, rtmed)
}
#determine inclusion list format based on instrument type
if (waters==TRUE) {
#Inclusion List Format
peaklist.anno.filtered.incl<-matrix(-1,nrow(peaklist.anno.filtered),8)
peaklist.anno.filtered.incl[,1]<-peaklist.anno.filtered[,2]
peaklist.anno.filtered.incl[,5]<-peaklist.anno.filtered[,5]
if (ExactMass_LibrarySearch==TRUE) {
peaklist.anno.filtered.anno.incl<-matrix(-1,nrow(peaklist.anno.filtered.anno),8)
peaklist.anno.filtered.anno.incl[,1]<-peaklist.anno.filtered.anno[,2]
peaklist.anno.filtered.anno.incl[,5]<-peaklist.anno.filtered.anno[,5]
}
}
if (thermoQE==TRUE) {
#header for for thermo formatted exclusion list (Q-Exactive)
peaklist.anno.filtered.incl = matrix("",nrow(peaklist.anno.filtered),9)
colnames(peaklist.anno.filtered.incl) = c("Mass [m/z]", "Formula [M]", "Formula type", "Species", "CS [z]", "Polarity", "Start [min]", "End [min]", "Comment")
#Fill in m/z and RT values for first exclusion list
peaklist.anno.filtered.incl[,1] = peaklist.anno.filtered[,2] #m/z
peaklist.anno.filtered.incl[,7] = (as.numeric(peaklist.anno.filtered[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.incl[,8] = (as.numeric(peaklist.anno.filtered[,5])/60)+(RTWindow/2) #rt max (minutes)
if (ExactMass_LibrarySearch==TRUE) {
#header for for thermo formatted exclusion list (Q-Exactive)
peaklist.anno.filtered.anno.incl = matrix("",nrow(peaklist.anno.filtered.anno),9)
colnames(peaklist.anno.filtered.anno.incl) = c("Mass [m/z]", "Formula [M]", "Formula type", "Species", "CS [z]", "Polarity", "Start [min]", "End [min]", "Comment")
#Fill in m/z and RT values for first exclusion list
peaklist.anno.filtered.anno.incl[,1] = peaklist.anno.filtered.anno[,2] #m/z
peaklist.anno.filtered.anno.incl[,7] = (as.numeric(peaklist.anno.filtered.anno[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.anno.incl[,8] = (as.numeric(peaklist.anno.filtered.anno[,5])/60)+(RTWindow/2) #rt max (minutes)
}
}
#***BETA, NOT SURE OF EXACT FORMAT (used exclusion format)
if (thermoLumos==TRUE) {
#Inclusion List Format
peaklist.anno.filtered.incl<-matrix("",nrow(peaklist.anno.filtered),4) #create empty matrix for lumos (n rows by 3 columns)
colnames(peaklist.anno.filtered.incl) = c("m/z","Name","t start (min)","stop (min)")
peaklist.anno.filtered.incl[,1]<-peaklist.anno.filtered[,2] #add m/z values
#add start and end RT (min)
peaklist.anno.filtered.incl[,3]<-(as.numeric(peaklist.anno.filtered[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.incl[,4]<-(as.numeric(peaklist.anno.filtered[,5])/60)+(RTWindow/2) #rt max (minutes)
if (ExactMass_LibrarySearch==TRUE) {
#Inclusion List Format
peaklist.anno.filtered.anno.incl<-matrix("",nrow(peaklist.anno.filtered.anno),4) #create empty matrix for lumos (n rows by 3 columns)
colnames(peaklist.anno.filtered.anno.incl) = c("m/z","Name","t start (min)","stop (min)")
peaklist.anno.filtered.anno.incl[,1]<-peaklist.anno.filtered.anno[,2] #add m/z values
#add start and end RT (min)
peaklist.anno.filtered.anno.incl[,3]<-(as.numeric(peaklist.anno.filtered.anno[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.anno.incl[,4]<-(as.numeric(peaklist.anno.filtered.anno[,5])/60)+(RTWindow/2) #rt max (minutes)
}
}
#the number of lists which the inclusion list will be split into
nFilteredLists.filtered<-ceiling(nrow(peaklist.anno.filtered.incl)/maxInclusionListSize)
StepSize.filtered<-nFilteredLists.filtered
#How large will each individual inclusion list be (may remove some start and end ions)
SplitSize.filtered<-floor(nrow(peaklist.anno.filtered.incl)/StepSize.filtered)
dir.create("InclusionLists_Filtered")
if (ExactMass_LibrarySearch==TRUE) {
nFilteredLists.filtered.anno<-ceiling(nrow(peaklist.anno.filtered.anno.incl)/maxInclusionListSize)
StepSize.filtered.anno<-nFilteredLists.filtered.anno
SplitSize.filtered.anno<-floor(nrow(peaklist.anno.filtered.anno.incl)/StepSize.filtered.anno)
dir.create("InclusionLists_Annotated")
}
#Export the inclusion lists after filtering (sorted by most abundant ions)
if (SplitByAbundance==TRUE) {
start<-1
for (i in 1:nFilteredLists.filtered) {
if (i == nFilteredLists.filtered) {
tempPeakList<-peaklist.anno.filtered.incl[start:nrow(peaklist.anno.filtered.incl),]
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
} else {
tempPeakList<-peaklist.anno.filtered.incl[start:(i*SplitSize.filtered),]
start<-i*SplitSize.filtered+1
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Annotated_Filtered_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Annotated_Filtered_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
#export the inclusion list after exact mass searching
if (ExactMass_LibrarySearch==TRUE) {
start<-1
for (i in 1:nFilteredLists.filtered.anno) {
if (i == nFilteredLists.filtered.anno) {
tempPeakList<-peaklist.anno.filtered.anno.incl[start:nrow(peaklist.anno.filtered.anno.incl),]
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
} else {
tempPeakList<-peaklist.anno.filtered.anno.incl[start:(i*maxInclusionListSize),]
start<-i*maxInclusionListSize+1
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
}
}
#Export the inclusion lists after filtering (split so each inclusion list covers the same retention time ranges)
#This will optimally reduce the density of ions on the list which have the same retention time
if (SplitByRT==TRUE) {
for (i in 1:nFilteredLists.filtered){
temporaryInc<-peaklist.anno.filtered.incl[1:SplitSize.filtered,]
a<-i
for (x in 1:SplitSize.filtered) {
temporaryInc[x,]<-peaklist.anno.filtered.incl[a,]
a<-a+StepSize.filtered
}
if (waters==TRUE){
write.table(temporaryInc, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(temporaryInc, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
if (ExactMass_LibrarySearch==TRUE) {
for (i in 1:nFilteredLists.filtered.anno){
temporaryInc<-peaklist.anno.filtered.anno.incl[1:SplitSize.filtered.anno,]
a<-i
for (x in 1:SplitSize.filtered.anno) {
temporaryInc[x,]<-peaklist.anno.filtered.anno.incl[a,]
a<-a+StepSize.filtered.anno
}
if (waters==TRUE){
write.table(temporaryInc, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(temporaryInc, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
}
write.csv(peaklist.anno.filtered, paste(ExportName,"_filtered.csv",sep=""))
if (ExactMass_LibrarySearch==TRUE) {
write.csv(peaklist.anno.filtered.anno, paste(ExportName,"_annotated.csv",sep=""))
}
##Jeremy Koelmel - 04/10/2019
##Step 1: XCMS Peak picking
##Step 2: Camera Annotation
##Step 3: Filter by blanks and threshold (change to maximum or 3rd quartile)
##Step 4: Filter by exact mass matches to metabolite library
##Step 5: Generates inclusion lists
| /Inclusion/2019_05_08_IntelligentAcquisition_Inclusion.R | permissive | GarrettLab-UF/IntelligentAcquisition | R | false | false | 22,565 | r | ##remove all R objects from memory
# Wipe the workspace: kept for backwards compatibility with the authors'
# "highlight all and click run" workflow (normally an anti-pattern in scripts).
rm(list = ls())
##Simply highlight all and click run
##Jeremy Koelmel - 04/22/2019
######################## Other parameters, can manually change ########################
# Which inclusion list format are you using? Enable exactly one instrument.
waters <- TRUE       # Waters format (may not be applicable to all instruments)
thermoQE <- FALSE    # Thermo, formatted for the Q-Exactive
thermoLumos <- FALSE # Thermo, formatted for the Lumos Fusion
# Percentile of sample peak areas compared against
# average(blanks) + c * stdev(blanks) during blank filtering below
PERCENTILE <- 0.75
# CAMERA: perform adduct searching? Extra step, not required for inclusion lists
CameraAdductSearch <- FALSE
# Adducts expected in your chromatography (eg. metDNA library)
HCO2 <- TRUE
CH3COO <- TRUE
NH4 <- TRUE
# Exact mass search? Restricts included ions to those in your MS/MS library
ExactMass_LibrarySearch <- TRUE
# Width of the retention time inclusion window (minutes)
RTWindow <- 0.2
# How to split the ions across multiple inclusion lists (CHOOSE EXACTLY ONE):
# first list = most abundant ions, second list = next most abundant, etc. ...
SplitByAbundance <- FALSE
# ... or each list evenly distributed across retention time,
# eg. RT 1,2,3,4,5,6 split into two lists gives 1,3,5 and 2,4,6
SplitByRT <- TRUE
# Parameters that can be changed here or via the pop-up boxes when running
PPM <- 15      # mass accuracy (ppm) for peak picking and library matching
SNTHR <- 10    # signal-to-noise threshold for peak picking
MINFRAC <- 0.1 # minimum fraction of samples that must contain a peak
PeakMin <- 5   # minimum peak width (seconds)
PeakMax <- 15  # maximum peak width (seconds)
# Spell exactly as "positive" or "negative"
POLARITY <- "positive"
blankFilterConstant <- 5    # c in average(blank) + c * stdev(blank)
maxInclusionListSize <- 100 # maximum number of ions per exported list
Threshold <- 2000           # minimum chromatogram intensity to keep a peak
###########################################Inclusion Step##################################################
##Step 1: XCMS Peak picking
##Step 2: Filter by blanks and intensity threshold
##Step 3: Filter by exact mass matches to metabolite library / lipid library (if desired)
##Step 4: Generates inclusion lists
# Install the GUI/data-manipulation packages only if they are not already present.
if("gWidgets" %in% rownames(installed.packages()) == FALSE) {install.packages("gWidgets")}
if("gWidgetstcltk" %in% rownames(installed.packages()) == FALSE) {install.packages("gWidgetstcltk")}
if("dplyr" %in% rownames(installed.packages()) == FALSE) {install.packages("dplyr")}
require(gWidgets)
require(gWidgetstcltk)
# Use the tcltk backend so ginput()/gtext dialogs work cross-platform
options(guiToolkit="tcltk")
# Ask the user whether to (re)install the heavyweight Bioconductor dependencies
Install = ginput(message="first time running? \nShould we attempt to install all packages?\n (software will check if packages exist before installing) \ninput: y or n", title="Install Packages?",icon="question")
if (Install == "y") {
install.packages("installr")
library(installr)
updateR()
## Install packages (more than are loaded...)
#install.packages("colorspace")
#install.packages("ggplot2")
# NOTE(review): biocLite was retired with Bioconductor 3.8 / R 3.5; on modern
# R this source() call fails and BiocManager::install() is needed -- confirm.
source("http://bioconductor.org/biocLite.R")
if(!requireNamespace("xcms")){biocLite("xcms")}
#if(!requireNamespace("pandaR")){biocLite("pandaR", version = "3.8")}
#if(!requireNamespace("faahKO")){biocLite("faahKO")}
#if(!requireNamespace("MSnbase")){biocLite("MSnbase")}
#if(!requireNamespace("pander")){biocLite("pander")}
if(!requireNamespace("CAMERA")){biocLite("CAMERA")}
#if(!requireNamespace("limma")){biocLite("limma")}
}
## load packages
library(xcms)
# library(faahKO)
# library(RColorBrewer)
# library(pander)
# library(magrittr)
library(MSnbase)
library(CAMERA)
library(dplyr)
#inputs: Import directory with mzML files
d.in <- choose.dir(caption="Directory with mzML files (do not select a file)\nshould contain atleast 1 file ending in neg.mzML or pos.mzML\nas well as 1-3 files'blank' somewhere in the name \n and ending in _neg or _pos.mzML")
# Optional inputs, only requested when the corresponding step is enabled above
if (CameraAdductSearch==TRUE){
AdductFile<-choose.files(caption="import .csv file with adducts to search against \nto be used in CAMERA annotation",multi=FALSE)
}
if (ExactMass_LibrarySearch==TRUE) {
DirExactMassLibrary<-choose.files(caption="import .csv file with metabolites/lipids to search against \nto be used in exact mass matching \nand to reduce the size of the inclusion list",multi=FALSE)
}
# Base name for every exported file; all output is written under d.in
ExportName<-ginput(message="What should the export file be called? \nDo not include extension (.csv), periods, or special characters", title="default",icon="question")
setwd(d.in)
POLARITY = ginput(message="What is the polarity? \ninput example: positive OR negative, spell exactly, case sensitive", title="polarity (positve OR negative)",icon="question")
default = ginput(message="use default parameters? \ninput: y or n", title="default",icon="question")
######################interface for parameters#######################
# Answering "n" re-prompts for each tuning parameter. ginput() returns strings,
# hence the as.numeric() conversions at the end of this block.
# NOTE(review): the MINFRAC prompt reuses the "signal to noise threshold"
# dialog title, and Threshold is never converted with as.numeric() here (it
# is converted at the point of use instead) -- confirm these are intentional.
if (default=="n"){
PPM = ginput(message="What is mass accuracy in ppm for peak picking? \ninput: numeric, example 15", title="mass accuracy (ppm)",icon="question")
PeakMin = ginput(message="What is your minimum peak width (seconds)? \ninput: numeric, example 2", title="min Peak Width (seconds)",icon="question")
PeakMax = ginput(message="What is your maximum peak width (seconds)? \ninput: numeric, example 30", title="max Peak Width (seconds)",icon="question")
SNTHR = ginput(message="What do you want for the signal to noise thresold? \nDefault 10, input: numeric", title="signal to noise threshold",icon="question")
MINFRAC = ginput(message="What fraction of samples must have peaks? \ninput example: 0.5", title="signal to noise threshold",icon="question")
blankFilterConstant = ginput(message="What is the number (c) to multiply to the blank standard deviation \nfor which average(sample)-(average(blank)+c*stdev(blank)) must be > 0? \ndefault = 10", title="Blank Subtraction",icon="question")
Threshold = ginput(message="What is the maximum extracted chromatogram intensity to be consider a peak \nall peaks below this intensity will be removed", title="Threshold Peak Filter",icon="question")
maxInclusionListSize = ginput(message="What is the maximum number of ions contained on each inclusion list?", title="Exclusion List Max Size",icon="question")
PPM = as.numeric(PPM)
PeakMin = as.numeric(PeakMin)
PeakMax = as.numeric(PeakMax)
SNTHR = as.numeric(SNTHR)
MINFRAC = as.numeric(MINFRAC)
blankFilterConstant = as.numeric(blankFilterConstant)
maxInclusionListSize = as.numeric(maxInclusionListSize)
}
# ------------------ xcms peak detection and grouping ------------------
# 1. Peak detection: list every mzML file (case-insensitive, recursive search)
files <- dir(d.in, pattern = '(?i)mzml', recursive = TRUE, full.names = TRUE)
# Count sample and blank files. ignore.case = TRUE so 'pos'/'POS' and
# 'Blank'/'blank' all match, consistent with the file-chooser instructions.
numOfSamples <- length(list.files(path=d.in, pattern="(NEG)+|(POS)+", ignore.case=TRUE))
# BUG FIX: the previous pattern "+blank" begins with a regex repetition
# operator, which is invalid; the intent is the plain substring "blank".
numOfBlanks <- length(list.files(path=d.in, pattern="blank", ignore.case=TRUE))
# At least one non-blank sample file is required
if((numOfSamples-numOfBlanks)<1){
tkmessageBox(title = "An error has occured!",
message = "Please have atleast one sample (should have 'POS' or 'NEG' in the name\n and not have 'blank' in the name", icon = "error", type = "ok")
}
# Error testing: need at least two recognisable data files overall
if ((numOfSamples<2)||(length(files)<2)){
tkmessageBox(title = "An error has occured!",
message = "Please have 2 or more .mzML files in the input folder(s)\nMake sure files have 'pos' or 'neg' in the name\nMake sure blanks have 'blank' in the name", icon = "error", type = "ok")
}
# centWave peak picking with the user-supplied mass accuracy and peak widths
xset <- xcmsSet(files, method = 'centWave', ppm = PPM, snthr = SNTHR, peakwidth = c(PeakMin,PeakMax))
# 2. grouping 1
xset <- group(xset, minfrac = MINFRAC)
# RT correction: both retcor() variants below errored on the authors' data
# (see their original notes) and the result was discarded by 'xset2 <- xset'
# immediately afterwards, so they are left commented out instead of executed.
# xset2 <- retcor(xset, method = 'obiwarp')
# xset2 <- retcor(xset, method = 'peakgroups', plottype = 'deviation')
xset2 <- xset
# 3. grouping 2
xset2 <- group(xset2, bw = 10, mzwid = 0.015, minfrac = MINFRAC)
# 4. filling gaps (disabled; groupnames(xset2) is used downstream instead)
#xset3 <- fillPeaks(xset2)
# groupmat <- xcms::groups(xset3)
# 6. --------------- CAMERA annotation -----------------
# Group co-eluting peaks, flag isotopes and (optionally) annotate adducts.
xa <- xsAnnotate(xset2, polarity= POLARITY, nSlaves = 1)
xa <- groupFWHM(xa)
xa <- findIsotopes(xa)
if (CameraAdductSearch==TRUE) {
rules.camera <- read.csv(AdductFile)
# BUG FIX: this branch referenced an undefined lowercase 'polarity' object
# and would error; the configured POLARITY value is intended.
xa <- findAdducts(xa, rules = rules.camera, polarity = POLARITY)
# The original second call with rules = NULL overwrote the custom-rule
# annotation above with CAMERA's defaults, so it is disabled here.
# xa <- findAdducts(xa, rules = NULL, polarity = POLARITY)
}
peaklist.anno <- cbind('name' = groupnames(xset2), #change into xset2 since not using filling gaps
getPeaklist(xa, intval = "into")) # into is peakarea without baseline correct, intb with baseline correct(result NA because of the incorrect baseline removement), maxo is intensity
colnames(peaklist.anno)[c(2, 5)] <- c('mzmed', 'rtmed')
# Working matrix: annotated peak list plus 4 extra columns filled below
# (mean blank, sd blank, mean sample, blank-subtraction score)
PeakList<-matrix(0,nrow(peaklist.anno),ncol(peaklist.anno)+4)
PeakList[1:nrow(peaklist.anno),1:ncol(peaklist.anno)]<-as.matrix(peaklist.anno)
#select column names with blank
Blanks<-select(peaklist.anno,contains("blank"))
#select samples (POS/NEG files that are not blanks)
Samples<-select(peaklist.anno,matches("(POS)|(NEG)"),-contains("blank"))
# Create per-feature blank statistics and the blank-subtraction score used to
# filter features. PeakList columns (from the right): ncol-3 = mean blank,
# ncol-2 = sd(blank)/n, ncol-1 = mean sample, ncol = percentile(sample) -
# (mean(blank) + c * "sd"(blank)); a positive score keeps the feature.
if (ncol(Blanks)!=0) {
if (ncol(Blanks)>2) {
for (i in seq_len(nrow(Blanks))) {
Blanks[i,is.na(Blanks[i,])]<-0
PeakList[i,ncol(PeakList)-3]<-sum(Blanks[i,])/length(Blanks[i,]) #average blank
# NOTE(review): this is sd/n, not the standard deviation (nor the SEM,
# which would be sd/sqrt(n)); kept as-is to preserve existing results.
# as.numeric() because sd()/quantile() require a numeric vector, not a
# one-row data.frame.
PeakList[i,ncol(PeakList)-2]<-sd(as.numeric(Blanks[i,]))/length(Blanks[i,]) #standard deviation blank
Samples[i,is.na(Samples[i,])]<-0
PeakList[i,ncol(PeakList)-1]<-(sum(Samples[i,])/length(Samples[i,]))
SamplePercentile<-as.numeric(quantile(as.numeric(Samples[i,]), PERCENTILE))
#percentile(samples)-(average(blanks)+c*standardDeviation(blanks))
PeakList[i,ncol(PeakList)]<-SamplePercentile-(as.numeric(PeakList[i,(ncol(PeakList)-3)])+blankFilterConstant*as.numeric(PeakList[i,(ncol(PeakList)-2)]))
}
} else {
# 1-2 blanks only: no usable spread, so use the blank average itself in
# place of the standard deviation (a fold-change style criterion).
for (i in seq_len(nrow(Blanks))) {
Blanks[i,is.na(Blanks[i,])]<-0
#average blanks
PeakList[i,ncol(PeakList)-3]<-sum(Blanks[i,])/length(Blanks[i,])
Samples[i,is.na(Samples[i,])]<-0
#average samples
PeakList[i,ncol(PeakList)-1]<-(sum(Samples[i,])/length(Samples[i,]))
#sample percentile
SamplePercentile<-as.numeric(quantile(as.numeric(Samples[i,]), PERCENTILE))
# BUG FIX: the original used as.numeric(ncol(PeakList)-3), i.e. the COLUMN
# INDEX, instead of the stored blank average, so the filter compared
# samples against a constant derived from the table width.
PeakList[i,ncol(PeakList)]<-SamplePercentile-(as.numeric(PeakList[i,(ncol(PeakList)-3)])+blankFilterConstant*as.numeric(PeakList[i,(ncol(PeakList)-3)]))
}
}
} else {
# No blank files at all: only the mean sample intensity can be computed
for (i in seq_len(nrow(Samples))) {
Samples[i,is.na(Samples[i,])]<-0
PeakList[i,ncol(PeakList)-1]<-(sum(Samples[i,])/length(Samples[i,]))
}
}
# Append the mean sample intensity to the annotated peak list for export
peaklist.anno<-cbind(peaklist.anno,PeakList[,ncol(PeakList)-1])
names(peaklist.anno)[ncol(peaklist.anno)]<-"Avg_Samples"
###########################subset data by threshold & Blanks#####################
# Keep a feature when its mean sample intensity exceeds Threshold AND (when
# blank columns exist) the blank-subtraction score in the last PeakList column
# is positive. The two conditions are ANDed by multiplying their 0/1 codings;
# note an NA in either term propagates, so NA rows are retained as NA rows.
if (ncol(Blanks)!=0) {
#Maybe issue, changed PeakList[,20] to PeakList[,ncol(PeakList)]
peaklist.anno.filtered<-peaklist.anno[as.logical(as.numeric(as.numeric(PeakList[,ncol(PeakList)-1])>as.numeric(Threshold))*as.numeric(as.numeric(PeakList[,ncol(PeakList)])>0)),]
} else {
# No blanks available: filter on the intensity threshold alone
peaklist.anno.filtered<-peaklist.anno[as.logical(as.numeric(as.numeric(PeakList[,ncol(PeakList)-1])>as.numeric(Threshold))),]
}
#############subset by exact mass hits##############################
# Match each surviving feature m/z against the user's metabolite/lipid library
# within the configured ppm tolerance; features with at least one hit form the
# "annotated" table. Library columns (by position): 2 = adduct, 3 = m/z,
# 4 = polarity, 5 = metabolite name.
if (ExactMass_LibrarySearch==TRUE) {
ExactMassLibrary<-read.csv(DirExactMassLibrary)
# Reduce the library to certain adducts and a given polarity
if (POLARITY=="negative") {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,4]=="negative",]
if (HCO2==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+HCO2]-",]
}
if (CH3COO==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+CH3COO]-",]
}
}
if (POLARITY=="positive") {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,4]=="positive",]
if (NH4==FALSE) {
ExactMassLibrary<-ExactMassLibrary[ExactMassLibrary[,2]!="[M+NH4]+",]
}
}
ExactMassLibrary <- as.matrix(ExactMassLibrary)
#Exact mass matching of precursor m/z in the library to feature table m/z's.
#PPM_CONST converts a mass into mass + ppm tolerance, so
#DaTolerance = m/z * PPM / 1e6 (the tolerance in Daltons for that mass).
PPM_CONST <- (10^6 + PPM) / 10^6
NumExactMassLibraryMZ <- as.numeric(ExactMassLibrary[,3])
# New last column collects all matching metabolite names, " & "-separated
peaklist.anno.filtered<-cbind(peaklist.anno.filtered,0)
for (i in 1:nrow(peaklist.anno.filtered)) {
NumData <- as.numeric(as.character(peaklist.anno.filtered[i,2]))
DaTolerance<-NumData*PPM_CONST - NumData
TempID <- ExactMassLibrary[(NumData-DaTolerance < NumExactMassLibraryMZ) & (NumExactMassLibraryMZ < NumData+DaTolerance), 5]
TempID<-as.character(paste(TempID,collapse=" & "))
peaklist.anno.filtered[i,ncol(peaklist.anno.filtered)]<-TempID
}
names(peaklist.anno.filtered)[ncol(peaklist.anno.filtered)]<-"Metabolite_ID"
# Keep only features with at least one library hit
peaklist.anno.filtered.anno<-peaklist.anno.filtered[peaklist.anno.filtered[,ncol(peaklist.anno.filtered)]!="",]
#sort the filtered and annotated table by most abundant ions or RT
if (SplitByAbundance==TRUE) {
peaklist.anno.filtered.anno$Avg_Samples<-as.numeric(as.character(peaklist.anno.filtered.anno$Avg_Samples))
peaklist.anno.filtered.anno<-arrange(peaklist.anno.filtered.anno, desc(Avg_Samples))
} else {
peaklist.anno.filtered.anno$rtmed<-as.numeric(as.character(peaklist.anno.filtered.anno$rtmed))
peaklist.anno.filtered.anno<-arrange(peaklist.anno.filtered.anno, rtmed)
}
}
#sort the filtered table by most abundant ions or RT
# The ordering determines how ions are allocated to the split inclusion lists:
# descending mean sample intensity for SplitByAbundance, otherwise ascending
# retention time (used by the SplitByRT interleaving further below).
if (SplitByAbundance==TRUE) {
peaklist.anno.filtered$Avg_Samples<-as.numeric(as.character(peaklist.anno.filtered$Avg_Samples))
peaklist.anno.filtered<-arrange(peaklist.anno.filtered, desc(Avg_Samples))
} else {
peaklist.anno.filtered$rtmed<-as.numeric(as.character(peaklist.anno.filtered$rtmed))
peaklist.anno.filtered<-arrange(peaklist.anno.filtered, rtmed)
}
#determine inclusion list format based on instrument type
# Each branch builds *.incl matrices: one for the blank/threshold-filtered
# table and (if enabled) one for the library-annotated subset. Column 2 of the
# peak lists is m/z ('mzmed'), column 5 is retention time in seconds ('rtmed').
if (waters==TRUE) {
#Inclusion List Format: 8 columns, -1 means "unset"; col 1 = m/z,
#col 5 = retention time (seconds)
peaklist.anno.filtered.incl<-matrix(-1,nrow(peaklist.anno.filtered),8)
peaklist.anno.filtered.incl[,1]<-peaklist.anno.filtered[,2]
peaklist.anno.filtered.incl[,5]<-peaklist.anno.filtered[,5]
if (ExactMass_LibrarySearch==TRUE) {
peaklist.anno.filtered.anno.incl<-matrix(-1,nrow(peaklist.anno.filtered.anno),8)
peaklist.anno.filtered.anno.incl[,1]<-peaklist.anno.filtered.anno[,2]
peaklist.anno.filtered.anno.incl[,5]<-peaklist.anno.filtered.anno[,5]
}
}
if (thermoQE==TRUE) {
#header for for thermo formatted exclusion list (Q-Exactive)
peaklist.anno.filtered.incl = matrix("",nrow(peaklist.anno.filtered),9)
colnames(peaklist.anno.filtered.incl) = c("Mass [m/z]", "Formula [M]", "Formula type", "Species", "CS [z]", "Polarity", "Start [min]", "End [min]", "Comment")
#Fill in m/z and RT values; RT is converted from seconds to minutes and the
#RTWindow (minutes) is centred on the observed retention time
peaklist.anno.filtered.incl[,1] = peaklist.anno.filtered[,2] #m/z
peaklist.anno.filtered.incl[,7] = (as.numeric(peaklist.anno.filtered[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.incl[,8] = (as.numeric(peaklist.anno.filtered[,5])/60)+(RTWindow/2) #rt max (minutes)
if (ExactMass_LibrarySearch==TRUE) {
#header for for thermo formatted exclusion list (Q-Exactive)
peaklist.anno.filtered.anno.incl = matrix("",nrow(peaklist.anno.filtered.anno),9)
colnames(peaklist.anno.filtered.anno.incl) = c("Mass [m/z]", "Formula [M]", "Formula type", "Species", "CS [z]", "Polarity", "Start [min]", "End [min]", "Comment")
#Fill in m/z and RT values for first exclusion list
peaklist.anno.filtered.anno.incl[,1] = peaklist.anno.filtered.anno[,2] #m/z
peaklist.anno.filtered.anno.incl[,7] = (as.numeric(peaklist.anno.filtered.anno[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.anno.incl[,8] = (as.numeric(peaklist.anno.filtered.anno[,5])/60)+(RTWindow/2) #rt max (minutes)
}
}
#***BETA, NOT SURE OF EXACT FORMAT (used exclusion format)
if (thermoLumos==TRUE) {
#Inclusion List Format
peaklist.anno.filtered.incl<-matrix("",nrow(peaklist.anno.filtered),4) #create empty matrix for lumos (n rows by 3 columns)
colnames(peaklist.anno.filtered.incl) = c("m/z","Name","t start (min)","stop (min)")
peaklist.anno.filtered.incl[,1]<-peaklist.anno.filtered[,2] #add m/z values
#add start and end RT (min)
peaklist.anno.filtered.incl[,3]<-(as.numeric(peaklist.anno.filtered[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.incl[,4]<-(as.numeric(peaklist.anno.filtered[,5])/60)+(RTWindow/2) #rt max (minutes)
if (ExactMass_LibrarySearch==TRUE) {
#Inclusion List Format
peaklist.anno.filtered.anno.incl<-matrix("",nrow(peaklist.anno.filtered.anno),4) #create empty matrix for lumos (n rows by 3 columns)
colnames(peaklist.anno.filtered.anno.incl) = c("m/z","Name","t start (min)","stop (min)")
peaklist.anno.filtered.anno.incl[,1]<-peaklist.anno.filtered.anno[,2] #add m/z values
#add start and end RT (min)
peaklist.anno.filtered.anno.incl[,3]<-(as.numeric(peaklist.anno.filtered.anno[,5])/60)-(RTWindow/2) #rt min (minutes)
peaklist.anno.filtered.anno.incl[,4]<-(as.numeric(peaklist.anno.filtered.anno[,5])/60)+(RTWindow/2) #rt max (minutes)
}
}
#the number of lists which the inclusion list will be split into:
#enough lists so no single list exceeds maxInclusionListSize ions
nFilteredLists.filtered<-ceiling(nrow(peaklist.anno.filtered.incl)/maxInclusionListSize)
# StepSize is the stride used when interleaving by retention time below
StepSize.filtered<-nFilteredLists.filtered
#How large will each individual inclusion list be (may remove some start and end ions)
SplitSize.filtered<-floor(nrow(peaklist.anno.filtered.incl)/StepSize.filtered)
dir.create("InclusionLists_Filtered")
if (ExactMass_LibrarySearch==TRUE) {
# Same sizing, computed separately for the library-annotated subset
nFilteredLists.filtered.anno<-ceiling(nrow(peaklist.anno.filtered.anno.incl)/maxInclusionListSize)
StepSize.filtered.anno<-nFilteredLists.filtered.anno
SplitSize.filtered.anno<-floor(nrow(peaklist.anno.filtered.anno.incl)/StepSize.filtered.anno)
dir.create("InclusionLists_Annotated")
}
# Export the inclusion lists after filtering (sorted by most abundant ions).
# Consecutive chunks of the abundance-sorted table are written out, the last
# chunk absorbing any remainder. Waters lists are headerless .txt; other
# instruments get .csv with a header row.
if (SplitByAbundance==TRUE) {
start<-1
for (i in seq_len(nFilteredLists.filtered)) {
if (i == nFilteredLists.filtered) {
tempPeakList<-peaklist.anno.filtered.incl[start:nrow(peaklist.anno.filtered.incl),]
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
} else {
# NOTE: chunks here use SplitSize.filtered while the annotated loop below
# chunks by maxInclusionListSize; both keep every chunk within the limit.
tempPeakList<-peaklist.anno.filtered.incl[start:(i*SplitSize.filtered),]
start<-i*SplitSize.filtered+1
# BUG FIX: these filtered (not annotated) files were previously named
# "_Annotated_Filtered_", inconsistent with the final chunk and with the
# SplitByRT path; all filtered lists now share the "_Filtered_Incl_" stem.
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
#export the inclusion list after exact mass searching
if (ExactMass_LibrarySearch==TRUE) {
start<-1
for (i in seq_len(nFilteredLists.filtered.anno)) {
if (i == nFilteredLists.filtered.anno) {
tempPeakList<-peaklist.anno.filtered.anno.incl[start:nrow(peaklist.anno.filtered.anno.incl),]
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
} else {
tempPeakList<-peaklist.anno.filtered.anno.incl[start:(i*maxInclusionListSize),]
start<-i*maxInclusionListSize+1
if (waters==TRUE){
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(tempPeakList, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
}
}
#Export the inclusion lists after filtering (split so each inclusion list covers the same retention time ranges)
#This will optimally reduce the density of ions on the list which have the same retention time
# List i takes rows i, i+StepSize, i+2*StepSize, ... from the RT-sorted table.
# NOTE(review): only SplitSize*StepSize rows are distributed, so up to
# StepSize-1 trailing rows of the sorted table are silently dropped.
if (SplitByRT==TRUE) {
for (i in 1:nFilteredLists.filtered){
# Template rows (all overwritten in the inner loop)
temporaryInc<-peaklist.anno.filtered.incl[1:SplitSize.filtered,]
a<-i
for (x in 1:SplitSize.filtered) {
temporaryInc[x,]<-peaklist.anno.filtered.incl[a,]
a<-a+StepSize.filtered
}
if (waters==TRUE){
write.table(temporaryInc, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(temporaryInc, paste("InclusionLists_Filtered/",ExportName,"_Filtered_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
if (ExactMass_LibrarySearch==TRUE) {
# Same interleaving for the library-annotated subset
for (i in 1:nFilteredLists.filtered.anno){
temporaryInc<-peaklist.anno.filtered.anno.incl[1:SplitSize.filtered.anno,]
a<-i
for (x in 1:SplitSize.filtered.anno) {
temporaryInc[x,]<-peaklist.anno.filtered.anno.incl[a,]
a<-a+StepSize.filtered.anno
}
if (waters==TRUE){
write.table(temporaryInc, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".txt",sep=""), sep=",", col.names=FALSE, row.names=FALSE, quote=TRUE, na="NA")
} else {
write.table(temporaryInc, paste("InclusionLists_Annotated/",ExportName,"_Annotated_Incl_",i,".csv",sep=""), sep=",", col.names=TRUE, row.names=FALSE, quote=TRUE, na="NA")
}
}
}
}
# Full (un-split) tables are always written alongside the inclusion lists
write.csv(peaklist.anno.filtered, paste(ExportName,"_filtered.csv",sep=""))
if (ExactMass_LibrarySearch==TRUE) {
write.csv(peaklist.anno.filtered.anno, paste(ExportName,"_annotated.csv",sep=""))
}
##Jeremy Koelmel - 04/10/2019
##Step 1: XCMS Peak picking
##Step 2: Camera Annotation
##Step 3: Filter by blanks and threshold (change to maximum or 3rd quartile)
##Step 4: Filter by exact mass matches to metabolite library
##Step 5: Generates inclusion lists
|
# Clean up workspace
#rm(list = ls())
# Load libraries
library(ggplot2)
# Read data files: NEI = PM2.5 emissions records, SCC = source classification codes
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Motor-vehicle (ON-ROAD) emissions for Baltimore City (fips 24510) and
# Los Angeles County (fips 06037), totalled per year and county
bwiLAX <- NEI[NEI$type == "ON-ROAD" & (NEI$fips == "24510" | NEI$fips == "06037"), ]
bwiLAXagg <- aggregate(Emissions ~ year + fips, bwiLAX, sum)
bwiLAXagg$year <- as.factor(bwiLAXagg$year)
bwiLAXagg$fips[bwiLAXagg$fips == "24510"] <- "Baltimore"
bwiLAXagg$fips[bwiLAXagg$fips == "06037"] <- "Los Angeles"
# Build the complete plot as a single object before saving it
g <- ggplot(bwiLAXagg, aes(x = year, y = Emissions)) +
  ggtitle("PM2.5 Motor Vehicle Emissions:\nBaltimore City, MD and Los Angeles, CA (1999 - 2008)") +
  geom_bar(stat = "identity", width = 0.5, colour = "black", fill = "#003380") +
  facet_grid(. ~ fips) +
  theme_bw() +
  theme(axis.text = element_text(color = "black", size = 10)) +       ## 10pt black axis text
  theme(axis.title.x = element_text(color = "blue"),                  ## x title blue
        axis.title.y = element_text(color = "blue"),                  ## y title blue
        plot.title = element_text(color = "blue", size = 12)) +       ## title blue, 12pt
  labs(x = "Year", y = "PM2.5 Emissions (in Tons)")
print(g)
# BUG FIX: ggsave() was previously tacked onto the ggplot chain with '+',
# which does not reliably save the fully composed plot; pass it explicitly.
ggsave(filename = "plot6.png", plot = g)
| /plot6.R | no_license | sauceress/exploratory-data-analysis-course-project2 | R | false | false | 1,164 | r | # Clean up workspace
#rm(list = ls())
# load libs
library(ggplot2)
# read data files: NEI = PM2.5 emissions records, SCC = source classification codes
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# ON-ROAD (motor vehicle) emissions for Baltimore City (24510) and
# Los Angeles County (06037), then yearly totals per county
bwiLAX <- NEI[NEI$type=="ON-ROAD" & (NEI$fips=="24510" | NEI$fips=="06037"), ]
bwiLAXagg <- aggregate(Emissions ~ year+fips, bwiLAX, sum)
bwiLAXagg$year=as.factor(bwiLAXagg$year)
# Replace fips codes with readable county names for the facet labels
bwiLAXagg$fips[bwiLAXagg$fips=="24510"]='Baltimore'
bwiLAXagg$fips[bwiLAXagg$fips=="06037"]='Los Angeles'
g <- ggplot(bwiLAXagg, aes(x=year,y=Emissions))+
ggtitle("PM2.5 Motor Vehicle Emissions:\nBaltimore City, MD and Los Angeles, CA (1999 - 2008)") ##Use this title for the plot
g +
geom_bar(stat = "identity", width = 0.5, colour="black", fill="#003380") +
facet_grid(.~fips) +
theme_bw()+
theme(axis.text=element_text(color="black",size=10))+ ## size 10pt and black for axis
theme(axis.title.x=element_text(color="blue"), ## x title blue
axis.title.y=element_text(color="blue"), ## y title blue
plot.title=element_text(color="blue",size=12))+ ##set title blue and 12pt
labs(x = "Year", y = "PM2.5 Emissions (in Tons)" )+ ## label x and y +
# NOTE(review): chaining ggsave() onto the plot with '+' is fragile -- it may
# save the wrong/partial plot or error; prefer a separate
# ggsave(filename, plot) call after printing the plot. Confirm the PNG output.
ggsave(file = "plot6.png")
|
### R code from vignette source 'simstudy_survival.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: simstudy_survival.Rnw:13-43
###################################################
# Chunk 1
# Simulation study set-up: packages plus the global constants that the
# data-generating and analysis functions below read.
library(CALIBERrfimpute)
library(missForest)
library(survival)
library(xtable)
library(rpart)
library(mice)
library(ranger)
kPmiss <- 0.2 # probability of missingness
kLogHR <- 0.5 # true log hazard ratio
# To analyse samples of more than 200 patients, (recommend about 2000,
# but this will slow down the program), set NPATS before running
# this vignette.
if (!exists('NPATS')){
kSampleSize <- 200 # number of patients in simulated datasets
} else {
kSampleSize <- NPATS
}
# e.g.
# NPATS <- 2000
# To analyse more than 3 samples, set N to a number greater than 3
# e.g.
# N <- 1000
# To use more than 4 imputations, set NIMPS to a number greater than 4
# e.g.
# NIMPS <- 10
###################################################
### code chunk number 2: simstudy_survival.Rnw:86-280
###################################################
# Chunk 2
#### DATA GENERATING FUNCTIONS ####
makeSurv <- function(n = 2000, loghr = kLogHR){
  # Simulate a survival cohort of n patients. Covariates x1 and x2 are
  # standard normal; x3 depends on both (including their interaction) plus
  # noise. Survival times are exponential with log hazard
  # loghr * (x1 + x2 + x3); censoring is uniform and independent.
  # Returns x1-x3, event indicator, integer follow-up time and the
  # Nelson-Aalen cumulative hazard (for imputation models).
  cohort <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
  # x3 is driven by x1 and x2 with an interaction term, plus normal noise
  cohort$x3 <- 0.5 * (cohort$x1 + cohort$x2 - cohort$x1 * cohort$x2) + rnorm(n)
  # Every covariate carries the same true log hazard ratio
  cohort$y <- with(cohort, loghr * (x1 + x2 + x3))
  cohort$survtime <- rexp(n, exp(cohort$y))
  # Observation (censoring) times: uniform up to the median survival time
  obstime <- runif(nrow(cohort), min = 0,
    max = quantile(cohort$survtime, 0.5))
  cohort$event <- as.integer(cohort$survtime <= obstime)
  # Integer follow-up times on a x100 scale
  cohort$time <- ceiling(100 * pmin(cohort$survtime, obstime))
  # Observed marginal cumulative hazard for the imputation models
  cohort$cumhaz <- nelsonaalen(cohort, time, event)
  # The linear predictor and the uncensored survival time are latent in real
  # data, so drop them before returning
  cohort$y <- NULL
  cohort$survtime <- NULL
  cohort
}
makeMarSurv <- function(data, pmissing = kPmiss){
  # Introduce missingness in x3, missing-at-random dependent on x1, x2,
  # the event indicator and the Nelson-Aalen cumulative hazard. The expected
  # proportion of missing x3 values is pmissing.
  logistic <- function(x){
    exp(x) / (1 + exp(x))
  }
  predictions <- function(lp, n){
    # uses the vector of linear predictions (lp) from a logistic model
    # and the expected number of positive responses (n) to generate
    # a set of predictions by shifting the baseline (intercept) via a
    # halving search until the expected count is within 1 of n
    trialn <- function(lptrial){
      sum(logistic(lptrial))
    }
    stepsize <- 32
    lptrial <- lp
    # BUG FIX: missing linear predictors must be replaced in lptrial, the
    # vector actually used below. The original assigned into lp, leaving
    # NAs in lptrial so the while() condition evaluated to NA and errored.
    if (any(is.na(lptrial))){
      lptrial[is.na(lptrial)] <- mean(lptrial, na.rm = TRUE)
    }
    while(abs(trialn(lptrial) - n) > 1){
      if (trialn(lptrial) > n){
        # trialn bigger than required
        lptrial <- lptrial - stepsize
      } else {
        lptrial <- lptrial + stepsize
      }
      stepsize <- stepsize / 2
    }
    # Generate predictions from binomial distribution
    as.logical(rbinom(logical(length(lp)), 1, logistic(lptrial)))
  }
  # Blank out x3 for the selected rows; weights of 0.1 give a weak MAR signal
  data$x3[predictions(0.1 * data$x1 + 0.1 * data$x2 +
    0.1 * data$cumhaz + 0.1 * data$event, nrow(data) * pmissing)] <- NA
  return(data)
}
#### IMPUTATION FUNCTIONS FROM DOOVE AND VAN BUUREN ####
# Thin wrappers fixing the number of trees, so each method can be selected by
# name (e.g. 'rfdoove10') in mice()'s defaultMethod argument.
mice.impute.rfdoove10 <- function(y, ry, x, ...){
mice::mice.impute.rf(y = y, ry = ry, x = x, ntrees = 10)
}
mice.impute.rfdoove100 <- function(y, ry, x, ...){
mice::mice.impute.rf(y = y, ry = ry, x = x, ntrees = 100)
}
#### OUR MICE RANDOM FOREST FUNCTIONS ####
# CALIBERrfimpute continuous-variable random forest imputation with
# 5/10/20/50/100 trees respectively.
mice.impute.rfcont5 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 5)
}
mice.impute.rfcont10 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 10)
}
mice.impute.rfcont20 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 20)
}
mice.impute.rfcont50 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 50)
}
mice.impute.rfcont100 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 100)
}
#### FUNCTIONS TO DO THE ANALYSIS ####
coxfull <- function(data){
  # Fit the Cox model (global myformula) to the fully observed data and
  # return, per coefficient: the estimate, a normal-approximation 95% CI,
  # and whether that interval covers the true log hazard ratio kLogHR.
  fit <- as.data.frame(summary(coxph(myformula, data = data))$coef)
  estimate <- fit[, 'coef']
  stderr <- fit[, 'se(coef)']
  result <- data.frame(est = estimate,
    lo95 = estimate + qnorm(0.025) * stderr,
    hi95 = estimate + qnorm(0.975) * stderr,
    row.names = row.names(fit))
  result$cover <- kLogHR >= result$lo95 & kLogHR <= result$hi95
  result
}
coximpute <- function(imputed_datasets){
# Analyses a list of imputed datasets: fits the Cox model to each, pools the
# fits with Rubin's rules, and returns est/lo95/hi95/cover per coefficient
# in the same format as coxfull().
docoxmodel <- function(data){
coxph(myformula, data = data)
}
mirafits <- as.mira(lapply(imputed_datasets, docoxmodel))
coefs <- as.data.frame(summary(pool(mirafits)))
# Newer mice returns a 'term' column instead of row names
if ('term' %in% colnames(coefs)){
row.names(coefs) <- as.character(coefs$term)
}
if (!('lo 95' %in% colnames(coefs))){
# newer version of mice
# use normal approximation for now, as assume large sample
# and large degrees of freedom for t distribution
out <- data.frame(est = coefs$estimate,
lo95 = coefs$estimate + qnorm(0.025) * coefs$std.error,
hi95 = coefs$estimate + qnorm(0.975) * coefs$std.error,
row.names = row.names(coefs))
} else if ('lo 95' %in% colnames(coefs)){
# older version of mice: use the t-based limits mice already computed
out <- data.frame(est = coefs$est,
lo95 = coefs[, 'lo 95'], hi95 = coefs[, 'hi 95'],
row.names = row.names(coefs))
} else {
# NOTE(review): unreachable -- the two branches above are complementary,
# so this stop() can never trigger; kept for defensive symmetry.
stop('Unable to handle format of summary.mipo object')
}
# Whether this confidence interval contains the true hazard ratio
out$cover <- kLogHR >= out$lo95 & kLogHR <= out$hi95
out
}
domissf <- function(missdata, reps = NIMPS){
  # Impute 'missdata' reps times with missForest, silencing its console
  # output. Returns a list of completed data frames.
  out <- list()
  for (i in seq_len(reps)){  # seq_len() is safe if reps == 0 (1:reps is not)
    invisible(capture.output(
      out[[i]] <- missForest(missdata)$ximp))
  }
  out
}
domice <- function(missdata, functions, reps = NIMPS){
  # Impute with mice using the supplied default method name(s), then return
  # the completed datasets as a list (one data frame per imputation).
  imputations <- mice(missdata, defaultMethod = functions,
    m = reps, visitSequence = 'monotone',
    printFlag = FALSE, maxit = 10)
  lapply(1:reps, function(idx) complete(imputations, idx))
}
doanalysis <- function(x){
# Creates a dataset, analyses it using different methods, and outputs
# the result as a matrix of coefficients / SE and coverage.
# The argument x is unused; it exists so the function can be mapped over
# 1:N by lapply/mclapply. Relies on globals kSampleSize and the analysis
# helpers defined above.
data <- makeSurv(kSampleSize)
missdata <- makeMarSurv(data)
out <- list()
out$full <- coxfull(data)
out$missf <- coximpute(domissf(missdata))
out$rf5 <- coximpute(domice(missdata, 'rfcont5'))
out$rf10 <- coximpute(domice(missdata, 'rfcont10'))
out$rf20 <- coximpute(domice(missdata, 'rfcont20'))
out$rf100 <- coximpute(domice(missdata, 'rfcont100'))
out$rfdoove10 <- coximpute(domice(missdata, 'rfdoove10'))
out$rfdoove100 <- coximpute(domice(missdata, 'rfdoove100'))
out$cart <- coximpute(domice(missdata, 'cart'))
out$mice <- coximpute(domice(missdata, 'norm'))
out
}
###################################################
### code chunk number 3: simstudy_survival.Rnw:284-290
###################################################
# Chunk 3
# Scatterplot matrix of the predictors in a small sample, then a large
# sample for the regression check in chunk 4.
mydata <- makeSurv(200)
plot(mydata[, c('x1', 'x2', 'x3')],
main = "Associations between predictor variables in a sample dataset")
mydata <- makeSurv(20000)
###################################################
### code chunk number 4: simstudy_survival.Rnw:295-298
###################################################
# Chunk 4
# Confirm the x3 ~ x1*x2 data-generating relation is recovered by regression
summary(lm(x3 ~ x1*x2, data = mydata))
###################################################
### code chunk number 5: simstudy_survival.Rnw:301-314
###################################################
# Chunk 5
# Visualise the MAR mechanism: which x3 values go missing, against x1
mydata <- makeSurv(2000)
mydata2 <- makeMarSurv(mydata)
# Plot non-missing data
plot(mydata$x1[!is.na(mydata2$x3)], mydata$x3[!is.na(mydata2$x3)],
pch = 19, xlab = 'x1', ylab = 'x3')
# Plot missing data
points(mydata$x1[is.na(mydata2$x3)], mydata$x3[is.na(mydata2$x3)],
col = 'red', pch = 19)
legend('bottomright', legend = c('x3 observed', 'x3 missing'),
col = c('black', 'red'), pch = 19)
title('Association of predictor variables x1 and x3')
###################################################
### code chunk number 6: simstudy_survival.Rnw:319-345
###################################################
# Chunk 6
# Cox proportional hazards analysis
myformula <- as.formula(Surv(time, event) ~ x1 + x2 + x3)
# Analysis with 10,000 simulated patients (or more
# if the variable REFERENCE_SAMPLESIZE exists)
if (!exists('REFERENCE_SAMPLESIZE')){
REFERENCE_SAMPLESIZE <- 10000
}
# Use parallel processing, if available, to create
# datasets more quickly.
if ('parallel' %in% loadedNamespaces() &&
!is.null(getOption('mc.cores')) &&
.Platform$OS.type == 'unix'){
# Split the target sample size across cores; each core generates one
# cohort and the pieces are stacked with rbind
REFERENCE_SAMPLESIZE <- REFERENCE_SAMPLESIZE %/%
getOption('mc.cores')
simdata <- parallel::mclapply(1:getOption('mc.cores'),
function(x) makeSurv(REFERENCE_SAMPLESIZE))
simdata <- do.call('rbind', simdata)
} else {
simdata <- makeSurv(REFERENCE_SAMPLESIZE)
}
# Reference fit on (near) fully-observed data
summary(coxph(myformula, data = simdata))
###################################################
### code chunk number 7: simstudy_survival.Rnw:367-387
###################################################
# Chunk 7
# Setting analysis parameters: To analyse more than 3 samples,
# set N to the desired number before running this program
if (!exists('N')){
N <- 3
}
# Number of imputations (set to at least 10 when
# running an actual simulation)
if (!exists('NIMPS')){
NIMPS <- 4
}
# Use parallel processing if the 'parallel' package is loaded
if ('parallel' %in% loadedNamespaces() &&
.Platform$OS.type == 'unix'){
cat('Using parallel processing\n')
results <- parallel::mclapply(1:N, doanalysis)
} else {
results <- lapply(1:N, doanalysis)
}
###################################################
### code chunk number 8: simstudy_survival.Rnw:416-455
###################################################
# Chunk 8
# Summarise simulation performance for one coefficient under one method.
# Reads the global `results` list (one element per simulated dataset)
# and the true log hazard ratio `kLogHR`.
# Returns a named vector: bias, standard error of the bias, mean square
# error, SD of the estimates, mean 95% CI length, and empirical coverage.
getParams <- function(coef, method){
  # Extract one column of the results table for every simulation run
  pull <- function(column){
    sapply(results, function(run) run[[method]][coef, column])
  }
  estimates <- pull('est')
  ci_lengths <- pull('hi95') - pull('lo95')
  coverage <- pull('cover')
  setNames(
    c(mean(estimates) - kLogHR,
      sd(estimates) / sqrt(length(estimates)),
      mean((estimates - kLogHR) ^ 2),
      sd(estimates),
      mean(ci_lengths),
      mean(coverage)),
    c('bias', 'se_bias', 'mse', 'sd', 'ci_len', 'ci_cov'))
}
# Print a LaTeX table (via xtable) summarising the performance of every
# imputation method for the coefficient named `coef`.
# Depends on the global `results` list through getParams().
showTable <- function(coef){
  # Internal method codes, in display order
  methods <- c('full', 'missf', 'cart', 'rfdoove10',
      'rfdoove100', 'rf5', 'rf10', 'rf20', 'rf100', 'mice')
  # Human-readable labels, matching `methods` element for element
  methodnames <- c('Full data', 'missForest', 'CART MICE',
      'RF Doove MICE 10', 'RF Doove MICE 100',
      paste('RFcont MICE', c(5, 10, 20, 100)),
      'Parametric MICE')
  # One row of performance statistics per method
  out <- t(sapply(methods, function(x){
    getParams(coef, x)
  }))
  out <- formatC(out, digits = 3, format = 'fg')
  # Prepend a two-line column header and a method-name column
  out <- rbind(c('', 'Standard', 'Mean', 'SD of', 'Mean 95%',
      '95% CI'), c('Bias', 'error of bias', 'square error', 'estimate',
      'CI length', 'coverage'), out)
  out <- cbind(c('', '', methodnames), out)
  rownames(out) <- NULL
  print(xtable(out), floating = FALSE, include.rownames = FALSE,
      include.colnames = FALSE, hline.after = c(0, 2, nrow(out)))
}
###################################################
### code chunk number 9: simstudy_survival.Rnw:468-471
###################################################
# Chunk 9
showTable('x1')
###################################################
### code chunk number 10: simstudy_survival.Rnw:480-483
###################################################
# Chunk 10
showTable('x2')
###################################################
### code chunk number 11: simstudy_survival.Rnw:493-496
###################################################
# Chunk 11
showTable('x3')
###################################################
### code chunk number 12: simstudy_survival.Rnw:506-530
###################################################
# Chunk 12
# Plot bias in the x3 coefficient (with 95% confidence intervals)
# against the number of trees used by RFcont MICE.
numtrees <- c(5, 10, 20, 100)
bias <- sapply(numtrees, function(x){
  getParams('x3', paste('rf', x, sep=''))['bias']
})
se_bias <- sapply(numtrees, function(x){
  getParams('x3', paste('rf', x, sep=''))['se_bias']
})
lower_bias <- bias - 1.96*se_bias
upper_bias <- bias + 1.96*se_bias
# Blank plot (the single point is outside xlim, so nothing is visible)
plot(-100, 0, type = 'p', pch = 15, cex = 1.3, ylab = 'Bias',
    xlab = 'Number of trees', xlim = c(0,100),
    ylim = c(min(lower_bias), max(upper_bias)))
# Zero bias line
lines(c(0,100), c(0,0), lty = 2, col = 'gray')
# Confidence interval lines.
# BUG FIX: the original loop ran over 1:5, but numtrees has only four
# elements, so iteration 5 indexed past the end and drew from NA
# coordinates. Loop over the actual indices instead.
for (i in seq_along(numtrees)){lines(rep(numtrees[i], 2),
    c(lower_bias[i], upper_bias[i]))}
# Points
points(numtrees, bias, pch = 15, cex = 1.3)
title('Bias in estimate of x3 coefficient after\nmultiple imputation using RFcont MICE')
###################################################
### code chunk number 13: simstudy_survival.Rnw:535-690
###################################################
# Chunk 13
# Comparing confidence interval coverage and bias between:
# RF MICE 100 trees
# RF MICE 10 trees
# Parametric MICE
# Names of the variables in the comparison
variables <- c('x1', 'x2', 'x3')
# Convert a p value to a conventional significance marker:
# '***' for p < 0.001, '**' for p < 0.01, '*' for p < 0.05,
# and '' otherwise (including a missing p value).
pstar <- function(x){
  if (is.na(x)){
    return('')
  }
  if (x < 0.001){
    '***'
  } else if (x < 0.01){
    '**'
  } else if (x < 0.05){
    '*'
  } else {
    ''
  }
}
compareBias <- function(method1, method2){
  # Generates a table comparing bias
  # Comparison statistic is the difference in absolute bias
  # (negative means first method is better)
  # Significance stars come from a paired t test on the per-simulation
  # difference in sign-aligned bias. Depends on globals `results`,
  # `variables` and `kLogHR`.
  compareBiasVar <- function(varname){
    # All coefficients should be kLogHR
    bias1 <- sapply(results, function(x){
      x[[method1]][varname, 'est']
    }) - kLogHR
    bias2 <- sapply(results, function(x){
      x[[method2]][varname, 'est']
    }) - kLogHR
    # If the mean bias is negative, flip the sign of the whole vector
    # so the comparison is effectively between absolute biases
    if (sign(mean(bias1)) == -1){
      bias1 <- -bias1
    }
    if (sign(mean(bias2)) == -1){
      bias2 <- -bias2
    }
    paste(formatC(mean(bias1) - mean(bias2), format = 'fg', digits = 3),
      pstar(t.test(bias1 - bias2)$p.value))
  }
  sapply(variables, compareBiasVar)
}
# Generates a table comparing precision between two methods.
# Comparison statistic is the ratio of variances of the coefficient
# estimates (smaller than 1 means the first method is more precise);
# stars come from an F test of equal variances.
compareVariance <- function(method1, method2){
  ratioForVar <- function(varname){
    getEst <- function(method){
      sapply(results, function(run) run[[method]][varname, 'est'])
    }
    est1 <- getEst(method1)
    est2 <- getEst(method2)
    paste(formatC(var(est1) / var(est2), format = 'fg', digits = 3),
        pstar(var.test(est1, est2)$p.value))
  }
  sapply(variables, ratioForVar)
}
compareCIlength <- function(method1, method2){
  # Generates a table comparing confidence interval length between two
  # methods. Comparison statistic is the ratio of mean CI lengths
  # (less than 1 = first better, i.e. narrower intervals).
  # Stars come from a paired t test on per-simulation length differences.
  compareCIlengthVar <- function(varname){
    # Per-simulation CI lengths for each method
    len1 <- sapply(results, function(x){
      x[[method1]][varname, 'hi95'] -
        x[[method1]][varname, 'lo95']
    })
    len2 <- sapply(results, function(x){
      x[[method2]][varname, 'hi95'] -
        x[[method2]][varname, 'lo95']
    })
    paste(formatC(mean(len1) / mean(len2),
        format = 'fg', digits = 4),
        pstar(t.test(len1 - len2)$p.value))
  }
  sapply(variables, compareCIlengthVar)
}
compareCoverage <- function(method1, method2){
  # Generates a table comparing coverage percentage between two methods
  # Comparison statistic is the difference in coverage
  # (positive = first better)
  # Stars come from an exact binomial (McNemar-style) test on the
  # discordant pairs of coverage indicators.
  compareCoverageVar <- function(varname){
    # Per-simulation coverage indicators for each method
    cov1 <- sapply(results, function(x){
      x[[method1]][varname, 'cover']
    })
    cov2 <- sapply(results, function(x){
      x[[method2]][varname, 'cover']
    })
    paste(formatC(100 * (mean(cov1) - mean(cov2)), format = 'f',
        digits = 1),
        pstar(binom.test(c(sum(cov1 == TRUE & cov2 == FALSE),
        sum(cov1 == FALSE & cov2 == TRUE)))$p.value))
  }
  sapply(variables, compareCoverageVar)
}
maketable <- function(comparison){
  # comparison is a function such as compareCoverage, compareBias.
  # Prints two LaTeX tables: the first compares RFcont MICE (10 and
  # 100 trees) against parametric MICE, the second compares RF Doove
  # MICE 10 against RFcont MICE 10, CART MICE and RF Doove MICE 100.
  compare <- cbind(comparison('rf10', 'mice'),
      comparison('rf100', 'mice'),
      comparison('rf10', 'rf100'))
  compare <- cbind(rownames(compare), compare)
  # Two header rows describing each pairwise comparison
  compare <- rbind(
    c('', 'RFcont MICE 10 vs', 'RFcont MICE 100 vs',
      'RFcont MICE 10 vs'),
    c('Coefficient', 'parametric MICE',
      'parametric MICE', 'RFcont MICE 100'),
    compare)
  rownames(compare) <- NULL
  print(xtable(compare), include.rownames = FALSE,
      include.colnames = FALSE, floating = FALSE,
      hline.after = c(0, 2, nrow(compare)))
  # Vertical space between the two tables in the LaTeX output
  cat('\n\\vspace{1em}\n')
  compare <- cbind(comparison('rfdoove10', 'rf10'),
      comparison('rfdoove10', 'cart'),
      comparison('rfdoove10', 'rfdoove100'))
  compare <- cbind(rownames(compare), compare)
  compare <- rbind(
    c('', 'RF Doove MICE 10 vs', 'RF Doove MICE 10 vs',
      'RF Doove MICE 10 vs'),
    c('Coefficient', 'RFcont MICE 10',
      'CART MICE', 'RF Doove MICE 100'),
    compare)
  rownames(compare) <- NULL
  print(xtable(compare), include.rownames = FALSE,
      include.colnames = FALSE, floating = FALSE,
      hline.after = c(0, 2, nrow(compare)))
}
###################################################
### code chunk number 14: simstudy_survival.Rnw:699-702
###################################################
# Chunk 14
maketable(compareBias)
###################################################
### code chunk number 15: simstudy_survival.Rnw:711-714
###################################################
# Chunk 15
maketable(compareVariance)
###################################################
### code chunk number 16: simstudy_survival.Rnw:724-727
###################################################
# Chunk 16
maketable(compareCIlength)
###################################################
### code chunk number 17: simstudy_survival.Rnw:736-739
###################################################
# Chunk 17
maketable(compareCoverage)
###################################################
### code chunk number 18: simstudy_survival.Rnw:773-784
###################################################
# Chunk 18
# Print the definition of a function (looked up by name) in the form
# 'name <- <deparsed definition>', followed by a trailing newline.
# Returns NULL invisibly; used only for its printed output.
showfunction <- function(functionname){
  deparsed <- capture.output(print(get(functionname)))
  cat(paste(functionname, '<-', paste(deparsed, collapse = '\n')))
  cat('\n')
  invisible(NULL)
}
showfunction('makeSurv')
showfunction('makeMarSurv')
###################################################
### code chunk number 19: simstudy_survival.Rnw:789-803
###################################################
# Chunk 19
showfunction('coxfull')
showfunction('coximpute')
showfunction('domissf')
showfunction('mice.impute.cart')
showfunction('mice.impute.rfdoove10')
showfunction('mice.impute.rfdoove100')
showfunction('mice.impute.rfcont5')
showfunction('mice.impute.rfcont10')
showfunction('mice.impute.rfcont20')
showfunction('mice.impute.rfcont100')
showfunction('domice')
showfunction('doanalysis')
###################################################
### code chunk number 20: simstudy_survival.Rnw:808-815
###################################################
# Chunk 20
showfunction('pstar')
showfunction('compareBias')
showfunction('compareVariance')
showfunction('compareCIlength')
showfunction('compareCoverage')
###################################################
### code chunk number 21: simstudy_survival.Rnw:820-825
###################################################
# Chunk 21
showfunction('getParams')
showfunction('showTable')
showfunction('maketable')
| /inst/doc/simstudy_survival.R | no_license | cran/CALIBERrfimpute | R | false | false | 19,598 | r | ### R code from vignette source 'simstudy_survival.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: simstudy_survival.Rnw:13-43
###################################################
# Chunk 1
library(CALIBERrfimpute)
library(missForest)
library(survival)
library(xtable)
library(rpart)
library(mice)
library(ranger)
kPmiss <- 0.2 # probability of missingness
kLogHR <- 0.5 # true log hazard ratio
# To analyse samples of more than 200 patients, (recommend about 2000,
# but this will slow down the program), set NPATS before running
# this vignette.
if (!exists('NPATS')){
kSampleSize <- 200 # number of patients in simulated datasets
} else {
kSampleSize <- NPATS
}
# e.g.
# NPATS <- 2000
# To analyse more than 3 samples, set N to a number greater than 3
# e.g.
# N <- 1000
# To use more than 4 imputations, set NIMPS to a number greater than 4
# e.g.
# NIMPS <- 10
###################################################
### code chunk number 2: simstudy_survival.Rnw:86-280
###################################################
# Chunk 2
#### DATA GENERATING FUNCTIONS ####
makeSurv <- function(n = 2000, loghr = kLogHR){
  # Creates a survival cohort of n patients. Assumes that censoring is
  # independent of all other variables.
  # Returns a data.frame with columns x1, x2, x3, event, time, cumhaz.
  # x1 and x2 are random normal variables
  data <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
  # Create the x3 variable: linear in x1 and x2 with a negative
  # interaction term, plus standard normal noise
  data$x3 <- 0.5 * (data$x1 + data$x2 - data$x1 * data$x2) + rnorm(n)
  # Underlying log hazard ratio for all variables is the same
  data$y <- with(data, loghr * (x1 + x2 + x3))
  # Exponential survival times with rate exp(linear predictor)
  data$survtime <- rexp(n, exp(data$y))
  # Censoring - assume uniform distribution of observation times
  # up to a maximum (the median simulated survival time)
  obstime <- runif(nrow(data), min = 0,
      max = quantile(data$survtime, 0.5))
  data$event <- as.integer(data$survtime <= obstime)
  # Generate integer survival times (scaled by 100 and rounded up,
  # so all times are positive integers)
  data$time <- ceiling(100 * pmin(data$survtime, obstime))
  # Observed marginal cumulative hazard for imputation models
  # (Nelson-Aalen estimator from the mice package)
  data$cumhaz <- nelsonaalen(data, time, event)
  # True log hazard and survival time are not seen in the data
  # so remove them
  data$y <- NULL
  data$survtime <- NULL
  return(data)
}
makeMarSurv <- function(data, pmissing = kPmiss){
  # Introduces missing data in x3, dependent on the event indicator,
  # cumulative hazard, x1 and x2 (missing at random: missingness
  # depends only on observed quantities).
  logistic <- function(x){
    exp(x) / (1 + exp(x))
  }
  predictions <- function(lp, n){
    # uses the vector of linear predictions (lp) from a logistic model
    # and the expected number of positive responses (n) to generate
    # a set of predictions by modifying the baseline
    trialn <- function(lptrial){
      # Expected number of positive responses for a candidate shift
      sum(logistic(lptrial))
    }
    stepsize <- 32
    lptrial <- lp
    # To avoid errors due to missing linear predictors (ideally
    # there should not be any missing), replace with the mean.
    # BUG FIX: the original assigned into `lp` here, leaving NAs in
    # `lptrial`; trialn() then returned NA and the while() condition
    # raised an error whenever any linear predictor was missing.
    if (any(is.na(lptrial))){
      lptrial[is.na(lptrial)] <- mean(lptrial, na.rm = TRUE)
    }
    # Bisection search on an additive shift to the linear predictor
    # until the expected positive count is within 1 of the target n
    while(abs(trialn(lptrial) - n) > 1){
      if (trialn(lptrial) > n){
        # trialn bigger than required
        lptrial <- lptrial - stepsize
      } else {
        lptrial <- lptrial + stepsize
      }
      stepsize <- stepsize / 2
    }
    # Generate predictions from binomial distribution
    as.logical(rbinom(logical(length(lp)), 1, logistic(lptrial)))
  }
  # Set x3 to missing for roughly a pmissing fraction of rows, with
  # probability increasing with x1, x2, cumhaz and event status
  data$x3[predictions(0.1 * data$x1 + 0.1 * data$x2 +
      0.1 * data$cumhaz + 0.1 * data$event, nrow(data) * pmissing)] <- NA
  return(data)
}
#### IMPUTATION FUNCTIONS FROM DOOVE AND VAN BUUREN ####
# MICE imputation wrapper: random forest method of Doove et al.
# (mice::mice.impute.rf) with 10 trees
mice.impute.rfdoove10 <- function(y, ry, x, ...){
  mice::mice.impute.rf(y = y, ry = ry, x = x, ntrees = 10)
}
# MICE imputation wrapper: random forest method of Doove et al.
# (mice::mice.impute.rf) with 100 trees
mice.impute.rfdoove100 <- function(y, ry, x, ...){
  mice::mice.impute.rf(y = y, ry = ry, x = x, ntrees = 100)
}
#### OUR MICE RANDOM FOREST FUNCTIONS ####
mice.impute.rfcont5 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 5)
}
mice.impute.rfcont10 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 10)
}
mice.impute.rfcont20 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 20)
}
mice.impute.rfcont50 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 50)
}
mice.impute.rfcont100 <- function(y, ry, x, ...){
CALIBERrfimpute::mice.impute.rfcont(
y = y, ry = ry, x = x, ntree_cont = 100)
}
#### FUNCTIONS TO DO THE ANALYSIS ####
coxfull <- function(data){
  # Full data analysis: fit the Cox model (global `myformula`) to the
  # complete dataset and return estimates with normal-approximation
  # 95% confidence limits.
  coefs <- as.data.frame(summary(coxph(myformula, data = data))$coef)
  # return a data.frame of coefficients (est), upper and lower 95% limits
  out <- data.frame(est = coefs[, 'coef'],
      lo95 = coefs[, 'coef'] + qnorm(0.025) * coefs[, 'se(coef)'],
      hi95 = coefs[, 'coef'] + qnorm(0.975) * coefs[, 'se(coef)'],
      row.names = row.names(coefs))
  # Whether each interval covers the true log hazard ratio (kLogHR)
  out$cover <- kLogHR >= out$lo95 & kLogHR <= out$hi95
  out
}
coximpute <- function(imputed_datasets){
  # Fit the Cox model (global `myformula`) to each imputed dataset and
  # pool the fits with Rubin's rules via mice::pool.
  # Returns a data.frame with the pooled estimate, 95% limits and an
  # indicator of whether the interval covers the true log HR (kLogHR).
  docoxmodel <- function(data){
    coxph(myformula, data = data)
  }
  mirafits <- as.mira(lapply(imputed_datasets, docoxmodel))
  coefs <- as.data.frame(summary(pool(mirafits)))
  if ('term' %in% colnames(coefs)){
    row.names(coefs) <- as.character(coefs$term)
  }
  # The two summary.mipo layouts are complementary, so a plain
  # if/else suffices; the original code had an unreachable
  # stop() branch after testing both conditions.
  if ('lo 95' %in% colnames(coefs)){
    # older version of mice: 95% limits reported directly
    out <- data.frame(est = coefs$est,
        lo95 = coefs[, 'lo 95'], hi95 = coefs[, 'hi 95'],
        row.names = row.names(coefs))
  } else {
    # newer version of mice:
    # use normal approximation for now, as assume large sample
    # and large degrees of freedom for t distribution
    out <- data.frame(est = coefs$estimate,
        lo95 = coefs$estimate + qnorm(0.025) * coefs$std.error,
        hi95 = coefs$estimate + qnorm(0.975) * coefs$std.error,
        row.names = row.names(coefs))
  }
  # Whether this confidence interval contains the true hazard ratio
  out$cover <- kLogHR >= out$lo95 & kLogHR <= out$hi95
  out
}
# Produce `reps` independent missForest imputations of `missdata`,
# returning them as a list of completed data frames.
# missForest's console output is captured and discarded.
domissf <- function(missdata, reps = NIMPS){
  lapply(seq_len(reps), function(i){
    imputed <- NULL
    invisible(capture.output(
      imputed <- missForest(missdata)$ximp))
    imputed
  })
}
domice <- function(missdata, functions, reps = NIMPS){
  # Multiple imputation with mice using the given default method(s).
  # Returns a list of `reps` completed datasets.
  # visitSequence = 'monotone' imputes columns in order of increasing
  # missingness; maxit = 10 iterations of the chained equations.
  mids <- mice(missdata, defaultMethod = functions,
      m = reps, visitSequence = 'monotone',
      printFlag = FALSE, maxit = 10)
  lapply(1:reps, function(x) complete(mids, x))
}
doanalysis <- function(x){
  # Creates a dataset, analyses it using different methods, and outputs
  # the result as a list of coefficient tables (estimate, 95% limits,
  # coverage) - one per analysis method.
  # `x` is an iteration index, unused but required so this function can
  # be mapped over 1:N by lapply/mclapply.
  data <- makeSurv(kSampleSize)
  missdata <- makeMarSurv(data)
  out <- list()
  # Reference analysis on the fully observed data
  out$full <- coxfull(data)
  # missForest imputation
  out$missf <- coximpute(domissf(missdata))
  # RFcont MICE with increasing numbers of trees
  out$rf5 <- coximpute(domice(missdata, 'rfcont5'))
  out$rf10 <- coximpute(domice(missdata, 'rfcont10'))
  out$rf20 <- coximpute(domice(missdata, 'rfcont20'))
  out$rf100 <- coximpute(domice(missdata, 'rfcont100'))
  # RF Doove MICE and CART MICE
  out$rfdoove10 <- coximpute(domice(missdata, 'rfdoove10'))
  out$rfdoove100 <- coximpute(domice(missdata, 'rfdoove100'))
  out$cart <- coximpute(domice(missdata, 'cart'))
  # Parametric (normal) MICE
  out$mice <- coximpute(domice(missdata, 'norm'))
  out
}
###################################################
### code chunk number 3: simstudy_survival.Rnw:284-290
###################################################
# Chunk 3
mydata <- makeSurv(200)
plot(mydata[, c('x1', 'x2', 'x3')],
main = "Associations between predictor variables in a sample dataset")
mydata <- makeSurv(20000)
###################################################
### code chunk number 4: simstudy_survival.Rnw:295-298
###################################################
# Chunk 4
summary(lm(x3 ~ x1*x2, data = mydata))
###################################################
### code chunk number 5: simstudy_survival.Rnw:301-314
###################################################
# Chunk 5
mydata <- makeSurv(2000)
mydata2 <- makeMarSurv(mydata)
# Plot non-missing data
plot(mydata$x1[!is.na(mydata2$x3)], mydata$x3[!is.na(mydata2$x3)],
pch = 19, xlab = 'x1', ylab = 'x3')
# Plot missing data
points(mydata$x1[is.na(mydata2$x3)], mydata$x3[is.na(mydata2$x3)],
col = 'red', pch = 19)
legend('bottomright', legend = c('x3 observed', 'x3 missing'),
col = c('black', 'red'), pch = 19)
title('Association of predictor variables x1 and x3')
###################################################
### code chunk number 6: simstudy_survival.Rnw:319-345
###################################################
# Chunk 6
# Cox proportional hazards analysis
myformula <- as.formula(Surv(time, event) ~ x1 + x2 + x3)
# Analysis with 10,000 simulated patients (or more
# if the variable REFERENCE_SAMPLESIZE exists)
if (!exists('REFERENCE_SAMPLESIZE')){
REFERENCE_SAMPLESIZE <- 10000
}
# Use parallel processing, if available, to create
# datasets more quickly.
if ('parallel' %in% loadedNamespaces() &&
!is.null(getOption('mc.cores')) &&
.Platform$OS.type == 'unix'){
REFERENCE_SAMPLESIZE <- REFERENCE_SAMPLESIZE %/%
getOption('mc.cores')
simdata <- parallel::mclapply(1:getOption('mc.cores'),
function(x) makeSurv(REFERENCE_SAMPLESIZE))
simdata <- do.call('rbind', simdata)
} else {
simdata <- makeSurv(REFERENCE_SAMPLESIZE)
}
summary(coxph(myformula, data = simdata))
###################################################
### code chunk number 7: simstudy_survival.Rnw:367-387
###################################################
# Chunk 7
# Setting analysis parameters: To analyse more than 3 samples,
# set N to the desired number before running this program
if (!exists('N')){
N <- 3
}
# Number of imputations (set to at least 10 when
# running an actual simulation)
if (!exists('NIMPS')){
NIMPS <- 4
}
# Use parallel processing if the 'parallel' package is loaded
if ('parallel' %in% loadedNamespaces() &&
.Platform$OS.type == 'unix'){
cat('Using parallel processing\n')
results <- parallel::mclapply(1:N, doanalysis)
} else {
results <- lapply(1:N, doanalysis)
}
###################################################
### code chunk number 8: simstudy_survival.Rnw:416-455
###################################################
# Chunk 8
getParams <- function(coef, method){
estimates <- sapply(results, function(x){
x[[method]][coef, 'est']
})
bias <- mean(estimates) - kLogHR
se_bias <- sd(estimates) / sqrt(length(estimates))
mse <- mean((estimates - kLogHR) ^ 2)
ci_len <- mean(sapply(results, function(x){
x[[method]][coef, 'hi95'] - x[[method]][coef, 'lo95']
}))
ci_cov <- mean(sapply(results, function(x){
x[[method]][coef, 'cover']
}))
out <- c(bias, se_bias, mse, sd(estimates), ci_len, ci_cov)
names(out) <- c('bias', 'se_bias', 'mse', 'sd', 'ci_len', 'ci_cov')
out
}
showTable <- function(coef){
methods <- c('full', 'missf', 'cart', 'rfdoove10',
'rfdoove100', 'rf5', 'rf10', 'rf20', 'rf100', 'mice')
methodnames <- c('Full data', 'missForest', 'CART MICE',
'RF Doove MICE 10', 'RF Doove MICE 100',
paste('RFcont MICE', c(5, 10, 20, 100)),
'Parametric MICE')
out <- t(sapply(methods, function(x){
getParams(coef, x)
}))
out <- formatC(out, digits = 3, format = 'fg')
out <- rbind(c('', 'Standard', 'Mean', 'SD of', 'Mean 95%',
'95% CI'), c('Bias', 'error of bias', 'square error', 'estimate',
'CI length', 'coverage'), out)
out <- cbind(c('', '', methodnames), out)
rownames(out) <- NULL
print(xtable(out), floating = FALSE, include.rownames = FALSE,
include.colnames = FALSE, hline.after = c(0, 2, nrow(out)))
}
###################################################
### code chunk number 9: simstudy_survival.Rnw:468-471
###################################################
# Chunk 9
showTable('x1')
###################################################
### code chunk number 10: simstudy_survival.Rnw:480-483
###################################################
# Chunk 10
showTable('x2')
###################################################
### code chunk number 11: simstudy_survival.Rnw:493-496
###################################################
# Chunk 11
showTable('x3')
###################################################
### code chunk number 12: simstudy_survival.Rnw:506-530
###################################################
# Chunk 12
numtrees <- c(5, 10, 20, 100)
bias <- sapply(numtrees, function(x){
getParams('x3', paste('rf', x, sep=''))['bias']
})
se_bias <- sapply(numtrees, function(x){
getParams('x3', paste('rf', x, sep=''))['se_bias']
})
lower_bias <- bias - 1.96*se_bias
upper_bias <- bias + 1.96*se_bias
# Blank plot
plot(-100, 0, type = 'p', pch = 15, cex = 1.3, ylab = 'Bias',
xlab = 'Number of trees', xlim = c(0,100),
ylim = c(min(lower_bias), max(upper_bias)))
# Zero bias line
lines(c(0,100), c(0,0), lty = 2, col = 'gray')
# Confidence interval lines
for (i in 1:5){lines(rep(numtrees[i], 2),
c(lower_bias[i], upper_bias[i]))}
# Points
points(numtrees, bias, pch = 15, cex = 1.3)
title('Bias in estimate of x3 coefficient after\nmultiple imputation using RFcont MICE')
###################################################
### code chunk number 13: simstudy_survival.Rnw:535-690
###################################################
# Chunk 13
# Comparing confidence interval coverage and bias between:
# RF MICE 100 trees
# RF MICE 10 trees
# Parametric MICE
# Names of the variables in the comparison
variables <- c('x1', 'x2', 'x3')
pstar <- function(x){
if (!is.na(x)){
if (x < 0.001){
'***'
} else if (x < 0.01){
'**'
} else if (x < 0.05){
'*'
} else {
''
}
} else {
''
}
}
compareBias <- function(method1, method2){
# Generates a table comparing bias
# Comparison statistic is the difference in absolute bias
# (negative means first method is better)
compareBiasVar <- function(varname){
# All coefficients should be kLogHR
bias1 <- sapply(results, function(x){
x[[method1]][varname, 'est']
}) - kLogHR
bias2 <- sapply(results, function(x){
x[[method2]][varname, 'est']
}) - kLogHR
if (sign(mean(bias1)) == -1){
bias1 <- -bias1
}
if (sign(mean(bias2)) == -1){
bias2 <- -bias2
}
paste(formatC(mean(bias1) - mean(bias2), format = 'fg', digits = 3),
pstar(t.test(bias1 - bias2)$p.value))
}
sapply(variables, compareBiasVar)
}
compareVariance <- function(method1, method2){
# Generates a table comparing precision between two methods
# Comparison statistic is ratio of variance
# (smaller means first method is better)
compareVarianceVar <- function(varname){
e1 <- sapply(results, function(x){
x[[method1]][varname, 'est']
})
e2 <- sapply(results, function(x){
x[[method2]][varname, 'est']
})
paste(formatC(var(e1) / var(e2), format = 'fg', digits = 3),
pstar(var.test(e1, e2)$p.value))
}
sapply(variables, compareVarianceVar)
}
compareCIlength <- function(method1, method2){
# Generates a table comparing coverage percentage between two methods
# Comparison statistic is the ratio of confidence interval lengths
# (less than 1 = first better)
compareCIlengthVar <- function(varname){
# Paired t test for bias (difference in estimate)
len1 <- sapply(results, function(x){
x[[method1]][varname, 'hi95'] -
x[[method1]][varname, 'lo95']
})
len2 <- sapply(results, function(x){
x[[method2]][varname, 'hi95'] -
x[[method2]][varname, 'lo95']
})
paste(formatC(mean(len1) / mean(len2),
format = 'fg', digits = 4),
pstar(t.test(len1 - len2)$p.value))
}
sapply(variables, compareCIlengthVar)
}
compareCoverage <- function(method1, method2){
# Generates a table comparing coverage percentage between two methods
# Comparison statistic is the difference in coverage
# (positive = first better)
compareCoverageVar <- function(varname){
# Paired t test for bias (difference in estimate)
cov1 <- sapply(results, function(x){
x[[method1]][varname, 'cover']
})
cov2 <- sapply(results, function(x){
x[[method2]][varname, 'cover']
})
paste(formatC(100 * (mean(cov1) - mean(cov2)), format = 'f',
digits = 1),
pstar(binom.test(c(sum(cov1 == TRUE & cov2 == FALSE),
sum(cov1 == FALSE & cov2 == TRUE)))$p.value))
}
sapply(variables, compareCoverageVar)
}
maketable <- function(comparison){
# comparison is a function such as compareCoverage, compareBias
compare <- cbind(comparison('rf10', 'mice'),
comparison('rf100', 'mice'),
comparison('rf10', 'rf100'))
compare <- cbind(rownames(compare), compare)
compare <- rbind(
c('', 'RFcont MICE 10 vs', 'RFcont MICE 100 vs',
'RFcont MICE 10 vs'),
c('Coefficient', 'parametric MICE',
'parametric MICE', 'RFcont MICE 100'),
compare)
rownames(compare) <- NULL
print(xtable(compare), include.rownames = FALSE,
include.colnames = FALSE, floating = FALSE,
hline.after = c(0, 2, nrow(compare)))
cat('\n\\vspace{1em}\n')
compare <- cbind(comparison('rfdoove10', 'rf10'),
comparison('rfdoove10', 'cart'),
comparison('rfdoove10', 'rfdoove100'))
compare <- cbind(rownames(compare), compare)
compare <- rbind(
c('', 'RF Doove MICE 10 vs', 'RF Doove MICE 10 vs',
'RF Doove MICE 10 vs'),
c('Coefficient', 'RFcont MICE 10',
'CART MICE', 'RF Doove MICE 100'),
compare)
rownames(compare) <- NULL
print(xtable(compare), include.rownames = FALSE,
include.colnames = FALSE, floating = FALSE,
hline.after = c(0, 2, nrow(compare)))
}
###################################################
### code chunk number 14: simstudy_survival.Rnw:699-702
###################################################
# Chunk 14
maketable(compareBias)
###################################################
### code chunk number 15: simstudy_survival.Rnw:711-714
###################################################
# Chunk 15
maketable(compareVariance)
###################################################
### code chunk number 16: simstudy_survival.Rnw:724-727
###################################################
# Chunk 16
maketable(compareCIlength)
###################################################
### code chunk number 17: simstudy_survival.Rnw:736-739
###################################################
# Chunk 17
maketable(compareCoverage)
###################################################
### code chunk number 18: simstudy_survival.Rnw:773-784
###################################################
# Chunk 18
showfunction <- function(functionname){
cat(paste(functionname, '<-',
paste(capture.output(print(get(functionname))),
collapse = '\n')))
cat('\n')
invisible(NULL)
}
showfunction('makeSurv')
showfunction('makeMarSurv')
###################################################
### code chunk number 19: simstudy_survival.Rnw:789-803
###################################################
# Chunk 19
showfunction('coxfull')
showfunction('coximpute')
showfunction('domissf')
showfunction('mice.impute.cart')
showfunction('mice.impute.rfdoove10')
showfunction('mice.impute.rfdoove100')
showfunction('mice.impute.rfcont5')
showfunction('mice.impute.rfcont10')
showfunction('mice.impute.rfcont20')
showfunction('mice.impute.rfcont100')
showfunction('domice')
showfunction('doanalysis')
###################################################
### code chunk number 20: simstudy_survival.Rnw:808-815
###################################################
# Chunk 20
showfunction('pstar')
showfunction('compareBias')
showfunction('compareVariance')
showfunction('compareCIlength')
showfunction('compareCoverage')
###################################################
### code chunk number 21: simstudy_survival.Rnw:820-825
###################################################
# Chunk 21
showfunction('getParams')
showfunction('showTable')
showfunction('maketable')
|
# Loading required packages
library(shiny)
library(XML)
# Reading and cleaning country data with average male and female BMI from Wiki
url<-"http://en.wikipedia.org/wiki/Body_mass_index"
# NOTE(review): the table index [[7]] and the 177-row cutoff depend on
# the current layout of the Wikipedia page - verify if scraping breaks
table<-readHTMLTable(url)[[7]]
# Drop the relative/ratio/average summary columns, keeping country + BMI
table<-table[,!grepl("Relative|Ratio|Average",names(table))]
table<-table[1:177,]
names(table)<-c("country","male","female")
# Convert factor columns to character/numeric for use in the app
table$country<-as.character(table$country)
table$male<-as.numeric(as.character(table$male))
table$female<-as.numeric(as.character(table$female))
# Defining shiny UI
shinyUI(pageWithSidebar(
headerPanel("Body Mass Index comparison"),
sidebarPanel(
selectInput("country", "Choose your country:",
as.list(table$country)),
selectInput("sex", "Choose your sex:",
list("Male","Female")),
numericInput("weight", "Type your weigth in kilograms",
80,min=1,step=1),
numericInput("height", "Type your heigth in meters",
1.80,min=0.01,step=0.01),
actionButton("go", "Calculate")
),
mainPanel(textOutput("text1"),
textOutput("text2"),
plotOutput("plot1",width="100%",height="100%")
)
)) | /ui.R | no_license | ivasche/dataproducts-project | R | false | false | 1,238 | r | # Loading required packages
library(shiny)
library(XML)
# Reading and cleaning country data with average male and female BMI from Wiki
url<-"http://en.wikipedia.org/wiki/Body_mass_index"
table<-readHTMLTable(url)[[7]]
table<-table[,!grepl("Relative|Ratio|Average",names(table))]
table<-table[1:177,]
names(table)<-c("country","male","female")
table$country<-as.character(table$country)
table$male<-as.numeric(as.character(table$male))
table$female<-as.numeric(as.character(table$female))
# Defining shiny UI
# UI definition: country/sex selectors plus weight and height inputs,
# with a main panel for the computed BMI text and comparison plot.
shinyUI(pageWithSidebar(
  headerPanel("Body Mass Index comparison"),
  sidebarPanel(
    selectInput("country", "Choose your country:",
        as.list(table$country)),
    selectInput("sex", "Choose your sex:",
        list("Male","Female")),
    # Typo fixes in the user-facing labels:
    # "weigth" -> "weight", "heigth" -> "height"
    numericInput("weight", "Type your weight in kilograms",
        80,min=1,step=1),
    numericInput("height", "Type your height in meters",
        1.80,min=0.01,step=0.01),
    actionButton("go", "Calculate")
  ),
  mainPanel(textOutput("text1"),
      textOutput("text2"),
      plotOutput("plot1",width="100%",height="100%")
  )
))
# Auto-generated (AFL fuzzing) regression input for the internal
# multivariance:::match_rows function: A is a 2 x 10 double matrix
# containing subnormal/near-zero values, B is a 1 x 1 matrix.
testlist <- list(A = structure(c(2.08997924505417e-236, 4.1223404161408e-294, 1.41355599731096e-303, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 10L)), B = structure(0, .Dim = c(1L, 1L)))
# Call the function with the fuzzed arguments and inspect the result
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################
.libPaths( c(.libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )
library( 'xml2' ) # used by rvest package
library( 'rvest' ) # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # power bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )
###################################
# FUNCTIONS
###################################
###
# Download the full text pdf for one article.
#
# pmid  : PubMed identifier; also used as the output file name.
# url   : full text url of the article (publisher landing page).
# outdr : output directory (defaults to the global `outdir`).
#
# Returns NA on success or when the pdf already exists; on error or
# warning a string "<pmid> <message>" is returned so batch callers can
# log failures without the loop being interrupted.
##
get.pdf <- function( pmid, url, outdr = outdir )
{
  # prevent the function from shutting down due to an error
  v <- tryCatch(
    {
      # output pdf; skip work entirely if it was downloaded before
      outpdf <- paste0( outdr, '/', pmid, '.pdf' )
      if( ! file.exists( outpdf ) )
      {
        # OVID needs two attempts: fall back to the second scraper when
        # the first one finds nothing
        resolve.ovid <- function( u )
        {
          link <- get.pdflink.ovid1( u )
          if( length( link ) == 0 ) link <- get.pdflink.ovid2( u )
          link
        }
        # publishers where the article url already points at the pdf
        resolve.identity <- function( u ) u
        # Ordered map: url pattern (regex, as in the original grepl calls)
        # -> name of the resolver function. Names (not function objects)
        # are stored so a resolver is only looked up when its pattern
        # matches. Order matters: when several patterns match, the LAST
        # matching entry wins, mirroring the original if-chain.
        dispatch <- c(
          "arvojournals"                          = "get.pdflink.arvojournals",
          "jamanetwork.com"                       = "get.pdflink.jama",
          "dx.plos"                               = "get.pdflink.plos",
          "/EHP"                                  = "get.pdflink.ehp",
          "/bjs"                                  = "get.pdflink.doibjs",
          "wiley.com"                             = "get.pdflink.wileyreal",
          "bmj.com"                               = "get.pdflink.bmj",
          "cmaj.ca"                               = "get.pdflink.cmaj",
          "nejm.org"                              = "get.pdflink.nejm",
          "scielo.br"                             = "get.pdflink.scielo",
          "academic.oup"                          = "get.pdflink.acoup",
          "annals"                                = "get.pdflink.annals",
          "cambridge.org"                         = "get.pdflink.cambridge",
          "Insights.ovid"                         = "resolve.ovid",
          "iiar"                                  = "get.pdflink.iiar",
          "ahajournals"                           = "get.pdflink.ahaj",
          "sciencedirect.com"                     = "get.pdflink.sciencedirect",
          "asm"                                   = "get.pdflink.asm",
          # FIX: the original assigned this function without calling it
          "ajp"                                   = "get.pdflink.ajp",
          "apsjournals"                           = "get.pdflink.apsjournals",
          "arjournals"                            = "get.pdflink.arjournals",
          "ascopubs"                              = "get.pdflink.ascopubs",
          "avmajournals"                          = "get.pdflink.avma",
          "bjgp"                                  = "get.pdflink.bjgp",
          "boneandjoint"                          = "get.pdflink.boneandjoint",
          "aacrjournals"                          = "get.pdflink.aacrjournals",
          "diabetesjournals"                      = "get.pdflink.diabetesjournals",
          "asnjournals"                           = "get.pdflink.asnjournals",
          "ersjournals"                           = "get.pdflink.ersjournals",
          "gacetamedicade"                        = "get.pdflink.gacetamedicade",
          "tums.ac.ir"                            = "get.pdflink.tums",
          "nutrition.org"                         = "get.pdflink.nutrition",
          "aota.org"                              = "get.pdflink.aota",
          "physiology.org"                        = "get.pdflink.physiology",
          "asahq.org"                             = "get.pdflink.asahq",
          "upol.cz"                               = "get.pdflink.upol.cz",
          "rcpsych.org"                           = "get.pdflink.rcpsych",
          "sabinet.co.za"                         = "get.pdflink.sabinet",
          "quintessenz"                           = "get.pdflink.quintessenz",
          "clinicalandtranslationalinvestigation" = "get.pdflink.clinicalandtranslationalinvestigation",
          "jaoa.org"                              = "get.pdflink.jaoa",
          "snmjournals"                           = "get.pdflink.snmjournals",
          "umsha"                                 = "get.pdflink.umsha",
          "tokai"                                 = "get.pdflink.tokai",
          "pamw.pl"                               = "get.pdflink.pamw",
          "aappublications"                       = "get.pdflink.aappublications",
          "publisherspanel"                       = "get.pdflink.publisherspanel",
          "rcseng"                                = "get.pdflink.rcseng",
          "rsna"                                  = "get.pdflink.rsna",
          "rcjournal"                             = "get.pdflink.rcjournal",
          "revistachirurgia"                      = "get.pdflink.revistachirurgia",
          "thejns"                                = "get.pdflink.thejns",
          "alphamedpress"                         = "get.pdflink.alphamedpress",
          "aepress"                               = "get.pdflink.aepress",
          "ajronline"                             = "get.pdflink.ajronline",
          "ajcn"                                  = "get.pdflink.ajcn",
          "ams.ac.ir"                             = "get.pdflink.ams.ac.ir",
          "annfammed"                             = "get.pdflink.annfammed",
          "annsaudimed"                           = "get.pdflink.annsaudimed",
          "atsjournals"                           = "get.pdflink.atsjournals",
          "birpublications"                       = "get.pdflink.birpublications",
          "bloodjournal"                          = "get.pdflink.bloodjournal",
          "cfp.org"                               = "get.pdflink.cfp",
          "cmj.hr"                                = "get.pdflink.cmj.hr",
          "cmj.org"                               = "get.pdflink.cmj.org",
          "danmedj"                               = "get.pdflink.danmedj",
          "dirjournal"                            = "get.pdflink.dirjournal",
          "e-cmh"                                 = "get.pdflink.ecmh",
          "ectrx"                                 = "get.pdflink.ectrx",
          "educationforhealth"                    = "get.pdflink.educationforhealth",
          "eje-online"                            = "get.pdflink.ejeonline",
          "europeanreview"                        = "get.pdflink.europeanreview",
          "haematologica"                         = "get.pdflink.haematologica",
          "hdbp"                                  = "get.pdflink.hdbp",
          "healio"                                = "get.pdflink.healio",
          "ijkd"                                  = "get.pdflink.ijkd",
          "ijo.in"                                = "get.pdflink.ijo.in",
          "impactjournals"                        = "get.pdflink.impactjournals",
          "inaactamedica"                         = "get.pdflink.inaactamedica",
          "indianjcancer"                         = "get.pdflink.indianjcancer",
          "intbrazjurol"                          = "resolve.identity",
          "jiaci"                                 = "get.pdflink.jiaci",
          # FIX: the original called get.pdflink.jmir, which is not
          # defined; the defined scraper is get.pdflink.jmir.org
          "jmir"                                  = "get.pdflink.jmir.org",
          "jneurosci"                             = "get.pdflink.jneurosci",
          "jospt"                                 = "get.pdflink.jospt",
          "mdpi.com"                              = "get.pdflink.mdpi.com",
          "painphysicianjournal"                  = "get.pdflink.painphysicianjournal",
          "sjweh"                                 = "get.pdflink.sjweh",
          "tandfonline"                           = "get.pdflink.tandfonline",
          "thieme-connect"                        = "get.pdflink.thieme",
          "wjgnet"                                = "get.pdflink.wjgnet",
          "degruyter"                             = "get.pdflink.degruyter",
          "biomedcentral"                         = "get.pdflink.biomedcentral",
          "karger"                                = "get.pdflink.karger",
          "jkan.or.kr"                            = "get.pdflink.jkan.or.kr",
          "medicaljournals.se"                    = "get.pdflink.medicaljournals.se",
          "anesthesiology"                        = "get.pdflink.anesthesiology",
          "linkinghub"                            = "get.pdflink.linkinghub",
          "doi.org/10.1038"                       = "get.pdflink.nature",
          "doi.org/10.1089"                       = "get.pdflink.acm",
          "doi.org/10.1111"                       = "get.pdflink.wiley",
          "doi.org/10.1002"                       = "get.pdflink.wiley",
          "doi.org/10.1007"                       = "get.pdflink.springerlink",
          "psychiatryonline"                      = "get.pdflink.psychiatryonline"
        )
        # set empty pdflink, then apply every matching resolver in order
        pdflink <- NA
        for( i in seq_along( dispatch ) )
        {
          if( grepl( names( dispatch )[ i ], url ) )
          {
            pdflink <- do.call( dispatch[[ i ]], list( url ) )
          }
        }
        # Download the pdf if a link was resolved. NOTE(review): some
        # resolvers may return NULL/character(0); is.na() then yields a
        # zero-length condition and if() errors — caught by tryCatch
        # below, exactly as in the original code.
        if( ! is.na( pdflink ) )
        {
          download.file( url = pdflink, destfile = outpdf,
                         mode = "wb", quiet = TRUE )
        }
      }
      return( NA )
    },
    error = function(err) {
      # report the failing pmid and keep going
      message(paste( pmid, err, "\n" ) )
      return( paste( pmid, "URL does not seem to exist" ) )
    },
    warning = function(war) {
      # download.file warns e.g. on HTTP errors; flag for manual check
      message(paste( pmid, war, "\n" ) )
      return( paste( pmid, "warning, test if downloaded" ) )
    }
  )
}
###
# Resolve the full text pdf url on a psychiatryonline.org article page.
##
get.pdflink.psychiatryonline <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the ".show-pdf" anchor carries the pdf href (css found via SelectorGadget)
  html_attr( html_nodes( doc, css = ".show-pdf" ), "href" )
}
###
# Resolve the full text pdf url on a SpringerLink article page.
# NOTE(review): this function is defined a second time further down in
# this file with an identical body; the later definition shadows this one.
##
get.pdflink.springerlink <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a Nature (10.1038) article page.
#
# Tries the citation_pdf_url meta tag first; falls back to the inline
# download anchor. Returns NULL when neither is present (the caller's
# tryCatch handles the resulting is.na() failure, as before).
##
get.pdflink.nature <- function( url )
{
  # parse page with a browser-like user agent
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # preferred source: citation_pdf_url meta tag
  pdflink <- page %>% html_nodes( css = 'meta[name="citation_pdf_url"]' ) %>% html_attr( "content" )
  if( !identical( pdflink, character(0) ) )
  {
    # FIX: the original fell through without returning the link found here,
    # so articles WITH a meta tag yielded NULL and were never downloaded
    return( pdflink )
  }
  # fallback: inline download anchor on the article page
  intermed1 <- page %>% html_nodes( css = 'a[class="inline-block block-link pa10 pl0"]' ) %>% html_attr( "href" )
  if( !identical( intermed1, character(0) ) )
  {
    return( paste0( "https://www.nature.com", intermed1[1] ) )
  }
  # nothing found
  NULL
}
###
# Resolve the full text pdf url for a 10.1089 doi (Liebert platform).
##
get.pdflink.acm <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf/print toolbar anchor
  hit <- html_attr( html_nodes( doc, css = '.pdfprint a' ), "href" )
  # when the anchor is missing, fall through and return NULL (as before)
  if( !identical( hit, character(0) ) )
  {
    return( paste0( "http://online.liebertpub.com", hit ) )
  }
}
###
# Resolve the full text pdf url on a Wiley (doi.org/10.1111, 10.1002) page.
##
get.pdflink.wiley <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a wiley.com article page.
# (Same scraping recipe as get.pdflink.wiley, kept separate per url kind.)
##
get.pdflink.wileyreal <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a ScienceDirect article page.
##
get.pdflink.sciencedirect <- function( url )
{
  # Step 1: the landing page is an interstitial whose "redirectURL" input
  # holds the (url-encoded) real article location.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  target <- URLdecode( html_attr( html_nodes( doc, css = 'input[name="redirectURL"]' ), "value" ) )
  # Step 2: fetch the real article page and take its citation meta tag.
  art <- xml2::read_html( curl( target, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  rel <- html_attr( html_nodes( art, css = 'meta[name="citation_pdf_url"]' ), "content" )
  # the meta tag holds a relative path; prefix the publisher host
  paste0( "https://www.sciencedirect.com", rel )
}
###
# Resolve the full text pdf url on a SpringerLink article page.
# NOTE(review): duplicate of the identical definition earlier in this
# file; this later definition is the one that takes effect.
##
get.pdflink.springerlink <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a medicaljournals.se article page.
##
get.pdflink.medicaljournals.se <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link sits on the second download button
  hit <- html_attr( html_nodes( doc, css = 'li:nth-child(2) .btn-success2' ), "href" )
  paste0( "https://www.medicaljournals.se", hit )
}
###
# Resolve the full text pdf url on a jkan.or.kr article page.
##
get.pdflink.jkan.or.kr <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link in the fourth entry of the format list
  hit <- html_attr( html_nodes( doc, css = '#portlet_content_Format li:nth-child(4) a' ), "href" )
  paste0( "https://www.jkan.or.kr", hit )
}
###
# Resolve the full text pdf url on a karger.com article page.
##
get.pdflink.karger <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link on the Karger download button
  hit <- html_attr( html_nodes( doc, css = '.btn-karger' ), "href" )
  paste0( "https://www.karger.com", hit )
}
###
# Resolve the full text pdf url on a degruyter.com article page.
##
get.pdflink.degruyter <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link on the ".pdf-link" element
  hit <- html_attr( html_nodes( doc, css = '.pdf-link' ), "href" )
  paste0( "https://www.degruyter.com", hit )
}
###
# Resolve the full text pdf url on a BioMed Central article page.
##
get.pdflink.biomedcentral <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a wjgnet article page.
##
get.pdflink.wjgnet <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute pdf link in the third entry of the article navigation
  html_attr( html_nodes( doc, css = '.left-articlenav li:nth-child(3) a' ), "href" )
}
###
# Resolve the full text pdf url on a thieme-connect article page
# (two hops: article tab -> pdf view -> document link).
##
get.pdflink.thieme <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # first hop: the second article tab links to the pdf view
  tab <- html_attr( html_nodes( doc, css = '#articleTabs :nth-child(2) a' ), "href" )
  pdfpage <- xml2::read_html( curl( paste0( "http://www.thieme-connect.com", tab ),
                                    handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # second hop: the pdf view exposes the actual document behind #pdfLink
  rel <- html_attr( html_nodes( pdfpage, css = '#pdfLink' ), "href" )
  paste0( "http://www.thieme-connect.com", rel )
}
###
# Resolve the full text pdf url on a tandfonline article page.
##
get.pdflink.tandfonline <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link on the ".show-pdf" element
  hit <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  paste0( "http://www.tandfonline.com", hit )
}
###
# Resolve the full text pdf url on an sjweh.fi article page.
##
get.pdflink.sjweh <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link on the ".pdf-download" element
  hit <- html_attr( html_nodes( doc, css = '.pdf-download' ), "href" )
  paste0( "http://www.sjweh.fi/", hit )
}
###
# Resolve the full text pdf url on a painphysicianjournal article page.
##
get.pdflink.painphysicianjournal <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link on the right-floated row element
  hit <- html_attr( html_nodes( doc, css = '.row .float-right' ), "href" )
  paste0( "http://www.painphysicianjournal.com", hit )
}
###
# Resolve the full text pdf url on an mdpi.com article page.
##
get.pdflink.mdpi.com <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a jospt.org article page.
##
get.pdflink.jospt <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # several /doi/pdf anchors may exist; take the first one
  hit <- html_attr( html_nodes( doc, css = 'a[href^="/doi/pdf"]' ), "href" )
  paste0( "http://www.jospt.org", hit[1] )
}
###
# Resolve the full text pdf url on a jneurosci article page.
##
get.pdflink.jneurosci <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a jmir.org article page
# (two hops: abstract page -> intermediate pdf page -> download link).
##
get.pdflink.jmir.org <- function( url )
{
  # fetch the abstract page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the abstract page advertises an intermediate pdf page in a meta tag
  inter <- html_attr( html_nodes( doc, css = 'meta[name="citation_abstract_pdf_url"]' ), "content" )
  pdfpage <- xml2::read_html( curl( inter, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the final download anchor lives on that intermediate page
  html_attr( html_nodes( pdfpage, css = 'a[href^="http://www.jmir.org/article/download"]' ), "href" )
}
###
# Resolve the full text pdf url on a jiaci.org article page.
##
get.pdflink.jiaci <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link: second anchor of the first list item
  hit <- html_attr( html_nodes( doc, css = 'li:nth-child(1) a:nth-child(2)' ), "href" )
  paste0( "http://www.jiaci.org", hit )
}
###
# Resolve the full text pdf url on an indianjcancer article page
# (two hops: citation meta tag -> intermediate page -> .pdf anchor).
##
get.pdflink.indianjcancer <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the meta tag points at an intermediate page, not the pdf itself
  inter <- html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
  pdfpage <- xml2::read_html( curl( inter, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative .pdf anchor on the intermediate page
  rel <- html_attr( html_nodes( pdfpage, css = 'a[href$=".pdf"]' ), "href" )
  paste0( "http://www.indianjcancer.com/", rel )
}
###
# Resolve the full text pdf url for inaactamedica: the article url
# already points straight at the pdf, so it is returned unchanged.
##
get.pdflink.inaactamedica <- function( url )
{
  url
}
###
# Resolve the full text pdf url on an impactjournals article page.
##
get.pdflink.impactjournals <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on an ijo.in article page
# (two hops: citation meta tag -> intermediate page -> .pdf anchor).
##
get.pdflink.ijo.in <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the meta tag may repeat; follow only the first hit
  inter <- html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
  pdfpage <- xml2::read_html( curl( inter[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative .pdf anchor on the intermediate page
  rel <- html_attr( html_nodes( pdfpage, css = 'a[href$=".pdf"]' ), "href" )
  paste0( "http://www.ijo.in/", rel )
}
###
# Resolve the full text pdf url on an ijkd article page
# (the page is a frameset; the pdf link sits inside the first frame).
##
get.pdflink.ijkd <- function( url )
{
  # fetch the frameset page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # follow the first frame's source
  inner <- html_attr( html_nodes( doc, css = 'frame' ), "src" )
  framepage <- xml2::read_html( curl( inner[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # absolute link back to the ijkd host is the pdf
  html_attr( html_nodes( framepage, css = 'a[href^="http://www.ijkd"]' ), "href" )
}
###
# Resolve the full text pdf url on a healio article page.
##
get.pdflink.healio <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url for hdbp: the article url already
# points straight at the pdf, so it is returned unchanged.
##
get.pdflink.hdbp <- function( url )
{
  url
}
###
# Resolve the full text pdf url on a haematologica article page.
##
get.pdflink.haematologica <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on a europeanreview article page.
##
get.pdflink.europeanreview <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # pdf link on the ".right" element
  hit <- html_attr( html_nodes( doc, css = '.right' ), "href" )
  # some hrefs carry a stray space before the scheme; strip it
  sub( " http", "http", hit )
}
###
# Resolve the full text pdf url on an eje-online article page.
##
get.pdflink.ejeonline <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf location is advertised in the citation_pdf_url meta tag
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text pdf url on an educationforhealth article page
# (two hops: citation meta tag -> intermediate page -> .pdf anchor).
##
get.pdflink.educationforhealth <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the meta tag points at an intermediate page, not the pdf itself
  inter <- html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
  pdfpage <- xml2::read_html( curl( inter, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative .pdf anchor on the intermediate page
  rel <- html_attr( html_nodes( pdfpage, css = 'a[href$=".pdf"]' ), "href" )
  paste0( "http://www.educationforhealth.net/", rel )
}
###
# Resolve the full text pdf url on an ectrx article page.
##
get.pdflink.ectrx <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link inside a bold anchor
  hit <- html_attr( html_nodes( doc, css = 'b a' ), "href" )
  paste0( "http://www.ectrx.org/forms/", hit )
}
###
# Resolve the full text pdf url on an e-cmh article page.
##
get.pdflink.ecmh <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # this publisher uses a "fulltext_pdf" meta tag instead of citation_pdf_url
  html_attr( html_nodes( doc, css = 'meta[name="fulltext_pdf"]' ), "content" )
}
###
# Resolve the full text pdf url on a dirjournal article page.
##
get.pdflink.dirjournal <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # multiple .pdf anchors exist; the second one is the article pdf
  hit <- html_attr( html_nodes( doc, css = 'a[href$=".pdf"]' ), "href" )
  paste0( "http://www.dirjournal.org", hit[2] )
}
###
# Resolve the full text pdf url on a danmedj article page.
##
get.pdflink.danmedj <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # any anchor ending in .pdf is taken as the pdf link
  html_attr( html_nodes( doc, css = 'a[href$=".pdf"]' ), "href" )
}
###
# Resolve the full text pdf url on a cmj.org article page
# (two hops: citation meta tag -> intermediate page -> first anchor).
##
get.pdflink.cmj.org <- function( url )
{
  # fetch the article page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the meta tag points at an intermediate page, not the pdf itself
  inter <- html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
  pdfpage <- xml2::read_html( curl( inter, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # relative pdf link: first anchor inside a paragraph
  rel <- html_attr( html_nodes( pdfpage, css = 'p a:nth-child(1)' ), "href" )
  paste0( "http://www.cmj.org/", rel )
}
###
# Resolve the full text pdf url on a cmj.hr article page.
##
get.pdflink.cmj.hr <- function( url )
{
  # fetch the frameset page with a browser-like user agent
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # the pdf is the absolute source of a frame element
  html_attr( html_nodes( doc, css = 'frame[src^="http"]' ), "src" )
}
###
# Get full text pdf link from cfp full text website.
##
get.pdflink.cfp <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from canjsurg full text website.
##
get.pdflink.canjsurg <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (from SelectorGadget) for the PDF anchor on the page.
  html_attr( html_nodes( doc, css = 'p:nth-child(2) a:nth-child(2)' ), "href" )
}
###
# Get full text pdf link from bloodjournal full text website.
##
get.pdflink.bloodjournal <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from birpublications full text website.
##
get.pdflink.birpublications <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF anchor carries class "show-pdf"; its href is site-relative.
  rel <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  paste0( "http://www.birpublications.org", rel )
}
###
# Get full text pdf link from atsjournals full text website.
##
get.pdflink.atsjournals <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF anchor carries class "show-pdf"; its href is site-relative.
  rel <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  paste0( "http://www.atsjournals.org", rel )
}
###
# Get full text pdf link from annsaudimed full text website.
##
get.pdflink.annsaudimed <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The ".desc" element (from SelectorGadget) holds the PDF link's href.
  html_attr( html_nodes( doc, css = '.desc' ), "href" )
}
###
# Get full text pdf link from annfammed.org full text website.
##
get.pdflink.annfammed <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "Full Text (PDF)" view link targets a ".pdf+html" viewer page;
  # stripping the "+html" suffix leaves the raw PDF path.
  viewer <- html_attr( html_nodes( doc, css = '.full-text-pdf-view-link a' ), "href" )
  raw <- sub( "\\+html", "", viewer )
  # The path is site-relative, so prepend the journal's host.
  paste0( "http://www.annfammed.org", raw )
}
###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from ajronline full text website.
##
get.pdflink.ajronline <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (from SelectorGadget) for the PDF anchor; the href is
  # site-relative, so prepend the journal's host.
  rel <- html_attr( html_nodes( doc, css = '#refLinkList+ li .nowrap' ), "href" )
  paste0( "http://www.ajronline.org", rel )
}
###
# Get full text pdf link from ajcn full text website.
##
get.pdflink.ajcn <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "Full Text (PDF)" view link targets a ".pdf+html" viewer page;
  # stripping the "+html" suffix leaves the raw PDF path.
  viewer <- html_attr( html_nodes( doc, css = '.full-text-pdf-view-link a' ), "href" )
  raw <- sub( "\\+html", "", viewer )
  # The path is site-relative, so prepend the journal's host.
  paste0( "http://www.ajcn.org", raw )
}
###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from alphamedpress full text website.
##
get.pdflink.alphamedpress <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from thejns full text website.
##
get.pdflink.thejns <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Second entry of the article-tools list is expected to carry the PDF link.
  # NOTE(review): html_attr("href") is read off the <li> element itself --
  # confirm the list item actually exposes an href attribute.
  rel <- html_attr( html_nodes( doc, css = '.article-tools li:nth-child(2)' ), "href" )
  paste0( "http://thejns.org", rel )
}
###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from rcjournal full text website.
##
get.pdflink.rcjournal <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from rsna full text website.
##
get.pdflink.rsna <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Sixth tab of the article navigation holds the PDF anchor (SelectorGadget).
  rel <- html_attr( html_nodes( doc, css = '.tab-nav li:nth-child(6) a' ), "href" )
  # The href is site-relative, so prepend the publisher's host.
  paste0( "http://pubs.rsna.org", rel )
}
###
# Get full text pdf link from rcseng.ac.uk full text website.
##
get.pdflink.rcseng <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Fourth tab of the article navigation holds the PDF anchor (SelectorGadget).
  rel <- html_attr( html_nodes( doc, css = '.tab-nav li:nth-child(4) a' ), "href" )
  # The href is site-relative, so prepend the publisher's host.
  paste0( "http://publishing.rcseng.ac.uk", rel )
}
###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from aappublications full text website.
##
get.pdflink.aappublications <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from pamw.pl full text website.
##
get.pdflink.pamw <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors inside the Drupal "field-item even" div; the first one is taken.
  hrefs <- html_attr( html_nodes( doc, css = 'div[class="field-item even"] a' ), "href" )
  hrefs[1]
}
###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from umsha.ac.ir full text website.
##
get.pdflink.umsha <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from aspet full text website.
##
get.pdflink.aspet <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from waocp full text website.
##
get.pdflink.waocp <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF anchor carries class "pdf"; its href is relative (e.g. "./x.pdf").
  intermed1 <- page %>% html_nodes( css = '.pdf' ) %>% html_attr( "href" )
  # Strip only a LEADING "./" before resolving against the journal host.
  # The previous pattern "./" left the dot as a regex wildcard, so the first
  # occurrence of <any char>/ anywhere in the href would have been removed.
  intermed2 <- sub( "^\\./", "", intermed1 )
  pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
  return( pdflink )
}
###
# Get full text pdf link from snmjournals full text website.
##
get.pdflink.snmjournals <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from jaoa.org full text website.
##
get.pdflink.jaoa <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from clinicalandtranslationalinvestigation full text website.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors under "files/" are the hosted documents; resolve against the host.
  rel <- html_attr( html_nodes( doc, css = 'a[href^="files/"]' ), "href" )
  paste0( "http://clinicalandtranslationalinvestigation.com/", rel )
}
###
# Get full text pdf link from quintessenz full text website.
##
get.pdflink.quintessenz <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The table-of-contents button anchor holds the relative PDF path.
  intermed1 <- page %>% html_nodes( css = 'a[class="tocbut"]' ) %>% html_attr( "href" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".de".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>de sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".de", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".de/", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from rcpsych full text website.
##
get.pdflink.rcpsych <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF location is exposed via a <link type="application/pdf"> element
  # whose href is site-relative.
  css <- 'link[type="application/pdf"]'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".org".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>org sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from upol.cz full text website.
##
get.pdflink.upol.cz <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from asahq.org full text website.
##
get.pdflink.asahq <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The #pdfLink element stores the site-relative document URL in its
  # data-article-url attribute.
  css <- "#pdfLink"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".org".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>org sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from physiology full text website.
##
get.pdflink.physiology <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF location is exposed via a <link type="application/pdf"> element
  # whose href is site-relative.
  css <- 'link[type="application/pdf"]'
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".org".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>org sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from aota.org full text website.
##
get.pdflink.aota <- function( url )
{
  # Fetch and parse the article landing page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from nutrition.org full text website.
##
get.pdflink.nutrition <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "Full Text (PDF)" view link targets a ".pdf+html" viewer page.
  css <- ".full-text-pdf-view-link a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".org".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>org sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
  # Drop the "+html" suffix to obtain the raw PDF path.
  pdflink <- sub( "\\+html", "", intermed2 )
  return( pdflink )
}
###
# Get full text pdf link from tums.ac.ir full text website.
##
get.pdflink.tums <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # OJS sidebar "Article Tools" file link holds the PDF href.
  html_attr( html_nodes( doc, css = "#sidebarRTArticleTools .file" ), "href" )
}
###
# Get full text pdf link from arvojournals full text website.
##
get.pdflink.arvojournals <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The #pdfLink element stores the site-relative document URL in its
  # data-article-url attribute; resolve it against the IOVS host.
  rel <- html_attr( html_nodes( doc, css = "#pdfLink" ), "data-article-url" )
  paste0( "http://iovs.arvojournals.org/", rel )
}
###
# Get full text pdf link from JAMA full text website.
##
get.pdflink.jama <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The full-text tab's #pdf-link element stores the site-relative document
  # URL in its data-article-url attribute.
  css <- "#full-text-tab #pdf-link"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
  # Rebuild the scheme-plus-host prefix by splitting on the LITERAL ".com".
  # fixed = TRUE: an unescaped "." is a regex wildcard, so the old pattern
  # could split at any <char>com sequence occurring earlier in the URL.
  link1 <- strsplit( url, ".com", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from plos full text website.
##
get.pdflink.plos <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "Download PDF" button's href is site-relative.
  rel <- html_attr( html_nodes( doc, css = "#downloadPdf" ), "href" )
  paste0( "http://journals.plos.org", rel )
}
###
# Get full text pdf link from bmj full text website.
##
get.pdflink.bmj <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF location is exposed via a <link type="application/pdf"> element.
  rel <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  paste0( "http://www.bmj.com", rel )
}
###
# Get full text pdf link from nejm full text website.
##
get.pdflink.nejm <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # List anchors pointing at "/doi/pdf..." carry the site-relative PDF path.
  rel <- html_attr( html_nodes( doc, css = "li a[href^='/doi/pdf']" ), "href" )
  paste0( "http://www.nejm.org", rel )
}
###
# Get full text pdf link from academic.oup full text website.
##
get.pdflink.acoup <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # ".al-link" elements (from SelectorGadget) carry site-relative hrefs.
  rel <- html_attr( html_nodes( doc, css = ".al-link" ), "href" )
  paste0( "https://academic.oup.com", rel )
}
###
# Get full text pdf link from annals full text website.
##
get.pdflink.annals <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The #tagmasterPDF element stores the site-relative document URL in its
  # data-article-url attribute.
  rel <- html_attr( html_nodes( doc, css = "#tagmasterPDF" ), "data-article-url" )
  paste0( "https://www.annals.org", rel )
}
###
# Get full text pdf link from cambridge full text website.
##
get.pdflink.cambridge <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # First entry of the download-types list is the PDF anchor; only the first
  # matched href is kept before resolving against the host.
  hrefs <- html_attr( html_nodes( doc, css = ".download-types li:nth-child(1) a" ), "href" )
  paste0( "https://www.cambridge.org", hrefs[1] )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid1 <- function( url )
{
# Resolve an OVID hand-off URL to the src of the iframe that embeds the
# full-text document on the OVID viewer page.
# NOTE(review): if the "ovidFullTextUrlForButtons" marker is absent from the
# second script node, p6 is never assigned and the read_html() call below
# fails with "object 'p6' not found" -- confirm callers tolerate that error.
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
# p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# Collect all javascript blocks; the second is expected to embed the URL.
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
# Strip everything before the quoted URL and everything from "PubMed" on...
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
# ...then restore the "PubMed" suffix that the pattern consumed.
p6 <- paste0( p5, "PubMed" )
}
# options = "HUGE" relaxes libxml2 parser limits for the large OVID page.
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
# The document is embedded in an iframe; its src is the link we return.
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
#intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
#page3 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
#pdflink <- page3 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
return( pdflink )
}
###
# Get full text pdf link from OVID full text website.
##
get.pdflink.ovid2 <- function( url )
{
# Resolve an OVID hand-off URL to the src of the iframe embedding the
# document. Two routes: if no citation_pdf_url meta tag exists, scrape the
# "ovidFullTextUrlForButtons" javascript marker directly; otherwise follow
# the meta URL first and scrape the marker from that page, then follow the
# "#pdf" anchor on the resulting viewer page.
# NOTE(review): in both branches p6 is only assigned when the marker is
# found in the second script node; otherwise read_html( curl( p6, ... ) )
# fails with "object 'p6' not found" -- confirm callers tolerate that.
# parse page
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css <- 'meta[name="citation_pdf_url"]'
# get href to pdfLink
p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
if(identical(p1, character(0))){
# No meta tag: look for the full-text URL inside the page's javascript.
p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
# Strip everything before the quoted URL and from "PubMed" onward, then
# restore the "PubMed" suffix that the pattern consumed.
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
# options = "HUGE" relaxes libxml2 parser limits for the large OVID page.
page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
}else{
# Meta tag present: follow it, then extract the javascript marker there.
p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
{
p4 <- p3[2]
p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
p6 <- paste0( p5, "PubMed" )
}
page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
# The "#pdf" anchor is relative to the ovidsp.tx.ovid.com host.
intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
# The final viewer page embeds the document in an iframe.
pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
}
return( pdflink )
}
###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF icon anchor carries a site-relative href.
  rel <- html_attr( html_nodes( doc, css = '.pdf_icon' ), "href" )
  paste0( "https://ehp.niehs.nih.gov", rel )
}
###
# Get full text pdf link from Science Direct full text website.
##
get.pdflink.sciencedirect <- function( url )
{
  # First hop: the article page exposes a PDF download button.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  btn <- html_attr( html_nodes( doc, css = ".pdf-download-btn-link" ), "href" )
  # Second hop: the download page is a meta-refresh stub whose content
  # attribute has the form "0;URL=<target>".
  stub <- xml2::read_html( curl( paste0( "http://www.sciencedirect.com", btn ),
                                 handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  refresh <- html_attr( html_nodes( stub, css = 'meta[content^="0;URL"]' ), "content" )
  # Everything after "URL=" is the direct PDF address.
  strsplit( refresh, "URL=" )[[1]][2]
}
# for springerlink, retrieve the correct url
get.pdflink.linkinghub <- function( url )
{
# Follow a linkinghub redirect page to the publisher and return the PDF link.
# Relies on XML (htmlParse/getNodeSet), tidyr::separate and rvest; the steps
# are order-dependent: (1) pull the hidden redirectURL form value, (2) load
# the target page and read its citation_pdf_url meta tag, (3) load that URL
# and return the first <div><a> hrefs found there.
# parse url further and get the specific node with the URL
page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
parsedfull <- htmlParse( page )
rootnode <- xmlRoot( parsedfull )
o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
# convert to character
o2 <- capture.output(o)
# extract URL from character string
# Split the printed tag on spaces, then split each attr on "=" to reach the
# value="..." pair; the URL is percent-encoded, hence URLdecode below.
o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
http <- o4[ o4$a == "value", "b" ]
http <- gsub( "\"", "", http )
outurl <- URLdecode(http)
# parse page
page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
# xpath of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
css = 'meta[name="citation_pdf_url"]'
# get href to pdfLink
intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
# Drop a stray "amp;" left by HTML entity escaping in the query string.
pdflink1 <- sub( "amp;", "", intermed3 )
page2 <- xml2::read_html( pdflink1 )
css2 = 'div a'
pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
return( pdflink )
}
###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (from SelectorGadget); only the first matched href is
  # kept before resolving against the SciELO host.
  hrefs <- html_attr( html_nodes( doc, css = "li:nth-child(2) a:nth-child(1)" ), "href" )
  paste0( "http://www.scielo.br", hrefs[1] )
}
###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The direct PDF location is published in the citation_pdf_url meta tag.
  # (A stray ".aha-icon-download" selector literal left over from an earlier
  # scraping approach was evaluated and discarded here; removed as dead code,
  # together with the superseded commented-out two-hop lookup.)
  css <- 'meta[name=citation_pdf_url]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}
###
# Get full text pdf link from cmaj full text website.
##
get.pdflink.cmaj <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "Full Text (PDF)" view link targets a ".pdf+html" viewer page.
  css <- ".full-text-pdf-view-link a"
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.cmaj.ca", pdflink )
  # Drop the "+html" suffix to obtain the raw PDF. The "+" must be escaped:
  # sub( "+html", ... ) is an invalid regular expression (a bare leading
  # repetition operator) and raises an error at run time. This now matches
  # the escaping used by get.pdflink.annfammed / get.pdflink.ajcn.
  pdflink <- sub( "\\+html", "", pdflink )
  return( pdflink )
}
###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function( url )
{
  # First hop: the landing page advertises the PDF viewer URL in its
  # citation_pdf_url meta tag.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  viewer <- html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
  # Second hop: the viewer embeds the real document in the #pdfDocument frame.
  doc2 <- xml2::read_html( curl( viewer, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  html_attr( html_nodes( doc2, css = "#pdfDocument" ), "src" )
}
###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The info-pane ePDF anchor links the viewer; rewriting "epdf" to "pdf"
  # yields the direct-download variant of the same URL.
  epdf <- html_attr( html_nodes( doc, css = ".js-infopane-epdf" ), "href" )
  sub( "epdf", "pdf", epdf )
}
###
# Get full text pdf link from asm.org full text website.
##
get.pdflink.asm <- function( url )
{
  # ASM full-text URLs end in ".long"; the matching PDF lives at ".full.pdf".
  # The page fetch in the previous version parsed the article without ever
  # using the result, so that needless network round-trip has been dropped.
  # The substitution is anchored to the end of the URL so that a "long"
  # appearing elsewhere in the path is left untouched.
  pdflink <- sub( "long$", "full.pdf", url )
  return( pdflink )
}
###
# Get full text pdf link from ajp... full text website.
##
get.pdflink.ajp <- function( url )
{
  # The indexed link already points directly at the PDF; pass it through.
  url
}
###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (from SelectorGadget) for the PDF anchor; the href is
  # site-relative, so prepend the publisher's host.
  rel <- html_attr( html_nodes( doc, css = "li:nth-child(2) .nowrap" ), "href" )
  paste0( "http://apsjournals.apsnet.org", rel )
}
###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors beginning with "/doi/pdf" carry the site-relative PDF path.
  rel <- html_attr( html_nodes( doc, css = "a[href^='/doi/pdf']" ), "href" )
  paste0( "http://arjournals.annualreviews.org", rel )
}
###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF anchor carries class "show-pdf"; its href is site-relative.
  rel <- html_attr( html_nodes( doc, css = ".show-pdf" ), "href" )
  full <- paste0( "http://ascopubs.org", rel )
  # Rewriting "/pdf" to "/pdfdirect" selects the direct-download endpoint.
  sub( "/pdf", "/pdfdirect", full )
}
###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Positional selector (from SelectorGadget) for the PDF anchor; the href is
  # site-relative, so prepend the publisher's host.
  rel <- html_attr( html_nodes( doc, css = ".article_link td:nth-child(2) .header4" ), "href" )
  paste0( "http://avmajournals.avma.org", rel )
}
###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function( url )
{
  # Fetch and parse the article page, presenting a browser user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF location is exposed via a <link type="application/pdf"> element.
  rel <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  paste0( "http://bjgp.org", rel )
}
###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # <link> element advertising the pdf rendition of the article.
  href <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://bjj.boneandjoint.org.uk", href )
}
###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # PDF jump link in the article navigation (selector via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = ".last .highwire-article-nav-jumplink" ), "href" )
  # Rebuild the host part from the input url (text up to and including ".org").
  host <- strsplit( url, ".org" )[[ 1 ]][ 1 ]
  paste0( host, ".org", href )
}
###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # <link> element advertising the pdf rendition of the article.
  href <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  # Rebuild the host part from the input url (text up to and including ".org").
  host <- strsplit( url, ".org" )[[ 1 ]][ 1 ]
  paste0( host, ".org", href )
}
###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # PDF anchor in the primary navigation (selector via SelectorGadget).
  css <- ".primary a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Strip the "+html" viewer suffix so the link points at the raw pdf.
  intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
  # Rebuild the host part from the input url.
  link1 <- strsplit( url, ".org" )
  # BUG FIX: the original pasted intermed1, silently discarding the
  # "+html" -> ".pdf" cleanup performed above; use intermed2 instead.
  pdflink <- paste0( link1[[1]][1], ".org", intermed2 )
  return( pdflink )
}
###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # <link> element advertising the pdf rendition of the article.
  href <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  # Rebuild the host part from the input url (text up to and including ".com").
  host <- strsplit( url, ".com" )[[ 1 ]][ 1 ]
  paste0( host, ".com", href )
}
###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # First list entry in the sidebar holds the pdf link (via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = ".col-sm-2 li:nth-child(1) a" ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://gacetamedicademexico.com/", href )
}
###
# Get full text pdf link from iiar full text website.
##
get.pdflink.iiar <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Full-text pdf view link (selector via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = ".full-text-pdf-view-link a" ), "href" )
  # Rebuild the host, make the href absolute, then drop the "+html"
  # viewer suffix so the link serves the raw pdf.
  host   <- strsplit( url, ".org" )[[ 1 ]][ 1 ]
  viewer <- paste0( host, ".org", href )
  sub( "\\+html", "", viewer )
}
###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The #pdfLink element stores the pdf location in a data attribute,
  # not in href.
  ref <- html_attr( html_nodes( doc, css = "#pdfLink" ), "data-article-url" )
  # Rebuild the host part from the input url and make the link absolute.
  host <- strsplit( url, ".org" )[[ 1 ]][ 1 ]
  paste0( host, ".org", ref )
}
###################################
# END FUNCTIONS
###################################
# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'
# read data of missing pdfs
# (read.csv2: semicolon-separated; columns renamed to pmid/url below)
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = F )
head(missings)
names(missings) <- c( "pmid", "url" )
# batch resume point: rows before 220000 were presumably processed in an
# earlier run of this script -- TODO confirm
# NOTE(review): 'min' and 'max' shadow the base R functions of the same
# name for the remainder of this script
min <- 220000
max <- length(missings[,1])
# set progress bar
progbar <- txtProgressBar( min = min, max = max, style = 3 )
# for every remaining row, resolve and download the pdf for (pmid, url);
# get.pdf itself skips files that already exist and traps errors
for( i in min:max )
{
setTxtProgressBar( progbar, i )
# wrap the current row in a one-row data.frame and hand it to get.pdf
pp <- data.frame( pmid = missings$pmid[ i ],
url = missings$url[ i ],
stringsAsFactors = FALSE )
get.pdf( pmid = pp$pmid, url = pp$url )
}
# quit R session
q( save = "no" ) | /scripts/obtainingPDFS/7_url.to.pdfdownloadRE23.R | permissive | wmotte/frrp | R | false | false | 83,855 | r | ################################################################################
# Aim: Download full text pdfs, given PMID and url
#
# Contact: Herm Lamberink, h.j.lamberink@umcutrecht.nl
# Date: 2018-03-19
#############################
.libPaths( c(.libPaths(), "/mnt/data/live02/stress/hlamberink/RLibrary" ) )
library( 'xml2' ) # used by rvest package
library( 'rvest' ) # web scraping package
library( "curl" )
library( "XML" )
library( "pbapply" ) # power bar during sapply
library( 'plyr' ); library( 'dplyr' )
library( 'tidyr' )
###################################
# FUNCTIONS
###################################
###
# Get pdf from given pmid
##
get.pdf <- function( pmid, url, outdr = outdir )
{
  # Download the full text pdf for one article.
  #
  # pmid  : PubMed identifier; names the output file "<pmid>.pdf".
  # url   : full text landing page of the article.
  # outdr : output directory (defaults to the global 'outdir').
  #
  # Returns NA on success / skip; on error or warning a character message
  # prefixed with the pmid (also printed via message()) so one bad
  # article cannot stop a long batch run.
  v <- tryCatch(
    {
      # output pdf; skip all work if it was downloaded in a previous run
      outpdf <- paste0( outdr, '/', pmid, '.pdf' )
      if( ! file.exists( outpdf ) )
      {
        # NA signals "no publisher resolver matched"
        pdflink <- NA
        #######################
        # pdflink per publisher
        #######################
        # Ordered lookup table: c( url substring, resolver function name ).
        # Function names are kept as strings so an undefined resolver only
        # errors when its pattern actually matches the url, exactly like
        # the original sequential if-chain. Later matches OVERWRITE
        # earlier ones (same order as the original chain; this matters
        # for urls matching several patterns). The generic dx.doi.org ->
        # wiley branch stays disabled, as it was commented out originally.
        # "SPECIAL.*" entries are handled explicitly in the loop below.
        handlers <- list(
          c( "arvojournals",                          "get.pdflink.arvojournals" ),
          c( "jamanetwork.com",                       "get.pdflink.jama" ),
          c( "dx.plos",                               "get.pdflink.plos" ),
          c( "/EHP",                                  "get.pdflink.ehp" ),
          c( "/bjs",                                  "get.pdflink.doibjs" ),
          c( "wiley.com",                             "get.pdflink.wileyreal" ),
          c( "bmj.com",                               "get.pdflink.bmj" ),
          c( "cmaj.ca",                               "get.pdflink.cmaj" ),
          c( "nejm.org",                              "get.pdflink.nejm" ),
          c( "scielo.br",                             "get.pdflink.scielo" ),
          c( "academic.oup",                          "get.pdflink.acoup" ),
          c( "annals",                                "get.pdflink.annals" ),
          c( "cambridge.org",                         "get.pdflink.cambridge" ),
          c( "Insights.ovid",                         "SPECIAL.ovid" ),
          c( "iiar",                                  "get.pdflink.iiar" ),
          c( "ahajournals",                           "get.pdflink.ahaj" ),
          c( "sciencedirect.com",                     "get.pdflink.sciencedirect" ),
          c( "asm",                                   "get.pdflink.asm" ),
          # BUG FIX: the original chain did 'pdflink <- get.pdflink.ajp'
          # without calling the function; it is now invoked like every
          # other resolver (assumes it takes the url -- TODO confirm, the
          # definition is elsewhere in this file).
          c( "ajp",                                   "get.pdflink.ajp" ),
          c( "apsjournals",                           "get.pdflink.apsjournals" ),
          c( "arjournals",                            "get.pdflink.arjournals" ),
          c( "ascopubs",                              "get.pdflink.ascopubs" ),
          c( "avmajournals",                          "get.pdflink.avma" ),
          c( "bjgp",                                  "get.pdflink.bjgp" ),
          c( "boneandjoint",                          "get.pdflink.boneandjoint" ),
          c( "aacrjournals",                          "get.pdflink.aacrjournals" ),
          c( "diabetesjournals",                      "get.pdflink.diabetesjournals" ),
          c( "asnjournals",                           "get.pdflink.asnjournals" ),
          c( "ersjournals",                           "get.pdflink.ersjournals" ),
          c( "gacetamedicade",                        "get.pdflink.gacetamedicade" ),
          c( "tums.ac.ir",                            "get.pdflink.tums" ),
          c( "nutrition.org",                         "get.pdflink.nutrition" ),
          c( "aota.org",                              "get.pdflink.aota" ),
          c( "physiology.org",                        "get.pdflink.physiology" ),
          c( "asahq.org",                             "get.pdflink.asahq" ),
          c( "upol.cz",                               "get.pdflink.upol.cz" ),
          c( "rcpsych.org",                           "get.pdflink.rcpsych" ),
          c( "sabinet.co.za",                         "get.pdflink.sabinet" ),
          c( "quintessenz",                           "get.pdflink.quintessenz" ),
          c( "clinicalandtranslationalinvestigation", "get.pdflink.clinicalandtranslationalinvestigation" ),
          c( "jaoa.org",                              "get.pdflink.jaoa" ),
          c( "snmjournals",                           "get.pdflink.snmjournals" ),
          c( "umsha",                                 "get.pdflink.umsha" ),
          c( "tokai",                                 "get.pdflink.tokai" ),
          c( "pamw.pl",                               "get.pdflink.pamw" ),
          c( "aappublications",                       "get.pdflink.aappublications" ),
          c( "publisherspanel",                       "get.pdflink.publisherspanel" ),
          c( "rcseng",                                "get.pdflink.rcseng" ),
          c( "rsna",                                  "get.pdflink.rsna" ),
          c( "rcjournal",                             "get.pdflink.rcjournal" ),
          c( "revistachirurgia",                      "get.pdflink.revistachirurgia" ),
          c( "thejns",                                "get.pdflink.thejns" ),
          c( "alphamedpress",                         "get.pdflink.alphamedpress" ),
          c( "aepress",                               "get.pdflink.aepress" ),
          c( "ajronline",                             "get.pdflink.ajronline" ),
          c( "ajcn",                                  "get.pdflink.ajcn" ),
          c( "ams.ac.ir",                             "get.pdflink.ams.ac.ir" ),
          c( "annfammed",                             "get.pdflink.annfammed" ),
          c( "annsaudimed",                           "get.pdflink.annsaudimed" ),
          c( "atsjournals",                           "get.pdflink.atsjournals" ),
          c( "birpublications",                       "get.pdflink.birpublications" ),
          c( "bloodjournal",                          "get.pdflink.bloodjournal" ),
          c( "cfp.org",                               "get.pdflink.cfp" ),
          c( "cmj.hr",                                "get.pdflink.cmj.hr" ),
          c( "cmj.org",                               "get.pdflink.cmj.org" ),
          c( "danmedj",                               "get.pdflink.danmedj" ),
          c( "dirjournal",                            "get.pdflink.dirjournal" ),
          c( "e-cmh",                                 "get.pdflink.ecmh" ),
          c( "ectrx",                                 "get.pdflink.ectrx" ),
          c( "educationforhealth",                    "get.pdflink.educationforhealth" ),
          c( "eje-online",                            "get.pdflink.ejeonline" ),
          c( "europeanreview",                        "get.pdflink.europeanreview" ),
          c( "haematologica",                         "get.pdflink.haematologica" ),
          c( "hdbp",                                  "get.pdflink.hdbp" ),
          c( "healio",                                "get.pdflink.healio" ),
          c( "ijkd",                                  "get.pdflink.ijkd" ),
          c( "ijo.in",                                "get.pdflink.ijo.in" ),
          c( "impactjournals",                        "get.pdflink.impactjournals" ),
          c( "inaactamedica",                         "get.pdflink.inaactamedica" ),
          c( "indianjcancer",                         "get.pdflink.indianjcancer" ),
          # intbrazjurol landing urls already point at the pdf itself
          c( "intbrazjurol",                          "SPECIAL.identity" ),
          c( "jiaci",                                 "get.pdflink.jiaci" ),
          # NOTE(review): only get.pdflink.jmir.org is visible in this
          # file -- confirm get.pdflink.jmir is defined somewhere.
          c( "jmir",                                  "get.pdflink.jmir" ),
          c( "jneurosci",                             "get.pdflink.jneurosci" ),
          c( "jospt",                                 "get.pdflink.jospt" ),
          c( "mdpi.com",                              "get.pdflink.mdpi.com" ),
          c( "painphysicianjournal",                  "get.pdflink.painphysicianjournal" ),
          c( "sjweh",                                 "get.pdflink.sjweh" ),
          c( "tandfonline",                           "get.pdflink.tandfonline" ),
          c( "thieme-connect",                        "get.pdflink.thieme" ),
          c( "wjgnet",                                "get.pdflink.wjgnet" ),
          c( "degruyter",                             "get.pdflink.degruyter" ),
          c( "biomedcentral",                         "get.pdflink.biomedcentral" ),
          c( "karger",                                "get.pdflink.karger" ),
          c( "jkan.or.kr",                            "get.pdflink.jkan.or.kr" ),
          c( "medicaljournals.se",                    "get.pdflink.medicaljournals.se" ),
          c( "anesthesiology",                        "get.pdflink.anesthesiology" ),
          c( "linkinghub",                            "get.pdflink.linkinghub" ),
          # DOI-prefix based dispatch (nature / liebert / wiley / springer)
          c( "doi.org/10.1038",                       "get.pdflink.nature" ),
          c( "doi.org/10.1089",                       "get.pdflink.acm" ),
          c( "doi.org/10.1111",                       "get.pdflink.wiley" ),
          c( "doi.org/10.1002",                       "get.pdflink.wiley" ),
          c( "doi.org/10.1007",                       "get.pdflink.springerlink" ),
          c( "psychiatryonline",                      "get.pdflink.psychiatryonline" )
        )
        # walk the table in order; every matching entry overwrites pdflink
        for( h in handlers )
        {
          if( ! grepl( h[ 1 ], url ) ) next
          if( h[ 2 ] == "SPECIAL.ovid" )
          {
            # ovid: try the primary resolver, fall back when it finds nothing
            pdflink <- get.pdflink.ovid1( url )
            if( length( pdflink ) == 0 ) pdflink <- get.pdflink.ovid2( url )
          }
          else if( h[ 2 ] == "SPECIAL.identity" )
          {
            # the landing url is the pdf link
            pdflink <- url
          }
          else
          {
            # deferred lookup by name, then call with the url
            pdflink <- do.call( h[ 2 ], list( url ) )
          }
        }
        #######################
        # download pdf
        #######################
        # write pdf to output only if a resolver produced a link
        if( ! is.na( pdflink ) )
        {
          download.file( url = pdflink, destfile = outpdf,
                         mode = "wb", quiet = TRUE )
        }
      }
      return( NA )
    },
    error = function(err) {
      # report which pmid failed, then let the batch continue
      message(paste( pmid, err, "\n" ) )
      return( paste( pmid, "URL does not seem to exist" ) )
    },
    warning = function(war) {
      # download.file warnings may still leave a usable file behind
      message(paste( pmid, war, "\n" ) )
      return( paste( pmid, "warning, test if downloaded" ) )
    }
  )
}
###
# Get full text pdf link from psychiatryonline.org full text website.
##
get.pdflink.psychiatryonline <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The pdf button carries the "show-pdf" class; its href is already
  # the download link.
  html_attr( html_nodes( doc, css = ".show-pdf" ), "href" )
}
###
# Get full text pdf link from springerlink full text website.
##
get.pdflink.springerlink <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  # NOTE(review): this function is defined a second time further down in
  # this file with an identical body.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from nature full text website.
##
get.pdflink.nature <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Primary source: the citation_pdf_url meta tag (absolute url).
  css <- 'meta[name="citation_pdf_url"]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  if( identical( pdflink, character(0) ) )
  {
    # Fallback: first pdf anchor in the article header; its href is
    # site-relative, so prefix the nature.com host.
    css <- 'a[class="inline-block block-link pa10 pl0"]'
    intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
    if( !identical( intermed1, character(0)))
    {
      pdflink <- paste0( "https://www.nature.com", intermed1[1] )
    }
  }
  # BUG FIX: the original only returned from inside the fallback branch,
  # so a page WITH the meta tag made the function return NULL (the value
  # of the outer if). Always return the resolved link.
  return( pdflink )
}
###
# Get full text pdf link from acm full text website.
##
get.pdflink.acm <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # PDF/print anchor (selector via SelectorGadget).
  css <- '.pdfprint a'
  intermed <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  if( !identical( intermed, character(0) ) )
  {
    # The href is site-relative; prefix the liebertpub host.
    pdflink <- paste0( "http://online.liebertpub.com", intermed )
    return( pdflink )
  }
  # ROBUSTNESS FIX: the original fell through to an invisible NULL here,
  # which breaks the caller's is.na() guard; return NA ("no link found")
  # so get.pdf skips the download cleanly.
  return( NA )
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wiley <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from wiley full text website.
##
get.pdflink.wileyreal <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
get.pdflink.sciencedirect <- function( url )
{
  # Step 1: the first response is an interstitial page whose hidden
  # redirectURL form field (url-encoded) points at the real article page.
  gate     <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  encoded  <- html_attr( html_nodes( gate, css = 'input[name="redirectURL"]' ), "value" )
  realpage <- URLdecode( encoded )
  # Step 2: fetch the real article page and read its citation_pdf_url
  # meta tag; the content there is site-relative.
  article  <- xml2::read_html( curl( realpage, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  relpdf   <- html_attr( html_nodes( article, css = 'meta[name="citation_pdf_url"]' ), "content" )
  paste0( "https://www.sciencedirect.com", relpdf )
}
###
# Get full text pdf link from springerlink full text website.
##
get.pdflink.springerlink <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  # NOTE(review): duplicate definition -- an identical
  # get.pdflink.springerlink appears earlier in this file; this later
  # definition is the one in effect after sourcing.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from medicaljournals.se full text website.
##
get.pdflink.medicaljournals.se <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Second green button in the action list is the pdf download.
  href <- html_attr( html_nodes( doc, css = 'li:nth-child(2) .btn-success2' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "https://www.medicaljournals.se", href )
}
###
# Get full text pdf link from jkan.or.kr full text website.
##
get.pdflink.jkan.or.kr <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Fourth format entry in the portlet is the pdf (via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = '#portlet_content_Format li:nth-child(4) a' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "https://www.jkan.or.kr", href )
}
###
# Get full text pdf link from karger full text website.
##
get.pdflink.karger <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Karger's pdf button carries the btn-karger class.
  href <- html_attr( html_nodes( doc, css = '.btn-karger' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "https://www.karger.com", href )
}
###
# Get full text pdf link from degruyter full text website.
##
get.pdflink.degruyter <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Element with the pdf-link class points at the download.
  href <- html_attr( html_nodes( doc, css = '.pdf-link' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "https://www.degruyter.com", href )
}
###
# Get full text pdf link from biomedcentral full text website.
##
get.pdflink.biomedcentral <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from wjgnet full text website.
##
get.pdflink.wjgnet <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Third entry of the left article navigation is the pdf link; its
  # href is used as-is.
  html_attr( html_nodes( doc, css = '.left-articlenav li:nth-child(3) a' ), "href" )
}
###
# Get full text pdf link from thieme-connect full text website.
##
get.pdflink.thieme <- function( url )
{
  # Step 1: from the article tabs, follow the second tab (the pdf view).
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  tabref  <- html_attr( html_nodes( landing, css = '#articleTabs :nth-child(2) a' ), "href" )
  taburl  <- paste0( "http://www.thieme-connect.com", tabref )
  # Step 2: on the pdf view page, the #pdfLink element holds the actual
  # (site-relative) pdf location.
  pdfpage <- xml2::read_html( curl( taburl, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdfref  <- html_attr( html_nodes( pdfpage, css = '#pdfLink' ), "href" )
  paste0( "http://www.thieme-connect.com", pdfref )
}
###
# Get full text pdf link from tandfonline full text website.
##
get.pdflink.tandfonline <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The pdf button carries the "show-pdf" class.
  href <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://www.tandfonline.com", href )
}
###
# Get full text pdf link from sjweh full text website.
##
get.pdflink.sjweh <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Element with the pdf-download class points at the file.
  href <- html_attr( html_nodes( doc, css = '.pdf-download' ), "href" )
  # Prefix the host (note: trailing slash is part of the base url here).
  paste0( "http://www.sjweh.fi/", href )
}
###
# Get full text pdf link from painphysicianjournal full text website.
##
get.pdflink.painphysicianjournal <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Right-floated link in the row holds the pdf (via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = '.row .float-right' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://www.painphysicianjournal.com", href )
}
###
# Get full text pdf link from mdpi.com full text website.
##
get.pdflink.mdpi.com <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from jospt full text website.
##
get.pdflink.jospt <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Several /doi/pdf anchors may exist; keep only the first one.
  hrefs <- html_attr( html_nodes( doc, css = 'a[href^="/doi/pdf"]' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://www.jospt.org", hrefs[1] )
}
###
# Get full text pdf link from jneurosci full text website.
##
get.pdflink.jneurosci <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Get full text pdf link from jmir.org full text website.
##
get.pdflink.jmir.org <- function( url )
{
  # NOTE(review): get.pdf dispatches "jmir" urls to get.pdflink.jmir,
  # but only this ".org" variant is defined here -- confirm which name
  # is actually resolved at run time.
  # Step 1: the abstract page advertises its pdf via a meta tag.
  abstract <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  metapdf  <- html_attr( html_nodes( abstract, css = 'meta[name="citation_abstract_pdf_url"]' ), "content" )
  # Step 2: that page in turn links to the real download endpoint.
  viewer <- xml2::read_html( curl( metapdf, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  html_attr( html_nodes( viewer, css = 'a[href^="http://www.jmir.org/article/download"]' ), "href" )
}
###
# Get full text pdf link from jiaci full text website.
##
get.pdflink.jiaci <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Second anchor of the first list item is the pdf (via SelectorGadget).
  href <- html_attr( html_nodes( doc, css = 'li:nth-child(1) a:nth-child(2)' ), "href" )
  # Prefix the host to make the relative href absolute.
  paste0( "http://www.jiaci.org", href )
}
###
# Get full text pdf link from indianjcancer full text website.
##
get.pdflink.indianjcancer <- function( url )
{
  # Step 1: the landing page's citation_pdf_url meta tag points at an
  # intermediate viewer page, not the file itself.
  landing <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  viewerurl <- html_attr( html_nodes( landing, css = 'meta[name="citation_pdf_url"]' ), "content" )
  # Step 2: on the viewer page, grab the anchor ending in ".pdf" and
  # make it absolute against the site root.
  viewer <- xml2::read_html( curl( viewerurl, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  relpdf <- html_attr( html_nodes( viewer, css = 'a[href$=".pdf"]' ), "href" )
  paste0( "http://www.indianjcancer.com/", relpdf )
}
###
# Get full text pdf link from inaactamedica full text website.
##
get.pdflink.inaactamedica <- function( url )
{
  # inaactamedica article urls already point at the pdf, so the landing
  # url doubles as the download link.
  url
}
###
# Get full text pdf link from impactjournals full text website.
##
get.pdflink.impactjournals <- function( url )
{
  # Fetch the landing page with a browser-like user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The citation_pdf_url meta tag holds the absolute pdf url directly.
  html_attr( html_nodes( doc, css = 'meta[name="citation_pdf_url"]' ), "content" )
}
###
# Resolve the full text PDF link for ijo.in articles.
##
get.pdflink.ijo.in <- function( url )
{
  # Fetch the abstract page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # A meta tag carries the address of an intermediate full text page
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  fulltext_urls <- html_attr( html_nodes( doc, css = meta_css ), "content" )
  # Follow only the first candidate and collect anchors ending in ".pdf".
  fulltext_doc <- xml2::read_html( curl( fulltext_urls[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_href <- html_attr( html_nodes( fulltext_doc, css = 'a[href$=".pdf"]' ), "href" )
  # The hrefs are site-relative, so prefix the journal host.
  paste0( "http://www.ijo.in/", pdf_href )
}
###
# Resolve the full text PDF link for ijkd articles.
##
get.pdflink.ijkd <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The content sits inside a frameset; take the first frame's src
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  frame_srcs <- html_attr( html_nodes( doc, css = 'frame' ), "src" )
  inner_doc <- xml2::read_html( curl( frame_srcs[1], handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF link is the anchor pointing back to the ijkd host.
  html_attr( html_nodes( inner_doc, css = 'a[href^="http://www.ijkd"]' ), "href" )
}
###
# Resolve the full text PDF link for healio articles.
##
get.pdflink.healio <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Get full text pdf link from hdbp full text website.
##
get.pdflink.hdbp <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for haematologica articles.
##
get.pdflink.haematologica <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for europeanreview articles.
##
get.pdflink.europeanreview <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchor carrying the PDF href (selector chosen with Selector
  # Gadget, http://selectorgadget.com/).
  raw_href <- html_attr( html_nodes( doc, css = '.right' ), "href" )
  # Some hrefs carry a stray space before "http"; drop the first one.
  sub( " http", "http", raw_href )
}
###
# Resolve the full text PDF link for eje-online articles.
##
get.pdflink.ejeonline <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for educationforhealth articles.
##
get.pdflink.educationforhealth <- function( url )
{
  # Fetch the abstract page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # A meta tag points at an intermediate full text page (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  fulltext_url <- html_attr( html_nodes( doc, css = meta_css ), "content" )
  # Load that page and collect the anchors ending in ".pdf".
  fulltext_doc <- xml2::read_html( curl( fulltext_url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_href <- html_attr( html_nodes( fulltext_doc, css = 'a[href$=".pdf"]' ), "href" )
  # The hrefs are site-relative, so prefix the journal host.
  paste0( "http://www.educationforhealth.net/", pdf_href )
}
###
# Resolve the full text PDF link for ectrx articles.
##
get.pdflink.ectrx <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchor holding the PDF href (selector chosen with Selector
  # Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = 'b a' ), "href" )
  # The href is relative to the forms directory on the ectrx host.
  paste0( "http://www.ectrx.org/forms/", rel_href )
}
###
# Resolve the full text PDF link for e-cmh articles.
##
get.pdflink.ecmh <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # This publisher uses a "fulltext_pdf" meta tag for the PDF address
  # (selector found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="fulltext_pdf"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for dirjournal articles.
##
get.pdflink.dirjournal <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Collect all anchors ending in ".pdf" (selector chosen with
  # Selector Gadget, http://selectorgadget.com/).
  pdf_hrefs <- html_attr( html_nodes( doc, css = 'a[href$=".pdf"]' ), "href" )
  # The second match is the article PDF; prefix the journal host.
  paste0( "http://www.dirjournal.org", pdf_hrefs[2] )
}
###
# Resolve the full text PDF link for danmedj articles.
##
get.pdflink.danmedj <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors ending in ".pdf" hold the (absolute) PDF link (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  html_attr( html_nodes( doc, css = 'a[href$=".pdf"]' ), "href" )
}
###
# Resolve the full text PDF link for cmj.org articles.
##
get.pdflink.cmj.org <- function( url )
{
  # Fetch the abstract page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # A meta tag points at an intermediate full text page (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  fulltext_url <- html_attr( html_nodes( doc, css = meta_css ), "content" )
  # On that page, the first anchor inside a paragraph is the PDF link.
  fulltext_doc <- xml2::read_html( curl( fulltext_url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  pdf_href <- html_attr( html_nodes( fulltext_doc, css = 'p a:nth-child(1)' ), "href" )
  # The href is site-relative, so prefix the journal host.
  paste0( "http://www.cmj.org/", pdf_href )
}
###
# Resolve the full text PDF link for cmj.hr articles.
##
get.pdflink.cmj.hr <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF is loaded in a frame whose src is an absolute URL
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  html_attr( html_nodes( doc, css = 'frame[src^="http"]' ), "src" )
}
###
# Resolve the full text PDF link for cfp articles.
##
get.pdflink.cfp <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for canjsurg articles.
##
get.pdflink.canjsurg <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The second anchor in the second paragraph is the PDF link
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  html_attr( html_nodes( doc, css = 'p:nth-child(2) a:nth-child(2)' ), "href" )
}
###
# Resolve the full text PDF link for bloodjournal articles.
##
get.pdflink.bloodjournal <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for birpublications articles.
##
get.pdflink.birpublications <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The ".show-pdf" anchor carries a site-relative PDF href (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  # Prefix the publisher's host to make it absolute.
  paste0( "http://www.birpublications.org", rel_href )
}
###
# Resolve the full text PDF link for atsjournals articles.
##
get.pdflink.atsjournals <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The ".show-pdf" anchor carries a site-relative PDF href (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.show-pdf' ), "href" )
  # Prefix the publisher's host to make it absolute.
  paste0( "http://www.atsjournals.org", rel_href )
}
###
# Resolve the full text PDF link for annsaudimed articles.
##
get.pdflink.annsaudimed <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The ".desc" element's href is the PDF link (selector chosen with
  # Selector Gadget, http://selectorgadget.com/).
  html_attr( html_nodes( doc, css = '.desc' ), "href" )
}
###
# Resolve the full text PDF link for annfammed.org articles.
##
get.pdflink.annfammed <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The full-text-pdf view link points at the "+html" preview
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.full-text-pdf-view-link a' ), "href" )
  # Strip the "+html" suffix to get the raw PDF path...
  pdf_path <- sub( "\\+html", "", rel_href )
  # ...and prefix the journal host to make it absolute.
  paste0( "http://www.annfammed.org", pdf_path )
}
###
# Get full text pdf link from ams.ac.ir full text website.
##
get.pdflink.ams.ac.ir <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for ajronline articles.
##
get.pdflink.ajronline <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Element after the reference link list holds the PDF href (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '#refLinkList+ li .nowrap' ), "href" )
  # Prefix the publisher's host to make it absolute.
  paste0( "http://www.ajronline.org", rel_href )
}
###
# Resolve the full text PDF link for ajcn articles.
##
get.pdflink.ajcn <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The full-text-pdf view link points at the "+html" preview
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.full-text-pdf-view-link a' ), "href" )
  # Strip the "+html" suffix to get the raw PDF path...
  pdf_path <- sub( "\\+html", "", rel_href )
  # ...and prefix the journal host to make it absolute.
  paste0( "http://www.ajcn.org", pdf_path )
}
###
# Get full text pdf link from aepress.sk full text website.
##
get.pdflink.aepress <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for alphamedpress articles.
##
get.pdflink.alphamedpress <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for thejns articles.
##
get.pdflink.thejns <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Second entry of the article tools list (selector chosen with
  # Selector Gadget, http://selectorgadget.com/).
  # NOTE(review): href is read from the <li> itself, not a nested <a>
  # — confirm the list item actually carries an href attribute.
  rel_href <- html_attr( html_nodes( doc, css = '.article-tools li:nth-child(2)' ), "href" )
  # Prefix the journal host to make it absolute.
  paste0( "http://thejns.org", rel_href )
}
###
# Get full text pdf link from revistachirurgia full text website.
##
get.pdflink.revistachirurgia <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for rcjournal articles.
##
get.pdflink.rcjournal <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for rsna articles.
##
get.pdflink.rsna <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The sixth tab in the navigation bar links to the PDF (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.tab-nav li:nth-child(6) a' ), "href" )
  # Prefix the publisher's host to make it absolute.
  paste0( "http://pubs.rsna.org", rel_href )
}
###
# Resolve the full text PDF link for rcseng.ac.uk articles.
##
get.pdflink.rcseng <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The fourth tab in the navigation bar links to the PDF (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = '.tab-nav li:nth-child(4) a' ), "href" )
  # Prefix the publisher's host to make it absolute.
  paste0( "http://publishing.rcseng.ac.uk", rel_href )
}
###
# Get full text pdf link from publisherspanel full text website.
##
get.pdflink.publisherspanel <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for aappublications articles.
##
get.pdflink.aappublications <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for pamw.pl articles.
##
get.pdflink.pamw <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors inside the even field items (selector chosen with
  # Selector Gadget, http://selectorgadget.com/).
  hrefs <- html_attr( html_nodes( doc, css = 'div[class="field-item even"] a' ), "href" )
  # The first anchor is the PDF link.
  hrefs[1]
}
###
# Get full text pdf link from tokai.com full text website.
##
get.pdflink.tokai <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Resolve the full text PDF link for umsha.ac.ir articles.
##
get.pdflink.umsha <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for aspet articles.
##
get.pdflink.aspet <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Get full text pdf link from waocp full text website.
#
# Reads the ".pdf" anchor's relative href, strips a leading "./" and
# prefixes the journal host.
##
get.pdflink.waocp <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- '.pdf'
  # get href to pdfLink
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Strip a leading "./" from the relative href. The previous pattern
  # sub( "./", "" ) left "." as a regex wildcard, which removes the
  # first <any-char>"/" pair anywhere in the string (e.g.
  # "article/x" -> "articlx"); anchor and escape the dot instead.
  intermed2 <- sub( "^\\./", "", intermed1 )
  pdflink <- paste0( "http://journal.waocp.org/", intermed2 )
  return( pdflink )
}
###
# Resolve the full text PDF link for snmjournals articles.
##
get.pdflink.snmjournals <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for jaoa.org articles.
##
get.pdflink.jaoa <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Resolve the full text PDF link for clinicalandtranslationalinvestigation articles.
##
get.pdflink.clinicalandtranslationalinvestigation <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors under the "files/" path hold the PDF (selector chosen
  # with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = 'a[href^="files/"]' ), "href" )
  # Prefix the site root to make the link absolute.
  paste0( "http://clinicalandtranslationalinvestigation.com/", rel_href )
}
###
# Get full text pdf link from quintessenz full text website.
#
# Reads the "tocbut" anchor's relative href and rebuilds an absolute
# URL from the ".de" host part of the article url.
##
get.pdflink.quintessenz <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'a[class="tocbut"]'
  # get href to pdfLink
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Split on the literal ".de" (fixed = TRUE): under the default regex
  # interpretation the "." is a wildcard, so any <char>"de" sequence in
  # the url (e.g. "index" -> "nde"... ) could split at the wrong place.
  link1 <- strsplit( url, ".de", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".de/", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from sabinet.co.za full text website.
##
get.pdflink.sabinet <- function( url )
{
  # The catalogued url already points straight at the PDF file,
  # so it is returned unchanged.
  url
}
###
# Get full text pdf link from rcpsych full text website.
#
# The page advertises its PDF via <link type="application/pdf">; the
# href is site-relative, so the absolute URL is rebuilt from the
# ".org" host part of the article url.
##
get.pdflink.rcpsych <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'link[type="application/pdf"]'
  # get relative href to pdfLink
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Split on the literal ".org" (fixed = TRUE): with the default regex
  # interpretation "." matches any character, so e.g. "xorg" elsewhere
  # in the url would split at the wrong position.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Resolve the full text PDF link for upol.cz articles.
##
get.pdflink.upol.cz <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Get full text pdf link from asahq.org full text website.
#
# The "#pdfLink" element stores a site-relative PDF address in its
# data-article-url attribute; the absolute URL is rebuilt from the
# ".org" host part of the article url.
##
get.pdflink.asahq <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- "#pdfLink"
  # get relative pdf path from the data attribute
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
  # Split on the literal ".org" (fixed = TRUE): with the default regex
  # interpretation "." matches any character and could split at the
  # wrong position.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Get full text pdf link from physiology full text website.
#
# The page advertises its PDF via <link type="application/pdf">; the
# href is site-relative, so the absolute URL is rebuilt from the
# ".org" host part of the article url.
##
get.pdflink.physiology <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'link[type="application/pdf"]'
  # get relative href to pdfLink
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Split on the literal ".org" (fixed = TRUE): with the default regex
  # interpretation "." matches any character and could split at the
  # wrong position.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".org", intermed1 )
  return( pdflink )
}
###
# Resolve the full text PDF link for aota.org articles.
##
get.pdflink.aota <- function( url )
{
  # Download the article page with a Chrome user agent.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The publisher exposes the PDF location in a <meta> tag (selector
  # found with Selector Gadget, http://selectorgadget.com/).
  meta_css <- 'meta[name="citation_pdf_url"]'
  # Return the value of the tag's "content" attribute.
  html_attr( html_nodes( doc, css = meta_css ), "content" )
}
###
# Get full text pdf link from nutrition.org full text website.
#
# The full-text-pdf view link points at the "+html" preview; the
# suffix is stripped and the absolute URL rebuilt from the ".org"
# host part of the article url.
##
get.pdflink.nutrition <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- ".full-text-pdf-view-link a"
  # get relative href to pdfLink
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # Split on the literal ".org" (fixed = TRUE): with the default regex
  # interpretation "." matches any character and could split at the
  # wrong position.
  link1 <- strsplit( url, ".org", fixed = TRUE )
  intermed2 <- paste0( link1[[1]][1], ".org", intermed1 )
  # drop the "+html" suffix to obtain the plain pdf link
  pdflink <- sub( "\\+html", "", intermed2)
  return( pdflink )
}
###
# Resolve the full text PDF link for tums.ac.ir articles.
##
get.pdflink.tums <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The sidebar's article-tools file link is the PDF (selector chosen
  # with Selector Gadget, http://selectorgadget.com/).
  html_attr( html_nodes( doc, css = "#sidebarRTArticleTools .file" ), "href" )
}
###
# Resolve the full text PDF link for arvojournals articles.
##
get.pdflink.arvojournals <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "#pdfLink" element stores the PDF path in a data attribute
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  rel_path <- html_attr( html_nodes( doc, css = "#pdfLink" ), "data-article-url" )
  # Prefix the journal host to make it absolute.
  paste0( "http://iovs.arvojournals.org/", rel_path )
}
###
# Get full text pdf link from JAMA full text website.
#
# The full text tab's "#pdf-link" element stores a site-relative PDF
# address in its data-article-url attribute; the absolute URL is
# rebuilt from the ".com" host part of the article url.
##
get.pdflink.jama <- function( url )
{
  # parse page (Chrome user agent so the publisher serves the full page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- "#full-text-tab #pdf-link"
  # get relative pdf path from the data attribute
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "data-article-url" )
  # Split on the literal ".com" (fixed = TRUE): with the default regex
  # interpretation "." matches any character, so e.g. "xcom" elsewhere
  # in the url would split at the wrong position.
  link1 <- strsplit( url, ".com", fixed = TRUE )
  pdflink <- paste0( link1[[1]][1], ".com", intermed1 )
  return( pdflink )
}
###
# Resolve the full text PDF link for plos articles.
##
get.pdflink.plos <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "#downloadPdf" anchor carries a site-relative href (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = "#downloadPdf" ), "href" )
  # Prefix the journals host to make it absolute.
  paste0( "http://journals.plos.org", rel_href )
}
###
# Resolve the full text PDF link for bmj articles.
##
get.pdflink.bmj <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The PDF is advertised in a <link type="application/pdf"> tag
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = "link[type='application/pdf']" ), "href" )
  # Prefix the BMJ host to make it absolute.
  paste0( "http://www.bmj.com", rel_href )
}
###
# Resolve the full text PDF link for nejm articles.
##
get.pdflink.nejm <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Anchors under the "/doi/pdf" path carry the PDF link (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = "li a[href^='/doi/pdf']" ), "href" )
  # Prefix the NEJM host to make it absolute.
  paste0( "http://www.nejm.org", rel_href )
}
###
# Resolve the full text PDF link for academic.oup articles.
##
get.pdflink.acoup <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The ".al-link" element carries a site-relative href (selector
  # chosen with Selector Gadget, http://selectorgadget.com/).
  rel_href <- html_attr( html_nodes( doc, css = ".al-link" ), "href" )
  # Prefix the OUP host to make it absolute.
  paste0( "https://academic.oup.com", rel_href )
}
###
# Resolve the full text PDF link for annals articles.
##
get.pdflink.annals <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The "#tagmasterPDF" element stores the PDF path in a data
  # attribute (selector chosen with Selector Gadget,
  # http://selectorgadget.com/).
  rel_path <- html_attr( html_nodes( doc, css = "#tagmasterPDF" ), "data-article-url" )
  # Prefix the journal host to make it absolute.
  paste0( "https://www.annals.org", rel_path )
}
###
# Resolve the full text PDF link for cambridge articles.
##
get.pdflink.cambridge <- function( url )
{
  # Fetch the article page, posing as a Chrome browser.
  doc <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The first entry of the download-types list links to the PDF
  # (selector chosen with Selector Gadget, http://selectorgadget.com/).
  rel_hrefs <- html_attr( html_nodes( doc, css = ".download-types li:nth-child(1) a" ), "href" )
  # Keep only the first match and prefix the Cambridge host.
  paste0( "https://www.cambridge.org", rel_hrefs[1] )
}
###
# Get full text pdf link from OVID full text website.
#
# Extracts the "ovidFullTextUrlForButtons" redirect URL that OVID
# embeds in an inline <script>, follows it, and returns the src of the
# iframe that hosts the PDF viewer.
#
# NOTE(review): p6 is only assigned inside the if() branch below; when
# the marker string is absent from the second script node, the
# read_html( p6 ) call fails with "object 'p6' not found".
##
get.pdflink.ovid1 <- function( url )
{
  # parse page (Chrome user agent so OVID serves the regular page)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with Selector Gadget Google Chrome plugin [http://selectorgadget.com/]
  css <- 'meta[name="citation_pdf_url"]'
  # get href to pdfLink
  # (earlier approach via the citation_pdf_url meta tag, kept for reference)
  # p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  # p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # Collect the inline javascript nodes; the second one carries the
  # JS variable holding the full text URL.
  p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
  if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
  {
    p4 <- p3[2]
    # Cut everything before the quoted URL and everything from
    # "PubMed" onwards...
    p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
    # ...then restore the "PubMed" suffix that the gsub stripped.
    p6 <- paste0( p5, "PubMed" )
  }
  # Follow the extracted URL; options = "HUGE" raises libxml2's parse
  # limits, since OVID pages can exceed the default buffer size.
  page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
  # The PDF viewer lives in an iframe; its src is the direct link.
  pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  # (older two-hop variant, kept for reference)
  #intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
  #page3 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  #pdflink <- page3 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
  return( pdflink )
}
###
# Get full text pdf link from OVID full text website (variant that first
# tries the citation_pdf_url meta tag before falling back to the inline
# "ovidFullTextUrlForButtons" JavaScript variable).
##
get.pdflink.ovid2 <- function( url )
{
  # parse page (browser-like user agent)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  css <- 'meta[name="citation_pdf_url"]'
  # meta tag advertising the PDF URL (may be absent)
  p1 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  p6 <- NULL
  if(identical(p1, character(0))){
    # no meta tag: extract the full-text URL from the inline JavaScript
    p3 <- page %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
    {
      p4 <- p3[2]
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    # BUG FIX: p6 was used unconditionally even when the marker was absent,
    # producing "object 'p6' not found"; fail with a clear message instead.
    if ( is.null( p6 ) )
      stop( "ovidFullTextUrlForButtons not found on page: ", url, call. = FALSE )
    page2 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    pdflink <- page2 %>% html_nodes( css = "iframe" ) %>% html_attr( "src" )
  }else{
    # follow the meta-tag URL and repeat the JavaScript extraction there
    p2 <- xml2::read_html( curl( p1, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    p3 <- p2 %>% html_nodes( css = "script[type='text/javascript']")
    if ( grepl( "ovidFullTextUrlForButtons = ", p3[2]) )
    {
      p4 <- p3[2]
      p5 <- gsub( ".*ovidFullTextUrlForButtons = \"|PubMed.*", "", p4 )
      p6 <- paste0( p5, "PubMed" )
    }
    if ( is.null( p6 ) )
      stop( "ovidFullTextUrlForButtons not found on page: ", p1, call. = FALSE )
    page3 <- xml2::read_html( curl( p6, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ), options = "HUGE" )
    # the "#pdf" anchor is site-relative; resolve it against the OVID host
    intermed1 <- page3 %>% html_nodes( css = "#pdf" ) %>% html_attr( "href" )
    intermed2 <- paste0( "http://ovidsp.tx.ovid.com/", intermed1 )
    page4 <- xml2::read_html( curl( intermed2, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
    pdflink <- page4 %>% html_nodes( css = "iframe") %>% html_attr( "src" )
  }
  return( pdflink )
}
###
# Get full text pdf link from EHP full text website.
##
get.pdflink.ehp <- function(url) {
  # Fetch the article page using a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # The PDF icon anchor carries a site-relative href (selector found with
  # the Selector Gadget browser plugin); prepend the host.
  rel <- html_attr(html_nodes(page, css = ".pdf_icon"), "href")
  paste0("https://ehp.niehs.nih.gov", rel)
}
###
# Get full text pdf link from Science Direct full text website.
##
get.pdflink.sciencedirect <- function(url) {
  # Step 1: article page -> relative link behind the "Download PDF" button.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  rel <- html_attr(html_nodes(page, css = ".pdf-download-btn-link"), "href")
  # Step 2: the button target is an interstitial page that redirects via a
  # meta refresh tag; pull the final URL out of its content attribute.
  page2 <- xml2::read_html(curl(paste0("http://www.sciencedirect.com", rel), handle = curl::new_handle("useragent" = "Chrome/55.0")))
  refresh <- html_attr(html_nodes(page2, css = 'meta[content^="0;URL"]'), "content")
  # content looks like "0;URL=<target>"; keep what follows "URL="
  strsplit(refresh, "URL=")[[1]][2]
}
# For linkinghub redirect pages: recover the target article URL from the
# hidden "redirectURL" form input, then follow it to the PDF.
# NOTE(review): the original comment said "springerlink", but the function
# name and flow suggest Elsevier's linkinghub — confirm against callers.
get.pdflink.linkinghub <- function( url )
{
    # parse url further and get the specific node with the URL
    page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
    parsedfull <- htmlParse( page )
    rootnode <- xmlRoot( parsedfull )
    # hidden form input holding the (percent-encoded) redirect target
    o <- getNodeSet( rootnode, "//input[@name='redirectURL']" )[[1]]
    # convert the XML node to its printed character representation
    o2 <- capture.output(o)
    # extract URL from character string: split the tag text on spaces, then
    # split each attribute on "=" and keep the "value" attribute
    o3 <- data.frame( col = strsplit( o2, split = " " )[[1]] )
    o4 <- separate( o3, col = "col", into = c("a", "b"), sep = "=", fill = "right" )
    http <- o4[ o4$a == "value", "b" ]
    # strip the surrounding double quotes from the attribute value
    http <- gsub( "\"", "", http )
    # the redirect target is percent-encoded inside the page
    outurl <- URLdecode(http)
    # parse the redirect target page
    page <- xml2::read_html( curl( outurl, handle = curl::new_handle( "useragent" = "Mozilla/5.0" ) ) )
    # meta tag advertising the PDF URL
    css = 'meta[name="citation_pdf_url"]'
    # get href to pdfLink
    intermed3 <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
    # drop a stray "amp;" HTML-entity fragment from the URL
    pdflink1 <- sub( "amp;", "", intermed3 )
    page2 <- xml2::read_html( pdflink1 )
    css2 = 'div a'
    # the interstitial page links to the actual PDF from a plain anchor
    pdflink <- page2 %>% html_nodes( css = css2 ) %>% html_attr( "href" )
    return( pdflink )
}
###
# Get full text pdf link from scielo full text website.
##
get.pdflink.scielo <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Selector found with the Selector Gadget browser plugin.
  rel <- html_attr(html_nodes(page, css = "li:nth-child(2) a:nth-child(1)"), "href")
  # Site-relative path; keep only the first match and prepend the host.
  paste0("http://www.scielo.br", rel[1])
}
###
# Get full text pdf link from hyper.ahajournals full text website.
##
get.pdflink.ahaj <- function( url )
{
  # parse page (browser-like user agent)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # The page advertises the direct PDF URL in a <meta> tag, so no further
  # navigation is needed. (Removed: a stray no-op string literal
  # ".aha-icon-download" and dead commented-out code left over from an
  # earlier selector-based approach.)
  css <- 'meta[name=citation_pdf_url]'
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "content" )
  return( pdflink )
}
###
# Get full text pdf link from cmaj full text website.
##
get.pdflink.cmaj <- function( url )
{
  # parse page (browser-like user agent)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with the Selector Gadget browser plugin
  css <- ".full-text-pdf-view-link a"
  # hrefs are site-relative and end in ".full.pdf+html"
  pdflink <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  pdflink <- paste0( "http://www.cmaj.ca", pdflink )
  # BUG FIX: "+html" is an invalid regular expression (leading repetition
  # operator) and made sub() error at runtime; match the literal string
  # instead, consistent with the escaped patterns used by sibling functions.
  pdflink <- sub( "+html", "", pdflink, fixed = TRUE )
  return( pdflink )
}
###
# Get full text pdf link from doi.org (Wiley) full text website.
##
get.pdflink.doiwiley <- function(url) {
  # Resolve the DOI landing page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Wiley exposes the PDF viewer URL in a citation meta tag ...
  viewer <- html_attr(html_nodes(page, css = 'meta[name="citation_pdf_url"]'), "content")
  # ... which hosts the real document inside an embedded #pdfDocument frame.
  page2 <- xml2::read_html(curl(viewer, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  html_attr(html_nodes(page2, css = "#pdfDocument"), "src")
}
###
# Get full text pdf link from doi.org (bjs) full text website.
##
get.pdflink.doibjs <- function(url) {
  # Resolve the DOI landing page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # The info pane carries an "epdf" (viewer) link ...
  epdf <- html_attr(html_nodes(page, css = ".js-infopane-epdf"), "href")
  # ... which maps onto a plain "pdf" download link.
  sub("epdf", "pdf", epdf)
}
###
# Get full text pdf link from asm.org full text website.
##
get.pdflink.asm <- function( url )
{
  # The PDF link is derived purely from the article URL ("long" ->
  # "full.pdf"), so the page never needs to be fetched; the previous
  # unused xml2::read_html() network call was removed as dead code.
  pdflink <- sub( "long", "full.pdf", url )
  return( pdflink )
}
###
# Get full text pdf link from ajp... full text website.
##
get.pdflink.ajp <- function(url) {
  # For this journal the catalogued URL already points straight at the
  # full-text PDF, so it is returned unchanged.
  url
}
###
# Get full text pdf link from apsjournals full text website.
##
get.pdflink.apsjournals <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Selector found with the Selector Gadget browser plugin.
  rel <- html_attr(html_nodes(page, css = "li:nth-child(2) .nowrap"), "href")
  # Site-relative path; prepend the journal host.
  paste0("http://apsjournals.apsnet.org", rel)
}
###
# Get full text pdf link from arjournals full text website.
##
get.pdflink.arjournals <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Anchors whose href starts with the /doi/pdf path lead to the PDF.
  rel <- html_attr(html_nodes(page, css = "a[href^='/doi/pdf']"), "href")
  # Site-relative path; prepend the journal host.
  paste0("http://arjournals.annualreviews.org", rel)
}
###
# Get full text pdf link from ascopubs full text website.
##
get.pdflink.ascopubs <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  rel <- html_attr(html_nodes(page, css = ".show-pdf"), "href")
  # "/pdf" endpoints redirect through a viewer; "/pdfdirect" serves the file.
  sub("/pdf", "/pdfdirect", paste0("http://ascopubs.org", rel))
}
###
# Get full text pdf link from avmajournals full text website.
##
get.pdflink.avma <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Selector found with the Selector Gadget browser plugin.
  rel <- html_attr(html_nodes(page, css = ".article_link td:nth-child(2) .header4"), "href")
  # Site-relative path; prepend the journal host.
  paste0("http://avmajournals.avma.org", rel)
}
###
# Get full text pdf link from bjgp full text website.
##
get.pdflink.bjgp <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # <link type="application/pdf"> holds the site-relative PDF path.
  rel <- html_attr(html_nodes(page, css = "link[type='application/pdf']"), "href")
  paste0("http://bjgp.org", rel)
}
###
# Get full text pdf link from boneandjoint full text website.
##
get.pdflink.boneandjoint <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # <link type="application/pdf"> holds the site-relative PDF path.
  rel <- html_attr(html_nodes(page, css = "link[type='application/pdf']"), "href")
  paste0("http://bjj.boneandjoint.org.uk", rel)
}
###
# Get full text pdf link from aacrjournals full text website.
##
get.pdflink.aacrjournals <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Selector found with the Selector Gadget browser plugin.
  rel <- html_attr(html_nodes(page, css = ".last .highwire-article-nav-jumplink"), "href")
  # Keep the scheme+host part of the article URL and graft the relative path on.
  host <- strsplit(url, ".org")[[1]][1]
  paste0(host, ".org", rel)
}
###
# Get full text pdf link from diabetesjournals full text website.
##
get.pdflink.diabetesjournals <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # <link type="application/pdf"> holds the site-relative PDF path.
  rel <- html_attr(html_nodes(page, css = "link[type='application/pdf']"), "href")
  # Reuse the article URL's scheme+host and append the relative path.
  host <- strsplit(url, ".org")[[1]][1]
  paste0(host, ".org", rel)
}
###
# Get full text pdf link from asnjournals full text website.
##
get.pdflink.asnjournals <- function( url )
{
  # parse page (browser-like user agent)
  page <- xml2::read_html( curl( url, handle = curl::new_handle( "useragent" = "Chrome/55.0" ) ) )
  # css of pdf element selected with the Selector Gadget browser plugin
  css <- ".primary a"
  intermed1 <- page %>% html_nodes( css = css ) %>% html_attr( "href" )
  # strip the "+html" viewer suffix so the link serves the raw PDF
  intermed2 <- sub( ".pdf\\+html", ".pdf", intermed1 )
  link1 <- strsplit( url, ".org" )
  # BUG FIX: the cleaned link (intermed2) was computed but the uncleaned
  # href (intermed1) was returned; use intermed2 instead.
  pdflink <- paste0( link1[[1]][1], ".org", intermed2 )
  return( pdflink )
}
###
# Get full text pdf link from ersjournals full text website.
##
get.pdflink.ersjournals <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # <link type="application/pdf"> holds the site-relative PDF path.
  rel <- html_attr(html_nodes(page, css = "link[type='application/pdf']"), "href")
  # Reuse the article URL's scheme+host (split on ".com") and graft the path on.
  host <- strsplit(url, ".com")[[1]][1]
  paste0(host, ".com", rel)
}
###
# Get full text pdf link from gacetamedicade full text website.
##
get.pdflink.gacetamedicade <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # Selector found with the Selector Gadget browser plugin.
  rel <- html_attr(html_nodes(page, css = ".col-sm-2 li:nth-child(1) a"), "href")
  # Site-relative path; prepend the journal host.
  paste0("http://gacetamedicademexico.com/", rel)
}
###
# Get full text pdf link from iiar full text website.
##
get.pdflink.iiar <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  rel <- html_attr(html_nodes(page, css = ".full-text-pdf-view-link a"), "href")
  # Reuse the article URL's scheme+host and append the relative path.
  host <- strsplit(url, ".org")[[1]][1]
  # Drop the "+html" viewer suffix to obtain the direct PDF URL.
  sub("\\+html", "", paste0(host, ".org", rel))
}
###
# Get full text pdf link from anesthesiology full text website.
##
get.pdflink.anesthesiology <- function(url) {
  # Fetch the article page with a browser-like user agent.
  page <- xml2::read_html(curl(url, handle = curl::new_handle("useragent" = "Chrome/55.0")))
  # The #pdfLink element stores the PDF path in a data attribute.
  rel <- html_attr(html_nodes(page, css = "#pdfLink"), "data-article-url")
  # Reuse the article URL's scheme+host and append the relative path.
  host <- strsplit(url, ".org")[[1]][1]
  paste0(host, ".org", rel)
}
###################################
# END FUNCTIONS
###################################
# ---- Driver script: fetch the full-text PDF for every missing PMID ----
# output directory to store full text pdf
outdir <- 'pdfNEW/pdfs2'
# read data of missing pdfs (semicolon-separated file: PMID + URL per row)
missings <- read.csv2( "missingsWithURL.csv", stringsAsFactors = F )
head(missings)
names(missings) <- c( "pmid", "url" )
# NOTE(review): `min` and `max` shadow the base R functions of the same name
# within this script. `min` is a hard-coded offset, apparently used to resume
# a previously interrupted run — confirm before re-running from scratch.
min <- 220000
max <- length(missings[,1])
# set progress bar over the remaining records
progbar <- txtProgressBar( min = min, max = max, style = 3 )
# for every remaining pmid, attempt the download
for( i in min:max )
{
  setTxtProgressBar( progbar, i )
  # build a one-row record (pmid + url) for the current entry
  pp <- data.frame( pmid = missings$pmid[ i ],
                    url = missings$url[ i ],
                    stringsAsFactors = FALSE )
  # get.pdf() (defined earlier in this file, above this chunk) dispatches on
  # the URL's host and downloads the PDF — presumably into `outdir`; confirm.
  get.pdf( pmid = pp$pmid, url = pp$url )
}
q( save = "no" ) |
#' List .csv files in a directory
#'
#' The directory is resolved relative to the free variable `root.dir`, which
#' must exist in the enclosing (global) environment — TODO(review): consider
#' passing it explicitly.
#' @param csv.dir character or file.path, target directory (relative to `root.dir`)
#' @return character, full paths of the .csv files found
#' @export
get_csv_files <- function(csv.dir){
    # Anchor the pattern so only names *ending* in ".csv" match; the previous
    # pattern ".csv" treated the dot as a regex wildcard and also matched
    # names merely containing "csv" (e.g. "notcsv.txt").
    csv.files <- list.files(file.path(root.dir, csv.dir), pattern = "\\.csv$", full.names = TRUE)
    return(csv.files)
}
#' Read .csv files to create a citation data.frame
#'
#' Each file is read with \code{read.csv} and the resulting frames are
#' concatenated row-wise; all files must share the same columns.
#' @param csv.files character, file names
#' @return A data.frame
#' @export
get_citation_dataframe <- function(csv.files){
    # Preallocate, read each file, then bind all frames at once.
    frames <- vector("list", length(csv.files))
    for (i in seq_along(csv.files)) {
        frames[[i]] <- read.csv(csv.files[[i]], header = TRUE)
    }
    do.call(rbind, frames)
}
#' Read the citation data frame and store them into a named list
#'
#' @param languages character, of one "english", "spanish", "portuguese"
#' @return a named list with `length(languages)` elements, each a data.frame
#' @export
get_language_dfs <- function(languages){
    # NOTE(review): `read_citation_dataframe` is not defined in this file;
    # presumably it lives elsewhere in the package (a similarly named
    # `get_citation_dataframe` exists above) — confirm before use.
    dfs <- lapply(languages, read_citation_dataframe)
    names(dfs) <- languages
    return(dfs)
}
#' Binds the separate languages data.frame into a meta data.frame
#'
#' Re-encodes all factor and character columns before binding so that
#' accented characters survive the merge. Assumes the source CSVs were
#' written in WINDOWS-1252 — TODO(review): confirm.
#' @param language_dfs a named list of data.frame
#' @return a data.frame, the row-wise concatenation of all inputs
#' @export
get_meta_df <- function(language_dfs){
    language_dfs <- lapply(language_dfs, function(df){
        df %>%
            # iconv on a factor coerces the column to character
            dplyr::mutate_if(is.factor, iconv, from = "WINDOWS-1252", to = "UTF-8") %>%
            dplyr::mutate_if(is.character, iconv, from = "WINDOWS-1252", to = "UTF-8")
    })
    do.call(rbind, language_dfs)
}
#' This function performs the cross-walk between human reading databases and topic model databases using document titles
#'
#' If `titleInd_file` resolves to an existing file, the cached indices are
#' read from it; otherwise the titles are normalised, matched by regular
#' expression, and the result is cached in the package's extdata folder.
#' @param titleInd_file a filename, if it exists, file content is read, if it does not exist the formatting and regexpr matching is performed
#' @param humanReadingDatabase human reading database with column "title", default to NULL (only needed when the cache is absent)
#' @param topicModelTitles titles of the topic model files, default to NULL (only needed when the cache is absent)
#' @return matching indices between the two databases (NA where no match was found)
#' @export
get_titleInd <- function(
    titleInd_file = system.file("extdata", "titleInd.Rds", package = "wateReview"),
    humanReadingDatabase = NULL,
    topicModelTitles = NULL
    ){
    # system.file() returns "" when the cached file does not exist yet
    if(titleInd_file == ""){
        # modify the format of titles so that match between topicModelTitles and humanReadingDatabase
        titleHumanReading <- as.character(humanReadingDatabase$title)
        # NOTE(review): ".pdf" is an unanchored regex ("." matches any
        # character), so it also strips e.g. "xpdf" anywhere in a title
        titleHumanReading <- gsub(".pdf", "", titleHumanReading)
        titleHumanReading <- gsub(" ", "_", titleHumanReading)
        # drop characters outside the printable-ASCII range
        titleHumanReading <- gsub("[^\x20-\x7E]", "", titleHumanReading)
        titleHumanReading <- gsub("\\(", "", titleHumanReading)
        titleHumanReading <- gsub("\\)", "", titleHumanReading)
        titleHumanReading <- gsub("\\'", "", titleHumanReading)
        titleHumanReading <- gsub(",", "", titleHumanReading)
        # look for matches: first grep hit per human-read title (NA if none)
        titleInd <- sapply(titleHumanReading, function(t) grep(t, topicModelTitles)[1])
        # cache the result for subsequent calls
        saveRDS(titleInd, file.path(system.file("extdata", package = "wateReview"), "titleInd.Rds"))
    } else {
        titleInd <- readRDS(titleInd_file)
    }
    return(titleInd)
}
#' Read human reading database
#'
#' Loads the packaged human-reading CSV for the requested scale and drops
#' rows with an empty title (they cannot be cross-walked to documents).
#' @param scale_type One of "location", "spatial", "temporal", default to "location"
#' @return validation human-read data
#' @export
get_validationHumanReading <- function(scale_type = "location"){
    csv_path <- system.file("extdata", paste0("validation_df_", scale_type, ".csv"), package = "wateReview")
    df <- read.csv(csv_path)
    df[df$title != "", ]
}
#' Read topic model data
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to the topic model data
#' @return topic model data
#' @export
get_topicDocs <- function(fname = system.file("extdata", "topicDocs.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read topic model file titles
#'
#' @param topicDocs document-topic table; only its row count is used, to
#'   check that there is one title per modelled document
#' @param fname path to the topic model file titles (one title per line)
#' @return character vector of topic model file titles
#' @export
get_titleDocs <- function(topicDocs, fname = system.file("extdata", "info.dat", package = "wateReview")){
    titleDocs <- readLines(fname)
    # sanity check only: warns (does not stop) on a row/title count mismatch
    if (nrow(topicDocs) != length(titleDocs)) warning("Dimensions not matching")
    return(titleDocs)
}
#' Read training data (document-term matrix corresponding to webscrapped labels)
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to training data (document-term matrix corresponding to webscrapped labels)
#' @return training data (document-term matrix corresponding to webscrapped labels)
#' @export
get_webscrapped_validationDTM <- function(fname = system.file("extdata", "webscrapped_validationDTM.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read webscrapped training labels
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to webscrapped training labels data
#' @return webscrapped training labels
#' @export
get_webscrapped_trainingLabels <- function(fname = system.file("extdata", "webscrapped_trainingLabels.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read document-term matrix created by the text mining code
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param dtm_file path to saved document-term matrix
#' @return document-term matrix
#' @export
get_DocTermMatrix <- function(dtm_file= system.file("extdata", "obj_dtm_from_dfm_country.Rds", package = "wateReview")){
    readRDS(dtm_file)
}
#' identifies the subset of paper with validation data and align databases
#'
#' Rows without a title match are dropped, then duplicated matches are
#' removed so that human-reading rows line up one-to-one with the
#' topic-model rows selected by `titleInd`.
#' @param titleInd cross-walked indices between human-reading database and topic model (may contain NA and duplicates)
#' @param validationHumanReading human-reading database (same row order as titleInd)
#' @param topicDocs results from the topic model
#' @param DTM document-term matrix derived from topic modelled corpus
#' @return list with four elements: titleInd, validationHumanReading, validationTopicDocs, validationDTM
#' @export
align_humanReadingTopicModel <- function(titleInd, validationHumanReading, topicDocs, DTM){
    # drop human-reading rows that found no topic-model counterpart
    validationHumanReading <- validationHumanReading[!is.na(titleInd), ]
    titleInd <- na.omit(unlist(titleInd))
    # remove duplicated matches; !duplicated() and unique() both keep the
    # first occurrence, so the two objects stay row-aligned
    validationHumanReading <- validationHumanReading[!duplicated(titleInd), ]
    titleInd <- unique(titleInd)
    # subset the topic-model side by the matched document indices
    validationTopicDocs <- topicDocs[titleInd, ]
    validationDTM <- DTM[titleInd, ]
    res <- list(titleInd = titleInd,
        validationHumanReading = validationHumanReading,
        validationTopicDocs = validationTopicDocs,
        validationDTM = validationDTM)
    return(res)
}
#' Perform QA/QC on aligned data
#'
#' Removes papers flagged out during QA, strips bookkeeping columns, and
#' coerces noisy human-reading answers to logicals for the temporal and
#' spatial scales.
#' @param alignedData list of aligned data between human reading and topic model
#' @param scale_type One of "location", "spatial", "temporal", default to "location"
#' @return list with three elements: validationHumanReading, validationTopicDocs, validationDTM
#' @export
QA_alignedData <- function(alignedData, scale_type = "location"){
    validationHumanReading <- alignedData$validationHumanReading
    validationTopicDocs <- alignedData$validationTopicDocs
    validationDTM <- alignedData$validationDTM
    # remove QA'd out papers (rows with country_location == 0 are rejected);
    # only applies to the non-location scales
    if (scale_type != "location"){
        validationTopicDocs <- validationTopicDocs[validationHumanReading$country_location != 0, ]
        validationDTM <- validationDTM[validationHumanReading$country_location != 0, ]
        # must be subset last: the two lines above index by its original rows
        validationHumanReading <- validationHumanReading[validationHumanReading$country_location != 0, ]
    }
    # remove title, QA and location information
    drops <- c("title", "country_location", "validation", "study_years")
    validationHumanReading <- validationHumanReading[, !colnames(validationHumanReading) %in% drops]
    # remove noisy information from human reading: any answer other than
    # "0"/"1" becomes "0", then each column is cast to logical
    if (scale_type %in% c("temporal", "spatial")){
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) as.character(x)))
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) replace(x, which(!x %in% c("0", "1")), "0")))
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) as.logical(as.integer(as.character(x)))))
    }
    # rename topic columns Topic1..TopicK for readability downstream
    colnames(validationTopicDocs) <- paste0("Topic", seq(ncol(validationTopicDocs)))
    validationDTM <- as.matrix(validationDTM)
    # colnames(validationDTM) <- paste0("Term", seq(ncol(validationDTM)))
    res <- list(validationHumanReading = validationHumanReading,
        validationTopicDocs = validationTopicDocs,
        validationDTM = validationDTM)
    return(res)
}
| /R/data_loading.R | permissive | hrvg/wateReview | R | false | false | 8,212 | r | #' List .csv files in a directory
#' @param csv.dir character or file.path, target directory (relative to the
#'   free variable `root.dir`, which must exist in the enclosing environment)
#' @return character, full paths of the .csv files found
#' @export
get_csv_files <- function(csv.dir){
    # Anchored pattern: only names ending in ".csv" match (the previous
    # unanchored ".csv" also matched e.g. "notcsv.txt", because the dot is
    # a regex wildcard).
    csv.files <- list.files(file.path(root.dir, csv.dir), pattern = "\\.csv$", full.names = TRUE)
    return(csv.files)
}
#' Read .csv files to create a citation data.frame
#'
#' Each file is read with \code{read.csv} and the resulting frames are
#' concatenated row-wise; all files must share the same columns.
#' @param csv.files character, file names
#' @return A data.frame
#' @export
get_citation_dataframe <- function(csv.files){
    # Preallocate, read each file, then bind all frames at once.
    frames <- vector("list", length(csv.files))
    for (i in seq_along(csv.files)) {
        frames[[i]] <- read.csv(csv.files[[i]], header = TRUE)
    }
    do.call(rbind, frames)
}
#' Read the citation data frame and store them into a named list
#'
#' @param languages character, of one "english", "spanish", "portuguese"
#' @return a named list with `length(languages)` elements, each a data.frame
#' @export
get_language_dfs <- function(languages){
    # NOTE(review): `read_citation_dataframe` is not defined in this file;
    # presumably it lives elsewhere in the package (a similarly named
    # `get_citation_dataframe` exists above) — confirm before use.
    dfs <- lapply(languages, read_citation_dataframe)
    names(dfs) <- languages
    return(dfs)
}
#' Binds the separate languages data.frame into a meta data.frame
#'
#' Re-encodes all factor and character columns before binding so that
#' accented characters survive the merge. Assumes the source CSVs were
#' written in WINDOWS-1252 — TODO(review): confirm.
#' @param language_dfs a named list of data.frame
#' @return a data.frame, the row-wise concatenation of all inputs
#' @export
get_meta_df <- function(language_dfs){
    language_dfs <- lapply(language_dfs, function(df){
        df %>%
            # iconv on a factor coerces the column to character
            dplyr::mutate_if(is.factor, iconv, from = "WINDOWS-1252", to = "UTF-8") %>%
            dplyr::mutate_if(is.character, iconv, from = "WINDOWS-1252", to = "UTF-8")
    })
    do.call(rbind, language_dfs)
}
#' This function performs the cross-walk between human reading databases and topic model databases using document titles
#'
#' If `titleInd_file` resolves to an existing file, the cached indices are
#' read from it; otherwise the titles are normalised, matched by regular
#' expression, and the result is cached in the package's extdata folder.
#' @param titleInd_file a filename, if it exists, file content is read, if it does not exist the formatting and regexpr matching is performed
#' @param humanReadingDatabase human reading database with column "title", default to NULL (only needed when the cache is absent)
#' @param topicModelTitles titles of the topic model files, default to NULL (only needed when the cache is absent)
#' @return matching indices between the two databases (NA where no match was found)
#' @export
get_titleInd <- function(
    titleInd_file = system.file("extdata", "titleInd.Rds", package = "wateReview"),
    humanReadingDatabase = NULL,
    topicModelTitles = NULL
    ){
    # system.file() returns "" when the cached file does not exist yet
    if(titleInd_file == ""){
        # modify the format of titles so that match between topicModelTitles and humanReadingDatabase
        titleHumanReading <- as.character(humanReadingDatabase$title)
        # NOTE(review): ".pdf" is an unanchored regex ("." matches any
        # character), so it also strips e.g. "xpdf" anywhere in a title
        titleHumanReading <- gsub(".pdf", "", titleHumanReading)
        titleHumanReading <- gsub(" ", "_", titleHumanReading)
        # drop characters outside the printable-ASCII range
        titleHumanReading <- gsub("[^\x20-\x7E]", "", titleHumanReading)
        titleHumanReading <- gsub("\\(", "", titleHumanReading)
        titleHumanReading <- gsub("\\)", "", titleHumanReading)
        titleHumanReading <- gsub("\\'", "", titleHumanReading)
        titleHumanReading <- gsub(",", "", titleHumanReading)
        # look for matches: first grep hit per human-read title (NA if none)
        titleInd <- sapply(titleHumanReading, function(t) grep(t, topicModelTitles)[1])
        # cache the result for subsequent calls
        saveRDS(titleInd, file.path(system.file("extdata", package = "wateReview"), "titleInd.Rds"))
    } else {
        titleInd <- readRDS(titleInd_file)
    }
    return(titleInd)
}
#' Read human reading database
#'
#' Loads the packaged human-reading CSV for the requested scale and drops
#' rows with an empty title (they cannot be cross-walked to documents).
#' @param scale_type One of "location", "spatial", "temporal", default to "location"
#' @return validation human-read data
#' @export
get_validationHumanReading <- function(scale_type = "location"){
    csv_path <- system.file("extdata", paste0("validation_df_", scale_type, ".csv"), package = "wateReview")
    df <- read.csv(csv_path)
    df[df$title != "", ]
}
#' Read topic model data
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to the topic model data
#' @return topic model data
#' @export
get_topicDocs <- function(fname = system.file("extdata", "topicDocs.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read topic model file titles
#'
#' @param topicDocs document-topic table; only its row count is used, to
#'   check that there is one title per modelled document
#' @param fname path to the topic model file titles (one title per line)
#' @return character vector of topic model file titles
#' @export
get_titleDocs <- function(topicDocs, fname = system.file("extdata", "info.dat", package = "wateReview")){
    titleDocs <- readLines(fname)
    # sanity check only: warns (does not stop) on a row/title count mismatch
    if (nrow(topicDocs) != length(titleDocs)) warning("Dimensions not matching")
    return(titleDocs)
}
#' Read training data (document-term matrix corresponding to webscrapped labels)
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to training data (document-term matrix corresponding to webscrapped labels)
#' @return training data (document-term matrix corresponding to webscrapped labels)
#' @export
get_webscrapped_validationDTM <- function(fname = system.file("extdata", "webscrapped_validationDTM.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read webscrapped training labels
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param fname path to webscrapped training labels data
#' @return webscrapped training labels
#' @export
get_webscrapped_trainingLabels <- function(fname = system.file("extdata", "webscrapped_trainingLabels.Rds", package = "wateReview")){
    readRDS(fname)
}
#' Read document-term matrix created by the text mining code
#'
#' Thin wrapper over \code{readRDS} with the package's bundled file as the
#' default location.
#' @param dtm_file path to saved document-term matrix
#' @return document-term matrix
#' @export
get_DocTermMatrix <- function(dtm_file= system.file("extdata", "obj_dtm_from_dfm_country.Rds", package = "wateReview")){
    readRDS(dtm_file)
}
#' identifies the subset of paper with validation data and align databases
#'
#' Rows without a title match are dropped, then duplicated matches are
#' removed so that human-reading rows line up one-to-one with the
#' topic-model rows selected by `titleInd`.
#' @param titleInd cross-walked indices between human-reading database and topic model (may contain NA and duplicates)
#' @param validationHumanReading human-reading database (same row order as titleInd)
#' @param topicDocs results from the topic model
#' @param DTM document-term matrix derived from topic modelled corpus
#' @return list with four elements: titleInd, validationHumanReading, validationTopicDocs, validationDTM
#' @export
align_humanReadingTopicModel <- function(titleInd, validationHumanReading, topicDocs, DTM){
    # drop human-reading rows that found no topic-model counterpart
    validationHumanReading <- validationHumanReading[!is.na(titleInd), ]
    titleInd <- na.omit(unlist(titleInd))
    # remove duplicated matches; !duplicated() and unique() both keep the
    # first occurrence, so the two objects stay row-aligned
    validationHumanReading <- validationHumanReading[!duplicated(titleInd), ]
    titleInd <- unique(titleInd)
    # subset the topic-model side by the matched document indices
    validationTopicDocs <- topicDocs[titleInd, ]
    validationDTM <- DTM[titleInd, ]
    res <- list(titleInd = titleInd,
        validationHumanReading = validationHumanReading,
        validationTopicDocs = validationTopicDocs,
        validationDTM = validationDTM)
    return(res)
}
#' Perform QA/QC on aligned data
#'
#' Removes papers flagged out during QA, strips bookkeeping columns, and
#' coerces noisy human-reading answers to logicals for the temporal and
#' spatial scales.
#' @param alignedData list of aligned data between human reading and topic model
#' @param scale_type One of "location", "spatial", "temporal", default to "location"
#' @return list with three elements: validationHumanReading, validationTopicDocs, validationDTM
#' @export
QA_alignedData <- function(alignedData, scale_type = "location"){
    validationHumanReading <- alignedData$validationHumanReading
    validationTopicDocs <- alignedData$validationTopicDocs
    validationDTM <- alignedData$validationDTM
    # remove QA'd out papers (rows with country_location == 0 are rejected);
    # only applies to the non-location scales
    if (scale_type != "location"){
        validationTopicDocs <- validationTopicDocs[validationHumanReading$country_location != 0, ]
        validationDTM <- validationDTM[validationHumanReading$country_location != 0, ]
        # must be subset last: the two lines above index by its original rows
        validationHumanReading <- validationHumanReading[validationHumanReading$country_location != 0, ]
    }
    # remove title, QA and location information
    drops <- c("title", "country_location", "validation", "study_years")
    validationHumanReading <- validationHumanReading[, !colnames(validationHumanReading) %in% drops]
    # remove noisy information from human reading: any answer other than
    # "0"/"1" becomes "0", then each column is cast to logical
    if (scale_type %in% c("temporal", "spatial")){
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) as.character(x)))
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) replace(x, which(!x %in% c("0", "1")), "0")))
        validationHumanReading <- do.call(data.frame, lapply(validationHumanReading, function(x) as.logical(as.integer(as.character(x)))))
    }
    # rename topic columns Topic1..TopicK for readability downstream
    colnames(validationTopicDocs) <- paste0("Topic", seq(ncol(validationTopicDocs)))
    validationDTM <- as.matrix(validationDTM)
    # colnames(validationDTM) <- paste0("Term", seq(ncol(validationDTM)))
    res <- list(validationHumanReading = validationHumanReading,
        validationTopicDocs = validationTopicDocs,
        validationDTM = validationDTM)
    return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset-format.R
\name{FileFormat}
\alias{FileFormat}
\alias{ParquetFileFormat}
\alias{IpcFileFormat}
\alias{CsvFileFormat}
\title{Dataset file formats}
\description{
A \code{FileFormat} holds information about how to read and parse the files
included in a \code{Dataset}. There are subclasses corresponding to the supported
file formats (\code{ParquetFileFormat} and \code{IpcFileFormat}).
}
\section{Factory}{
\code{FileFormat$create()} takes the following arguments:
\itemize{
\item \code{format}: A string identifier of the file format. Currently supported values:
\itemize{
\item "parquet"
\item "ipc"/"arrow"/"feather", all aliases for each other; for Feather, note that
only version 2 files are supported
\item "csv"/"text", aliases for the same thing (because comma is the default
delimiter for text files
\item "tsv", equivalent to passing \verb{format = "text", delimiter = "\\t"}
}
\item \code{...}: Additional format-specific options
`format = "parquet"``:
\itemize{
\item \code{dict_columns}: Names of columns which should be read as dictionaries.
\item Any Parquet options from \link{FragmentScanOptions}.
}
\code{format = "text"}: see \link{CsvParseOptions}. Note that you can specify them either
with the Arrow C++ library naming ("delimiter", "quoting", etc.) or the
\code{readr}-style naming used in \code{\link[=read_csv_arrow]{read_csv_arrow()}} ("delim", "quote", etc.).
Not all \code{readr} options are currently supported; please file an issue if
you encounter one that \code{arrow} should support. Also, the following options are
supported. From \link{CsvReadOptions}:
\itemize{
\item \code{skip_rows}
\item \code{column_names}
\item \code{autogenerate_column_names}
From \link{CsvFragmentScanOptions} (these values can be overridden at scan time):
\item \code{convert_options}: a \link{CsvConvertOptions}
\item \code{block_size}
}
}
It returns the appropriate subclass of \code{FileFormat} (e.g. \code{ParquetFileFormat})
}
\examples{
\dontshow{if (arrow_with_dataset()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
## Semi-colon delimited files
# Set up directory for examples
tf <- tempfile()
dir.create(tf)
on.exit(unlink(tf))
write.table(mtcars, file.path(tf, "file1.txt"), sep = ";", row.names = FALSE)
# Create FileFormat object
format <- FileFormat$create(format = "text", delimiter = ";")
open_dataset(tf, format = format)
\dontshow{\}) # examplesIf}
}
| /r/man/FileFormat.Rd | permissive | Sebastiaan-Alvarez-Rodriguez/arrow | R | false | true | 2,498 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset-format.R
\name{FileFormat}
\alias{FileFormat}
\alias{ParquetFileFormat}
\alias{IpcFileFormat}
\alias{CsvFileFormat}
\title{Dataset file formats}
\description{
A \code{FileFormat} holds information about how to read and parse the files
included in a \code{Dataset}. There are subclasses corresponding to the supported
file formats (\code{ParquetFileFormat} and \code{IpcFileFormat}).
}
\section{Factory}{
\code{FileFormat$create()} takes the following arguments:
\itemize{
\item \code{format}: A string identifier of the file format. Currently supported values:
\itemize{
\item "parquet"
\item "ipc"/"arrow"/"feather", all aliases for each other; for Feather, note that
only version 2 files are supported
\item "csv"/"text", aliases for the same thing (because comma is the default
delimiter for text files
\item "tsv", equivalent to passing \verb{format = "text", delimiter = "\\t"}
}
\item \code{...}: Additional format-specific options
`format = "parquet"``:
\itemize{
\item \code{dict_columns}: Names of columns which should be read as dictionaries.
\item Any Parquet options from \link{FragmentScanOptions}.
}
\code{format = "text"}: see \link{CsvParseOptions}. Note that you can specify them either
with the Arrow C++ library naming ("delimiter", "quoting", etc.) or the
\code{readr}-style naming used in \code{\link[=read_csv_arrow]{read_csv_arrow()}} ("delim", "quote", etc.).
Not all \code{readr} options are currently supported; please file an issue if
you encounter one that \code{arrow} should support. Also, the following options are
supported. From \link{CsvReadOptions}:
\itemize{
\item \code{skip_rows}
\item \code{column_names}
\item \code{autogenerate_column_names}
From \link{CsvFragmentScanOptions} (these values can be overridden at scan time):
\item \code{convert_options}: a \link{CsvConvertOptions}
\item \code{block_size}
}
}
It returns the appropriate subclass of \code{FileFormat} (e.g. \code{ParquetFileFormat})
}
\examples{
\dontshow{if (arrow_with_dataset()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
## Semi-colon delimited files
# Set up directory for examples
tf <- tempfile()
dir.create(tf)
on.exit(unlink(tf))
write.table(mtcars, file.path(tf, "file1.txt"), sep = ";", row.names = FALSE)
# Create FileFormat object
format <- FileFormat$create(format = "text", delimiter = ";")
open_dataset(tf, format = format)
\dontshow{\}) # examplesIf}
}
|
#Auxiliary Functions
# Inlined copy of mapvalues() (originally from the plyr package) so we avoid
# depending on the retired plyr package.
# Replace each element of `x` that matches an element of `from` with the
# corresponding element of `to`; elements not listed in `from` are unchanged.
# For factors, only the levels are remapped (integer codes are untouched).
# With warn_missing = TRUE, a message reports `from` values absent from `x`.
mapvalues <- function(x, from, to, warn_missing = TRUE) {
  if (length(from) != length(to)) {
    stop("`from` and `to` vectors are not the same length.")
  }
  if (!is.atomic(x)) {
    stop("`x` must be an atomic vector.")
  }
  if (is.factor(x)) {
    # If x is a factor, call self but operate on the levels
    levels(x) <- mapvalues(levels(x), from, to, warn_missing)
    return(x)
  }
  mapidx <- match(x, from)
  mapidxNA <- is.na(mapidx)
  # index of items in `from` that were found in `x`
  from_found <- sort(unique(mapidx))
  if (warn_missing && length(from_found) != length(from)) {
    # seq_along() instead of 1:length(): safe even for zero-length `from`.
    message("The following `from` values were not present in `x`: ",
            paste(from[!(seq_along(from) %in% from_found)], collapse = ", "))
  }
  x[!mapidxNA] <- to[mapidx[!mapidxNA]]
  x
}
ttoyear <- function(t){year=((as.numeric(t)-1) * tstep + year0); return(year);}
yeartot <- function(year){t=((as.numeric(as.character(year)) - year0) / tstep) + 1; return(t);}
convert_pdftopng <- F #converts all created pdfs to png for better quality (needs pdftopng.exe in your PATH. Download from http://www.xpdfreader.com/download.html)
# Save the last ggplot to disk, optionally exporting its data as .xlsx.
# Side effects: prints the plot, creates directories, writes files.
# Relies on globals set elsewhere in the project: deploy_online, graphdir,
# figure_format, write_plotdata_csv, convert_pdftopng, and (optionally)
# legend_position.
#   plotname    plot title and (sanitized: spaces/_/- -> _) file name
#   width/height  output size; text_size  base font size
#   plotdata    optional data frame written next to the figure as .xlsx
#   suffix      appended to the file name
#   transparent transparent background (png only)
#   add_title   use plotname as the plot title
#   forpaper    additionally copy the figure into graphdir/forpaper
saveplot <- function(plotname, width=7, height=5, text_size=10, plotdata=NULL, suffix="", transparent=FALSE, add_title=TRUE, forpaper=F){
if(!deploy_online) if(!dir.exists(graphdir)){dir.create(graphdir)} #create directory for graphs
if(figure_format!="png"){transparent = FALSE}
if(figure_format=="pdf"){plot_device=cairo_pdf}else{plot_device=figure_format}
if(figure_format=="eps"){plot_device=cairo_ps}
#device=cairo_pdf makes PDFs work with greek symbols etc.
# Convert a time-index column to calendar years for plotting, if present.
if("t" %in% colnames(plotdata)){plotdata$t <- ttoyear(plotdata$t)}
if(!exists("legend_position")){legend_position = "bottom"}
if(legend_position=="bottom"){legend_direction="horizontal"}else{legend_direction="vertical"}
# NOTE(review): transparent_background is computed here but never applied to
# the plot below -- confirm whether it should be added to the ggsave() call.
if(transparent){transparent_background <- theme(legend.background = element_blank(), panel.background = element_blank(), plot.background = element_rect(fill = "transparent",colour = NA))}else{transparent_background = NULL}
print(ggplot2::last_plot())
if(!deploy_online){
ggsave(filename=file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), plot = ggplot2::last_plot() + if(add_title){labs(title=plotname)}else{labs(title="")} + theme(text = element_text(size=text_size), legend.position=legend_position, legend.direction = legend_direction, legend.key = element_rect(colour = NA), legend.title=element_blank()), width=width, height=height, bg = "transparent", device = plot_device)
# Optional external PDF -> PNG conversion (Windows shell, pdftopng.exe).
if(figure_format=="pdf" & convert_pdftopng) shell(str_glue('pdftopng.exe {file.path(graphdir, paste0(as.character(gsub(" ", "_", plotname)),".", figure_format))} - > {file.path(graphdir, paste0(as.character(gsub(" ", "_", plotname)),".", "png"))}'))
if(!is.null(plotdata) & write_plotdata_csv){write.xlsx(subset(plotdata), file = file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname, suffix)),".xlsx")))}
if(forpaper){
if(!dir.exists(file.path(graphdir, "forpaper"))){dir.create(file.path(graphdir, "forpaper"))}
file.copy(file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), file.path(graphdir, "forpaper", paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), overwrite = T)
}
}
}
# Split the `file` column of a results data frame into scenario components.
#   df:    data frame with a character `file` column
#   type:  "separate" -> split on every `sep` into new columns;
#          "last"/"first" -> extract only the last/first `sep`-delimited token
#   names: name(s) of the new column(s); the default "file_new" triggers
#          auto-generated names f1, f2, ... when type = "separate"
#   sep:   separator character (regex-escaped for word())
# Rows from historical/validation files are set aside and get NA in the new
# columns, then re-appended at the end.
filetosep <- function(df, type = "separate", names = "file_new", sep = "_"){
  df_hist <- df %>% filter(str_detect(file, "valid|historical"))
  if(nrow(df_hist)>0){
    for(n in names) df_hist[n] <- NA
    df <- df %>% filter(!str_detect(file, "valid|historical"))
  }
  if(type == "separate") {
    numsep <- max(str_count(unique(df$file), pattern = sep))
    # BUG FIX: the auto-generated names were assigned to `name` (a dead
    # variable) instead of `names`, so separate() never received them.
    # NOTE(review): numsep separators imply numsep + 1 fields, so separate()
    # drops the last field with a warning -- confirm whether that is intended.
    if(names[1]=="file_new") names <- paste0("f",seq(numsep))
    df <- df %>% mutate(file_new=file) %>% separate(file_new, names, sep = sep)
  }
  if (type == "last") {df$fx <- word(df$file,-1,sep = paste0("\\",sep)); setnames(df, "fx", names)}
  if (type == "first") {df$fx <- word(df$file,1,sep = paste0("\\",sep)); setnames(df, "fx", names)}
  if(nrow(df_hist)>0) df <- rbind(df, df_hist)
  return(df)
}
# Split the scenario identifier in df$file (fixed-width "sspN_rcpN_spaN"
# layout, character positions 1-4 / 6-9 / 11-14) into three columns
# ssp / rcp / spa, dropping the original `file` column.
ssptriple <- function(df)
{
  scen <- df$file
  parts <- data.frame(ssp = substr(scen, 1, 4),
                      rcp = substr(scen, 6, 9),
                      spa = substr(scen, 11, 14))
  # Collapse individual SPA variants into a generic label.
  parts$spa <- str_replace(parts$spa, "spa[1-5]", "spaX")
  # special cases for BAU: normalise rcp/spa fields of BAU scenarios.
  is_bau <- str_detect(parts$rcp, "bau")
  if (any(is_bau)) {
    parts$rcp[is_bau] <- "bau"
    parts$spa[is_bau] <- "spa0"
  }
  out <- cbind(df, parts)
  out$file <- NULL
  out
}
# Pause execution until the user presses Enter (interactive sessions only).
# Returns the entered line invisibly, like the original.
readkey <- function()
{
  cat ("Press [enter] to continue")
  invisible(readline())
}
# Expand stochastic-run results into one pseudo-file per branch.
# Rows whose t label has the form "<t>_<branch>" (detected via the sentinel
# "10_1") are tagged "<file>(b<branch>)", and the common pre-resolution rows
# (labels without "_") are duplicated into every branch.
convert_stochastic_gdx <- function(allfilesdata){
for(.file in unique(allfilesdata$file)){
tempstochdata <- subset(allfilesdata, file==.file)
if('10_1' %in% tempstochdata$t){
# Rows without "_" belong to the shared pre-resolution trajectory.
tempstochdata_before_resolution <- subset(tempstochdata, !grepl("_", t))
tempstochdata <- subset(tempstochdata, grepl("_", t))
tempstochdata$file <- paste0(.file, "(b",str_sub(tempstochdata$t, -1),")")
branches <- unique(str_sub(tempstochdata$t, -1))
# Keep only the time part of the "<t>_<branch>" label.
# NOTE(review): str_sub(t, 1, 2) assumes two-character time labels -- confirm.
tempstochdata$t <- str_sub(tempstochdata$t, 1,2)
for(.branch in branches){
tempstochdata_before_resolution$file <- paste0(.file, "(b",.branch,")")
tempstochdata <-rbind(tempstochdata,tempstochdata_before_resolution)}
}
# NOTE(review): growing the result with rbind() in a loop is O(n^2); fine for
# a few files, consider a list + do.call(rbind, ...) if this becomes slow.
if(.file==unique(allfilesdata$file)[1]){allfilesdata_stoch_converted=tempstochdata}else{allfilesdata_stoch_converted <-rbind(allfilesdata_stoch_converted,tempstochdata)}
}
return(allfilesdata_stoch_converted)
}
# Determine the plotting unit and multiplicative conversion factor for a
# WITCH variable.
#   variable_name: variable whose "[unit]" tag is looked up in the global
#                  table `all_var_descriptions` (name/description columns)
#   unit/convert:  if `unit` != "", this pair overrides the automatic lookup
# Returns list(unit = <label for plots>, convert = <factor applied to data>).
unit_conversion <- function(variable_name, unit="", convert=1){
#if unit is not "", keep its unit and convert using convert factor
if(unit!=""){
unit_plot <- unit; unit_conversion <- convert
}else{
#automatic unit and conversion factor
variable_description <- ""
if(variable_name %in% all_var_descriptions$name) variable_description <- all_var_descriptions$description[match(variable_name, all_var_descriptions$name)]
# Extract the text between "[" and "]" in the description.
unit_witch <- gsub(".*\\[(.*).*", "\\1", sub(" *\\].*", "", variable_description))
if(is.na(unit_witch) | unit_witch==""){unit_witch="na"}
unit_conversion_table <-"witch_unit plot_unit conversion_factor
TWh EJ 0.0036
T$ 'billion USD' 1e3
T$/TWh $/GJ 277777.777778
GtCe GtCO2 3.67
TW GW 1e3
T$/GTon $/tCO2 272.727272727
T$/GTonC $/tCO2 272.727272727
T$/GtCeq $/tCO2 272.727272727
GTonC GtCO2 3.67
GtonC GtCO2 3.67
GtCe GtCO2 3.67
na na 1
'deg C above preindustrial levels' 'degree C' 1
"
unit_conversion_table <- read.table(textConnection(unit_conversion_table), sep="", head=T, dec=".")
unit_plot = unit_witch;unit_conversion=1 #by default, keep original
if(!is.na(match(unit_witch, unit_conversion_table$witch_unit))){
unit_plot <- as.character(unit_conversion_table$plot_unit[match(unit_witch, unit_conversion_table$witch_unit)])
unit_conversion <- unit_conversion_table$conversion_factor[match(unit_witch, unit_conversion_table$witch_unit)]}
}
#Finally, for specific variables apply custom unit and conversion
unit_conversion_user_specific <-"varname plot_unit conversion_factor
tpes EJ 0.0036
tpes_kali EJ 0.0036
ei_kali MJ/$ 1
"
unit_conversion_user_specific <- read.table(textConnection(unit_conversion_user_specific), sep="", head=T, dec=".")
if(variable_name %in% unit_conversion_user_specific$varname){
unit_plot <- unit_conversion_user_specific$plot_unit[unit_conversion_user_specific$varname==variable_name]
unit_conversion <- unit_conversion_user_specific$conversion_factor[unit_conversion_user_specific$varname==variable_name]
}
#dollar deflator conversion if other base year than 2005
usd_deflator = 1 #by default, all values in 2005 USD
#usd_deflator = 108.686/91.987 #2014 USD
#usd_deflator = 1.10774 #2010 USD
# BUG FIX: str_detect(unit_plot, "$") treated "$" as a regex end-of-string
# anchor, which matches EVERY unit; fixed("$") tests for a literal dollar sign
# so the deflator only applies to monetary units.
if(str_detect(unit_plot, fixed("$")) | str_detect(unit_plot, "USD")){unit_conversion <- unit_conversion * usd_deflator}
return(list(unit=unit_plot, convert=unit_conversion))
}
# default meta param (taken from scaling.R)
# Build the default aggregation metadata used when scaling/aggregating model
# variables: one row per parameter with type "nagg" (aggregation function:
# sum / mean / max) plus one row per parameter with type "nweight" whose
# value is "gdp". Requires data.table (fread, data.table, setcolorder).
default_meta_param <- function(){
"parameter,value
I,sum
K,sum
Q,sum
BAU_Q,sum
COST_EN,mean
COST_FUEL,mean
I_EN,sum
K_EN,sum
MCOST_INV,mean
MCOST_FUEL,mean
Q_EN,sum
Q_IN,sum
Q_FUEL,sum
SHARE_EL,mean
COST_EMI,mean
CUM_EMI,sum
Q_EMI,sum
BAU_Q_EMI,sum
I_RD,sum
K_RD,sum
K_RD_F,sum
SPILL,mean
ABAT,sum
Q_WBIO,sum
Q_REDD,sum
MCOST_EMI,mean
I_EN_WINDOFF,sum
I_EN_WINDON,sum
K_EN_WINDOFF,sum
K_EN_WINDON,sum
Q_EN_WINDOFF,sum
Q_EN_WINDON,sum
I_EN_PV,sum
I_EN_CSP,sum
K_EN_PV,sum
K_EN_CSP,sum
Q_EN_PV,sum
Q_EN_CSP,sum
Q_EL_FLEX,sum
K_EN_GRID,sum
I_EN_GRID,sum
ADDOILCAP,sum
COST_OIL,mean
CUM_OIL,sum
I_OIL,sum
I_OUT,sum
OILCAP,sum
OILPROD,sum
Q_EMI_OUT,sum
Q_OUT,sum
RF,max
TEMP,max
TRF,max
W_EMI,max
WCUM_EMI,max
OMEGA,mean
QEL_EDV,sum
QEL_FR_EDV,sum
emi_cap,sum
ken_policy,sum
ren_share,mean
temp_valid_hadcrut4,mean
ctax,mean
carbonprice,mean
CPRICE,mean
FPRICE,mean
" -> defmap
dm <- fread(defmap)
# Tag every row as an aggregation rule, then append gdp-weighting rows.
dm[,type:="nagg"]
dm = rbind(dm,data.table(parameter=dm$parameter, type="nweight", value="gdp"))
setcolorder(dm,c("parameter", "type", "value"))
return(dm)
}
| /R/auxiliary_functions.R | permissive | Mareasunami/witch-plot | R | false | false | 9,995 | r | #Auxiliary Functions
# Inlined copy of mapvalues() (originally from the plyr package) so we avoid
# depending on the retired plyr package.
# Replace each element of `x` that matches an element of `from` with the
# corresponding element of `to`; elements not listed in `from` are unchanged.
# For factors, only the levels are remapped (integer codes are untouched).
# With warn_missing = TRUE, a message reports `from` values absent from `x`.
mapvalues <- function(x, from, to, warn_missing = TRUE) {
  if (length(from) != length(to)) {
    stop("`from` and `to` vectors are not the same length.")
  }
  if (!is.atomic(x)) {
    stop("`x` must be an atomic vector.")
  }
  if (is.factor(x)) {
    # If x is a factor, call self but operate on the levels
    levels(x) <- mapvalues(levels(x), from, to, warn_missing)
    return(x)
  }
  mapidx <- match(x, from)
  mapidxNA <- is.na(mapidx)
  # index of items in `from` that were found in `x`
  from_found <- sort(unique(mapidx))
  if (warn_missing && length(from_found) != length(from)) {
    # seq_along() instead of 1:length(): safe even for zero-length `from`.
    message("The following `from` values were not present in `x`: ",
            paste(from[!(seq_along(from) %in% from_found)], collapse = ", "))
  }
  x[!mapidxNA] <- to[mapidx[!mapidxNA]]
  x
}
ttoyear <- function(t){year=((as.numeric(t)-1) * tstep + year0); return(year);}
yeartot <- function(year){t=((as.numeric(as.character(year)) - year0) / tstep) + 1; return(t);}
convert_pdftopng <- F #converts all created pdfs to png for better quality (needs pdftopng.exe in your PATH. Download from http://www.xpdfreader.com/download.html)
saveplot <- function(plotname, width=7, height=5, text_size=10, plotdata=NULL, suffix="", transparent=FALSE, add_title=TRUE, forpaper=F){
if(!deploy_online) if(!dir.exists(graphdir)){dir.create(graphdir)} #create directory for graphs
if(figure_format!="png"){transparent = FALSE}
if(figure_format=="pdf"){plot_device=cairo_pdf}else{plot_device=figure_format}
if(figure_format=="eps"){plot_device=cairo_ps}
#device=cairo_pdf makes PDFs work with greek symbols etc.
if("t" %in% colnames(plotdata)){plotdata$t <- ttoyear(plotdata$t)}
if(!exists("legend_position")){legend_position = "bottom"}
if(legend_position=="bottom"){legend_direction="horizontal"}else{legend_direction="vertical"}
if(transparent){transparent_background <- theme(legend.background = element_blank(), panel.background = element_blank(), plot.background = element_rect(fill = "transparent",colour = NA))}else{transparent_background = NULL}
print(ggplot2::last_plot())
if(!deploy_online){
ggsave(filename=file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), plot = ggplot2::last_plot() + if(add_title){labs(title=plotname)}else{labs(title="")} + theme(text = element_text(size=text_size), legend.position=legend_position, legend.direction = legend_direction, legend.key = element_rect(colour = NA), legend.title=element_blank()), width=width, height=height, bg = "transparent", device = plot_device)
if(figure_format=="pdf" & convert_pdftopng) shell(str_glue('pdftopng.exe {file.path(graphdir, paste0(as.character(gsub(" ", "_", plotname)),".", figure_format))} - > {file.path(graphdir, paste0(as.character(gsub(" ", "_", plotname)),".", "png"))}'))
if(!is.null(plotdata) & write_plotdata_csv){write.xlsx(subset(plotdata), file = file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname, suffix)),".xlsx")))}
if(forpaper){
if(!dir.exists(file.path(graphdir, "forpaper"))){dir.create(file.path(graphdir, "forpaper"))}
file.copy(file.path(graphdir,paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), file.path(graphdir, "forpaper", paste0(as.character(gsub("[ |_|-]", "_", plotname)),suffix,".",figure_format)), overwrite = T)
}
}
}
# Split the `file` column of a results data frame into scenario components.
#   df:    data frame with a character `file` column
#   type:  "separate" -> split on every `sep` into new columns;
#          "last"/"first" -> extract only the last/first `sep`-delimited token
#   names: name(s) of the new column(s); the default "file_new" triggers
#          auto-generated names f1, f2, ... when type = "separate"
#   sep:   separator character (regex-escaped for word())
# Rows from historical/validation files are set aside and get NA in the new
# columns, then re-appended at the end.
filetosep <- function(df, type = "separate", names = "file_new", sep = "_"){
  df_hist <- df %>% filter(str_detect(file, "valid|historical"))
  if(nrow(df_hist)>0){
    for(n in names) df_hist[n] <- NA
    df <- df %>% filter(!str_detect(file, "valid|historical"))
  }
  if(type == "separate") {
    numsep <- max(str_count(unique(df$file), pattern = sep))
    # BUG FIX: the auto-generated names were assigned to `name` (a dead
    # variable) instead of `names`, so separate() never received them.
    # NOTE(review): numsep separators imply numsep + 1 fields, so separate()
    # drops the last field with a warning -- confirm whether that is intended.
    if(names[1]=="file_new") names <- paste0("f",seq(numsep))
    df <- df %>% mutate(file_new=file) %>% separate(file_new, names, sep = sep)
  }
  if (type == "last") {df$fx <- word(df$file,-1,sep = paste0("\\",sep)); setnames(df, "fx", names)}
  if (type == "first") {df$fx <- word(df$file,1,sep = paste0("\\",sep)); setnames(df, "fx", names)}
  if(nrow(df_hist)>0) df <- rbind(df, df_hist)
  return(df)
}
ssptriple <- function(df) #Function converts a single "file" columns to three with SSP, RCP, SPA
{
scenario <- df$file
triple <- as.data.frame(matrix(0, ncol = 0, nrow = length(scenario)))
triple$ssp=substr(scenario, 1, 4)
triple$rcp=substr(scenario, 6, 9)
triple$spa=substr(scenario, 11, 14)
triple$spa <- str_replace(triple$spa, "spa[1-5]", "spaX")
#special cases for BAU
if(length(triple[str_detect(triple$rcp, "bau"),1])>0){triple[str_detect(triple$rcp, "bau"),]$rcp <- "bau"}
if(length(triple[str_detect(triple$rcp, "bau"),1])>0){triple[str_detect(triple$rcp, "bau"),]$spa <- "spa0"}
df_new <- cbind(df, triple)
df_new$file <- NULL
return(df_new)
}
readkey <- function()
{
cat ("Press [enter] to continue")
line <- readline()
}
convert_stochastic_gdx <- function(allfilesdata){
for(.file in unique(allfilesdata$file)){
tempstochdata <- subset(allfilesdata, file==.file)
if('10_1' %in% tempstochdata$t){
tempstochdata_before_resolution <- subset(tempstochdata, !grepl("_", t))
tempstochdata <- subset(tempstochdata, grepl("_", t))
tempstochdata$file <- paste0(.file, "(b",str_sub(tempstochdata$t, -1),")")
branches <- unique(str_sub(tempstochdata$t, -1))
tempstochdata$t <- str_sub(tempstochdata$t, 1,2)
for(.branch in branches){
tempstochdata_before_resolution$file <- paste0(.file, "(b",.branch,")")
tempstochdata <-rbind(tempstochdata,tempstochdata_before_resolution)}
}
if(.file==unique(allfilesdata$file)[1]){allfilesdata_stoch_converted=tempstochdata}else{allfilesdata_stoch_converted <-rbind(allfilesdata_stoch_converted,tempstochdata)}
}
return(allfilesdata_stoch_converted)
}
# Determine the plotting unit and multiplicative conversion factor for a
# WITCH variable.
#   variable_name: variable whose "[unit]" tag is looked up in the global
#                  table `all_var_descriptions` (name/description columns)
#   unit/convert:  if `unit` != "", this pair overrides the automatic lookup
# Returns list(unit = <label for plots>, convert = <factor applied to data>).
unit_conversion <- function(variable_name, unit="", convert=1){
#if unit is not "", keep its unit and convert using convert factor
if(unit!=""){
unit_plot <- unit; unit_conversion <- convert
}else{
#automatic unit and conversion factor
variable_description <- ""
if(variable_name %in% all_var_descriptions$name) variable_description <- all_var_descriptions$description[match(variable_name, all_var_descriptions$name)]
# Extract the text between "[" and "]" in the description.
unit_witch <- gsub(".*\\[(.*).*", "\\1", sub(" *\\].*", "", variable_description))
if(is.na(unit_witch) | unit_witch==""){unit_witch="na"}
unit_conversion_table <-"witch_unit plot_unit conversion_factor
TWh EJ 0.0036
T$ 'billion USD' 1e3
T$/TWh $/GJ 277777.777778
GtCe GtCO2 3.67
TW GW 1e3
T$/GTon $/tCO2 272.727272727
T$/GTonC $/tCO2 272.727272727
T$/GtCeq $/tCO2 272.727272727
GTonC GtCO2 3.67
GtonC GtCO2 3.67
GtCe GtCO2 3.67
na na 1
'deg C above preindustrial levels' 'degree C' 1
"
unit_conversion_table <- read.table(textConnection(unit_conversion_table), sep="", head=T, dec=".")
unit_plot = unit_witch;unit_conversion=1 #by default, keep original
if(!is.na(match(unit_witch, unit_conversion_table$witch_unit))){
unit_plot <- as.character(unit_conversion_table$plot_unit[match(unit_witch, unit_conversion_table$witch_unit)])
unit_conversion <- unit_conversion_table$conversion_factor[match(unit_witch, unit_conversion_table$witch_unit)]}
}
#Finally, for specific variables apply custom unit and conversion
unit_conversion_user_specific <-"varname plot_unit conversion_factor
tpes EJ 0.0036
tpes_kali EJ 0.0036
ei_kali MJ/$ 1
"
unit_conversion_user_specific <- read.table(textConnection(unit_conversion_user_specific), sep="", head=T, dec=".")
if(variable_name %in% unit_conversion_user_specific$varname){
unit_plot <- unit_conversion_user_specific$plot_unit[unit_conversion_user_specific$varname==variable_name]
unit_conversion <- unit_conversion_user_specific$conversion_factor[unit_conversion_user_specific$varname==variable_name]
}
#dollar deflator conversion if other base year than 2005
usd_deflator = 1 #by default, all values in 2005 USD
#usd_deflator = 108.686/91.987 #2014 USD
#usd_deflator = 1.10774 #2010 USD
# BUG FIX: str_detect(unit_plot, "$") treated "$" as a regex end-of-string
# anchor, which matches EVERY unit; fixed("$") tests for a literal dollar sign
# so the deflator only applies to monetary units.
if(str_detect(unit_plot, fixed("$")) | str_detect(unit_plot, "USD")){unit_conversion <- unit_conversion * usd_deflator}
return(list(unit=unit_plot, convert=unit_conversion))
}
# default meta param (taken from scaling.R)
default_meta_param <- function(){
"parameter,value
I,sum
K,sum
Q,sum
BAU_Q,sum
COST_EN,mean
COST_FUEL,mean
I_EN,sum
K_EN,sum
MCOST_INV,mean
MCOST_FUEL,mean
Q_EN,sum
Q_IN,sum
Q_FUEL,sum
SHARE_EL,mean
COST_EMI,mean
CUM_EMI,sum
Q_EMI,sum
BAU_Q_EMI,sum
I_RD,sum
K_RD,sum
K_RD_F,sum
SPILL,mean
ABAT,sum
Q_WBIO,sum
Q_REDD,sum
MCOST_EMI,mean
I_EN_WINDOFF,sum
I_EN_WINDON,sum
K_EN_WINDOFF,sum
K_EN_WINDON,sum
Q_EN_WINDOFF,sum
Q_EN_WINDON,sum
I_EN_PV,sum
I_EN_CSP,sum
K_EN_PV,sum
K_EN_CSP,sum
Q_EN_PV,sum
Q_EN_CSP,sum
Q_EL_FLEX,sum
K_EN_GRID,sum
I_EN_GRID,sum
ADDOILCAP,sum
COST_OIL,mean
CUM_OIL,sum
I_OIL,sum
I_OUT,sum
OILCAP,sum
OILPROD,sum
Q_EMI_OUT,sum
Q_OUT,sum
RF,max
TEMP,max
TRF,max
W_EMI,max
WCUM_EMI,max
OMEGA,mean
QEL_EDV,sum
QEL_FR_EDV,sum
emi_cap,sum
ken_policy,sum
ren_share,mean
temp_valid_hadcrut4,mean
ctax,mean
carbonprice,mean
CPRICE,mean
FPRICE,mean
" -> defmap
dm <- fread(defmap)
dm[,type:="nagg"]
dm = rbind(dm,data.table(parameter=dm$parameter, type="nweight", value="gdp"))
setcolorder(dm,c("parameter", "type", "value"))
return(dm)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/reverse_ip.R
\name{reverse_ip}
\alias{reverse_ip}
\title{Reverse IP}
\usage{
reverse_ip(ip, limit = NULL)
}
\arguments{
\item{ip}{IP address to perform the reverse IP query}
\item{limit}{Limits the size of the domain list than can appear in a
response. The limit is applied per-IP address, not for the entire
request.}
}
\value{
a \code{list} of result details for the \code{ip}
}
\description{
The Reverse IP API provides a list of domain names that share the same
Internet host (i.e. the same IP address).
}
\note{
In rare cases, you may request an IP for which no recent Whois
record is available. If that occurs, the system will respond with
an error.
}
\examples{
reverse_ip("64.246.165.240")
}
\references{
\url{http://www.domaintools.com/resources/api-documentation/reverse-ip/}
}
| /man/reverse_ip.Rd | no_license | cneskey/domaintools | R | false | false | 887 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/reverse_ip.R
\name{reverse_ip}
\alias{reverse_ip}
\title{Reverse IP}
\usage{
reverse_ip(ip, limit = NULL)
}
\arguments{
\item{ip}{IP address to perform the reverse IP query}
\item{limit}{Limits the size of the domain list than can appear in a
response. The limit is applied per-IP address, not for the entire
request.}
}
\value{
a \code{list} of result details for the \code{ip}
}
\description{
The Reverse IP API provides a list of domain names that share the same
Internet host (i.e. the same IP address).
}
\note{
In rare cases, you may request an IP for which no recent Whois
record is available. If that occurs, the system will respond with
an error.
}
\examples{
reverse_ip("64.246.165.240")
}
\references{
\url{http://www.domaintools.com/resources/api-documentation/reverse-ip/}
}
|
setwd("C:/Users/Cecilia/Google Drive/STAT/STAT research/Urban")
landuse.load = FALSE
## Ceci you probably don't have this?
source("code/plotcode/plotinit.R")
##------------------------------------##
### crime data
## import
# Read the raw incident extract; keep codes as character (no factors) and
# use the reserved words TRUE/FALSE instead of the reassignable aliases T/F.
crime.pre <- read.csv("data/csv/crimeupdatedcsv.csv",
                      sep = ",",
                      header = TRUE,
                      stringsAsFactors = FALSE)
## create coords
# SHAPE appears to hold WKT-style "POINT (x y)" strings: drop the 7-character
# prefix, then split on the space between the coordinates -- TODO confirm
# against the raw file.
temp.point <- substr(crime.pre$SHAPE, 8, nchar(crime.pre$SHAPE))
temp.space <- regexpr(" ", temp.point)
temp.pointX <- as.numeric(substr(temp.point, 1, temp.space - 1))
temp.pointY <- as.numeric(substr(temp.point, temp.space + 1, nchar(temp.point) - 1))
## actually they have the same NAs, but just in case
rem.ind.points <- is.na(temp.pointX) | is.na(temp.pointY)
## want to make this exact same as old file. Remove crap points, keep order:
crimes.new <- crime.pre[!rem.ind.points,c('DC_DIST', 'PSA', 'UCR_GENERAL',
                                          'TEXT_GENERAL_CODE')]
crimes.new$POINT_X <- temp.pointX[!rem.ind.points]
crimes.new$POINT_Y <- temp.pointY[!rem.ind.points]
## crime type oh yes
# Map UCR general codes to human-readable crime-type labels.
crimetypes <- c('murder', 'rape', 'robbery', 'assault',
'burglary', 'theft', 'motortheft',
'otherassault', 'arson', 'forgery',
'fraud', 'embezzlement', 'receivestolen',
'vandalism', 'weaponviolation',
'prostitution', 'sexoffense', 'drugviolation',
'gambling', 'familyoffense', 'dui', 'liquorlaw',
'publicdrunk', 'disorderly', 'vagrancy',
'other')
# NOTE(review): assumes exactly 26 distinct UCR codes whose sorted order
# lines up 1:1 with the labels above -- confirm against the raw data.
crimeframe <- data.frame(ucr = sort(unique(crime.pre$UCR_GENERAL)),
crimetype = crimetypes)
temp.crimetype = crimeframe$crimetype[match(crime.pre$UCR_GENERAL, crimeframe$ucr)]
# Keep only the rows with valid coordinates, matching crimes.new above.
crimes.new$crimetype = temp.crimetype[!rem.ind.points]
## there's a timedate col, but it's in 12h... so I won't use it
# Rebuild date and timestamp columns from the 24h dispatch fields.
# NOTE(review): tz = "EST" is a fixed UTC-5 zone (no daylight saving);
# confirm that is intended for Philadelphia timestamps rather than
# "America/New_York".
temp.date = as.Date(crime.pre$DISPATCH_DATE)
temp.time = crime.pre$DISPATCH_TIME
crimes.new$date = temp.date[!rem.ind.points]
temp.timedate = as.POSIXct(paste(temp.date, temp.time), tz = 'EST')
crimes.new$timedate = temp.timedate[!rem.ind.points]
## add time with any given Sunday (only so that it has a Sunday zero point
## for week plotting)
temp.time <- format(crimes.new$timedate, format = "%H:%M:%S")
given.sunday <- '2016-05-08'
crimes.new$time <- as.POSIXct(paste(given.sunday, temp.time), tz = "EST")
## add weekday
crimes.new$weekday <- weekdays(crimes.new$timedate)
##------------------------------------##
## add block, blockgroup
## CECI: this is potentially diff from what you have!
## spatial situ
# Build sp points from the crime coordinates.
# NOTE(review): assumes the coordinates share the CRS of phillyblock /
# phillyblockgroup -- confirm the shapefile projection.
pts <- SpatialPoints(crimes.new[ ,c('POINT_X', 'POINT_Y')])
# over(..., returnList = TRUE) returns, per polygon, the indices of the
# crimes that fall inside it.
crimebyblock <- over(phillyblock, pts, returnList = TRUE)
crimebyblockgroup <- over(phillyblockgroup, pts, returnList = TRUE)
# we want to record which polygon a crime happens in, in the dataframe
crimes.new$block <- NA
crimes.new$blockgroup <- NA
# Vectorised inverse mapping: flatten the per-polygon index lists, then walk
# the flattened vector in one slice per polygon.
crimevector <- unlist(crimebyblock)
crimeblocklength <- lengths(crimebyblock)  # was lapply(..., length); lengths() gives an integer vector
crimeind.temp <- c(0, cumsum(crimeblocklength))
for(i in seq_along(crimebyblock)){
  # BUG FIX: seq_len() yields an EMPTY index when a polygon contains no
  # crimes; the previous (start + 1):end form produced a descending
  # 2-element range in that case and mislabelled neighbouring crimes.
  temp.index <- crimeind.temp[i] + seq_len(crimeblocklength[i])
  crimes.new$block[crimevector[temp.index]] <- i
  Iprint(i, 10)
}
crimevector <- unlist(crimebyblockgroup)
crimeblockgrouplength <- lengths(crimebyblockgroup)
crimeind.temp <- c(0, cumsum(crimeblockgrouplength))
for(i in seq_along(crimebyblockgroup)){
  temp.index <- crimeind.temp[i] + seq_len(crimeblockgrouplength[i])
  crimes.new$blockgroup[crimevector[temp.index]] <- i
  Iprint(i, 10)
}
##------------------------------------##
## add year
temp.year = format(crimes.new$date, '%Y')
crimes.new$year <- as.numeric(temp.year)
## sort the whole thing
# Chronological order by full timestamp.
crimes.new <- crimes.new[order(crimes.new$timedate),]
## a small fraction (863 or 0.04%) aren't in Philly (according to our shapefile)
## remove them
## (note: each crime either has both a block and blockgroup, or neither)
crimes.new <- crimes.new[!is.na(crimes.new$block),]
##------------------------------------##
# Persist the cleaned incident-level table for downstream scripts.
save(crimes.new, file = 'data/crime/crimes.new.rdata')
##------------------------------------##
## now we want to tabulate crimes by block, and blockgroup
# Build, for blocks and blockgroups, a wide table of crime counts per UCR
# code, padding in all-zero rows for polygons that recorded no crimes so the
# result has exactly one row per polygon (aligned with phillydata[[bg]]).
bbg <- c('block', 'blockgroup')
temp.wide.pre <- list()
temp.wide <- list()
for(bg in bbg){
temp.wide.pre[[bg]] <- table(crimes.new[,bg], crimes.new$UCR_GENERAL)
## need to add back zero rows
## i.e. blocks with no recorded crimes
## first add them at the bottom, get the names of the nonempties
rowsused <- as.numeric(rownames(temp.wide.pre[[bg]]))
temp.wide[[bg]] <- rbind(temp.wide.pre[[bg]],
                         matrix(0,
                                nrow(phillydata[[bg]]) - length(rowsused),
                                ncol(temp.wide.pre[[bg]])))
## give the blank rows names
rownames(temp.wide[[bg]]) <- c(rowsused, (1:nrow(phillydata[[bg]]))[-rowsused])
## order by name
temp.wide[[bg]] <- temp.wide[[bg]][order(as.numeric(rownames(temp.wide[[bg]]))),]
temp.wide[[bg]] <- as.data.frame(temp.wide[[bg]])
}
## add crimes to name, if you want!
# Prefix each UCR-code column with its readable label.
# NOTE(review): assumes the table's column order matches crimeframe's sorted
# UCR order (both derive from sort(unique(UCR_GENERAL))) -- confirm.
temp.names = names(temp.wide[['block']])
names(temp.wide[['block']]) <- paste0(crimeframe$crimetype, '.', temp.names)
temp.names = names(temp.wide[['blockgroup']])
names(temp.wide[['blockgroup']]) <- paste0(crimeframe$crimetype, '.', temp.names)
save(temp.wide, file = "data/cleaned/crime.temp.wide.rdata")
| /get_data/subsetup/setupcrime_colman.R | no_license | rzgross/Urban-project | R | false | false | 5,923 | r | setwd("C:/Users/Cecilia/Google Drive/STAT/STAT research/Urban")
landuse.load <- FALSE  # `<-` for assignment per style guide (was `=`)
## Ceci you probably don't have this?
source("code/plotcode/plotinit.R")
##------------------------------------##
### crime data
## import
# Read the raw incident extract; keep codes as character (no factors) and
# use the reserved words TRUE/FALSE instead of the reassignable aliases T/F.
crime.pre <- read.csv("data/csv/crimeupdatedcsv.csv",
                      sep = ",",
                      header = TRUE,
                      stringsAsFactors = FALSE)
## create coords
# SHAPE appears to hold WKT-style "POINT (x y)" strings: drop the 7-character
# prefix, then split on the space between the coordinates -- TODO confirm
# against the raw file.
temp.point <- substr(crime.pre$SHAPE, 8, nchar(crime.pre$SHAPE))
temp.space <- regexpr(" ", temp.point)
temp.pointX <- as.numeric(substr(temp.point, 1, temp.space - 1))
temp.pointY <- as.numeric(substr(temp.point, temp.space + 1, nchar(temp.point) - 1))
## actually they have the same NAs, but just in case
rem.ind.points <- is.na(temp.pointX) | is.na(temp.pointY)
## want to make this exact same as old file. Remove crap points, keep order:
crimes.new <- crime.pre[!rem.ind.points,c('DC_DIST', 'PSA', 'UCR_GENERAL',
                                          'TEXT_GENERAL_CODE')]
crimes.new$POINT_X <- temp.pointX[!rem.ind.points]
crimes.new$POINT_Y <- temp.pointY[!rem.ind.points]
## crime type oh yes
crimetypes <- c('murder', 'rape', 'robbery', 'assault',
'burglary', 'theft', 'motortheft',
'otherassault', 'arson', 'forgery',
'fraud', 'embezzlement', 'receivestolen',
'vandalism', 'weaponviolation',
'prostitution', 'sexoffense', 'drugviolation',
'gambling', 'familyoffense', 'dui', 'liquorlaw',
'publicdrunk', 'disorderly', 'vagrancy',
'other')
crimeframe <- data.frame(ucr = sort(unique(crime.pre$UCR_GENERAL)),
crimetype = crimetypes)
temp.crimetype = crimeframe$crimetype[match(crime.pre$UCR_GENERAL, crimeframe$ucr)]
crimes.new$crimetype = temp.crimetype[!rem.ind.points]
## there's a timedate col, but it's in 12h... so I won't use it
temp.date = as.Date(crime.pre$DISPATCH_DATE)
temp.time = crime.pre$DISPATCH_TIME
crimes.new$date = temp.date[!rem.ind.points]
temp.timedate = as.POSIXct(paste(temp.date, temp.time), tz = 'EST')
crimes.new$timedate = temp.timedate[!rem.ind.points]
## add time with any given Sunday (only so that it has a Sunday zero point
## for week plotting)
temp.time <- format(crimes.new$timedate, format = "%H:%M:%S")
given.sunday <- '2016-05-08'
crimes.new$time <- as.POSIXct(paste(given.sunday, temp.time), tz = "EST")
## add weekday
crimes.new$weekday <- weekdays(crimes.new$timedate)
##------------------------------------##
## add block, blockgroup
## CECI: this is potentially diff from what you have!
## spatial situ
pts <- SpatialPoints(crimes.new[ ,c('POINT_X', 'POINT_Y')])
## now over - lists
## you'd save time by just doing blocks and then matching blocks to
## blockgroups, but not much time, so copying this code was easier
## than writing a simple match
crimebyblock <- over(phillyblock, pts, returnList = T)
crimebyblockgroup <- over(phillyblockgroup, pts, returnList = T)
## we want to record which block a crime happens in, in the dataframe
crimes.new$block <- NA
crimes.new$blockgroup <- NA
## ## easy code, but slow:
## for(i in 1:length(crimebyblock)){
## crimes$block[crimebyblock[[i]]] = i
## }
## replace with:
crimevector <- unlist(crimebyblock)
crimeblocklength <- lapply(crimebyblock, length)
crimeind.temp <- c(0, cumsum(crimeblocklength))
for(i in 1:length(crimebyblock)){
temp.index <- (crimeind.temp[i] + 1): (crimeind.temp[i + 1])
crimes.new$block[crimevector[temp.index]] = i
Iprint(i, 10)
}
crimevector <- unlist(crimebyblockgroup)
crimeblockgrouplength <- lapply(crimebyblockgroup, length)
crimeind.temp <- c(0, cumsum(crimeblockgrouplength))
for(i in 1:length(crimebyblockgroup)){
temp.index <- (crimeind.temp[i] + 1): (crimeind.temp[i + 1])
crimes.new$blockgroup[crimevector[temp.index]] = i
Iprint(i, 10)
}
##------------------------------------##
## add year
temp.year = format(crimes.new$date, '%Y')
crimes.new$year <- as.numeric(temp.year)
## sort the whole thing
crimes.new <- crimes.new[order(crimes.new$timedate),]
## a small fraction (863 or 0.04%) aren't in Philly (according to our shapefile)
## remove them
## (note: each crime either has both a block and blockgroup, or neither)
crimes.new <- crimes.new[!is.na(crimes.new$block),]
##------------------------------------##
save(crimes.new, file = 'data/crime/crimes.new.rdata')
##------------------------------------##
## now we want to tabulate crimes by block, and blockgroup
bbg <- c('block', 'blockgroup')
temp.wide.pre <- list()
temp.wide <- list()
for(bg in bbg){
temp.wide.pre[[bg]] <- table(crimes.new[,bg], crimes.new$UCR_GENERAL)
## need to add back zero rows
## i.e. blocks with no recorded crimes
## first add them at the bottom, get the names of the nonempties
rowsused <- as.numeric(rownames(temp.wide.pre[[bg]]))
temp.wide[[bg]] <- rbind(temp.wide.pre[[bg]],
matrix(0,
nrow(phillydata[[bg]]) - length(rowsused),
ncol(temp.wide.pre[[bg]])))
## give the blank rows names
rownames(temp.wide[[bg]]) <- c(rowsused, (1:nrow(phillydata[[bg]]))[-rowsused])
## order by name
temp.wide[[bg]] <- temp.wide[[bg]][order(as.numeric(rownames(temp.wide[[bg]]))),]
temp.wide[[bg]] <- as.data.frame(temp.wide[[bg]])
}
## add crimes to name, if you want!
temp.names = names(temp.wide[['block']])
names(temp.wide[['block']]) <- paste0(crimeframe$crimetype, '.', temp.names)
temp.names = names(temp.wide[['blockgroup']])
names(temp.wide[['blockgroup']]) <- paste0(crimeframe$crimetype, '.', temp.names)
save(temp.wide, file = "data/cleaned/crime.temp.wide.rdata")
|
## Ankurjha17 - Coursera - R Programming - Assignment 2
## Functions and their descriptions:
#-------------------------------------------------------------------
## - makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse
#-------------------------------------------------------------------
makeCacheMatrix <- function(x = matrix()) {
  ## Closure-based cache: `x` holds the matrix, `z` its (lazily supplied)
  ## inverse. Both live in this function's environment and are shared by
  ## the four accessor functions below.
  z <- NULL
  set <- function(y) {
    x <<- y       # replace the stored matrix
    z <<- NULL    # invalidate any cached inverse
  }
  get <- function() x
  setInverse <- function(inverse) z <<- inverse
  getInverse <- function() z
  ## Expose the accessors as a named list.
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
#-------------------------------------------------------------------
## - cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
#-------------------------------------------------------------------
cacheSolve <- function(x) {
  ## Return the inverse of the matrix stored in `x` (a makeCacheMatrix
  ## object), reusing the cached value when one already exists.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("extracting cached data")
    return(cached)
  }
  mat <- x$get()       # fetch the stored matrix
  inv <- solve(mat)    # compute its inverse
  x$setInverse(inv)    # cache for subsequent calls
  inv
}
#------------------------------------------------------------------- | /cachematrix.R | no_license | ankurjha17/R-_prog_assign | R | false | false | 1,389 | r | ## Ankurjha17 - Coursera - R Programming - Assignment 2
## Functions and their descriptions:
#-------------------------------------------------------------------
## - makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse
#-------------------------------------------------------------------
makeCacheMatrix <- function(x = matrix()) {
  ## Closure-based cache: `x` holds the matrix, `z` its (lazily supplied)
  ## inverse. Both live in this function's environment and are shared by
  ## the four accessor functions below.
  z <- NULL
  set <- function(y) {
    x <<- y       # replace the stored matrix
    z <<- NULL    # invalidate any cached inverse
  }
  get <- function() x
  setInverse <- function(inverse) z <<- inverse
  getInverse <- function() z
  ## Expose the accessors as a named list.
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
#-------------------------------------------------------------------
## - cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above
#-------------------------------------------------------------------
cacheSolve <- function(x) {
  ## Return the inverse of the matrix stored in `x` (a makeCacheMatrix
  ## object), reusing the cached value when one already exists.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("extracting cached data")
    return(cached)
  }
  mat <- x$get()       # fetch the stored matrix
  inv <- solve(mat)    # compute its inverse
  x$setInverse(inv)    # cache for subsequent calls
  inv
}
#------------------------------------------------------------------- |
#' Remove observations with missing values
#'
#' `step_naomit` creates a *specification* of a recipe step that
#' will remove observations (rows of data) if they contain `NA`
#' or `NaN` values.
#'
#' @param recipe A recipe object. The step will be added to the sequence of
#' operations for this recipe.
#' @param ... One or more selector functions to choose which
#' variables will be used to remove observations containing `NA` or `NaN`
#' values. See [selections()] for more details.
#' @param role Unused, include for consistency with other steps.
#' @param trained A logical to indicate if the quantities for preprocessing
#' have been estimated. Again included for consistency.
#' @param columns A character string of variable names that will
#' be populated (eventually) by the `terms` argument.
#' @param id A character string that is unique to this step to identify it.
#' @param skip A logical. Should the step be skipped when the
#' recipe is baked by [bake.recipe()]? While all operations are baked
#' when [prep.recipe()] is run, some operations may not be able to be
#' conducted on new data (e.g. processing the outcome variable(s)).
#' Care should be taken when using `skip = FALSE`; in most instances that
#' affect the rows of the data being predicted, this step probably should not
#' be applied.
#'
#' @rdname step_naomit
#' @return An updated version of `recipe` with the
#' new step added to the sequence of existing steps (if any).
#' @export
#'
#' @examples
#'
#' recipe(Ozone ~ ., data = airquality) %>%
#' step_naomit(Solar.R) %>%
#' prep(airquality, verbose = FALSE) %>%
#' bake(new_data = NULL)
#'
#' @seealso [recipe()] [prep.recipe()] [bake.recipe()]
step_naomit <- function(recipe, ..., role = NA, trained = FALSE,
                        columns = NULL, skip = FALSE,
                        id = rand_id("naomit")) {
  ## Validate the selector expressions and build the (untrained) step
  ## object, then append it to the recipe's step sequence.
  new_step <- step_naomit_new(
    terms = ellipse_check(...),
    role = role,
    trained = trained,
    columns = columns,
    skip = skip,
    id = id
  )
  add_step(recipe, new_step)
}
step_naomit_new <- function(terms, role, trained, columns, skip, id) {
  ## Bare constructor: wrap the fields in a `step` object of subclass
  ## "naomit". No validation happens here; `step_naomit()` is the
  ## user-facing entry point.
  step(subclass = "naomit",
       terms = terms, role = role, trained = trained,
       columns = columns, skip = skip, id = id)
}
#' @export
#' @export
prep.step_naomit <- function(x, training, info = NULL, ...) {
  ## Training only resolves the selector expressions into concrete column
  ## names; no statistics need to be estimated for this step.
  resolved <- terms_select(x$terms, info = info)
  step_naomit_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = resolved,
    skip = x$skip,
    id = x$id
  )
}
#' @export
bake.step_naomit <- function(object, new_data, ...) {
  ## Drop any row with an NA in one of the selected columns, returning
  ## the result as a tibble.
  filtered <- tidyr::drop_na(new_data, object$columns)
  tibble::as_tibble(filtered)
}
print.step_naomit <-
  function(x, width = max(20, options()$width - 30), ...) {
    ## Describe the step and the selectors it applies to, e.g.
    ## "Removing rows with NA values in x, y".
    cat("Removing rows with NA values in ", sep = "")
    cat(format_selectors(x$terms, width = width))
    ## Trailing newline kept as a separate cat() so no extra space precedes it.
    cat("\n")
    invisible(x)
  }
#' @rdname step_naomit
#' @param x A `step_naomit` object.
#' @export
tidy.step_naomit <- function(x, ...) {
  ## One row per selected term, tagged with this step's id.
  out <- simple_terms(x, ...)
  out$id <- x$id
  out
}
| /R/naomit.R | no_license | p-hunter/recipes | R | false | false | 3,049 | r | #' Remove observations with missing values
#'
#' `step_naomit` creates a *specification* of a recipe step that
#' will remove observations (rows of data) if they contain `NA`
#' or `NaN` values.
#'
#' @param recipe A recipe object. The step will be added to the sequence of
#' operations for this recipe.
#' @param ... One or more selector functions to choose which
#' variables will be used to remove observations containing `NA` or `NaN`
#' values. See [selections()] for more details.
#' @param role Unused, include for consistency with other steps.
#' @param trained A logical to indicate if the quantities for preprocessing
#' have been estimated. Again included for consistency.
#' @param columns A character string of variable names that will
#' be populated (eventually) by the `terms` argument.
#' @param id A character string that is unique to this step to identify it.
#' @param skip A logical. Should the step be skipped when the
#' recipe is baked by [bake.recipe()]? While all operations are baked
#' when [prep.recipe()] is run, some operations may not be able to be
#' conducted on new data (e.g. processing the outcome variable(s)).
#' Care should be taken when using `skip = FALSE`; in most instances that
#' affect the rows of the data being predicted, this step probably should not
#' be applied.
#'
#' @rdname step_naomit
#' @return An updated version of `recipe` with the
#' new step added to the sequence of existing steps (if any).
#' @export
#'
#' @examples
#'
#' recipe(Ozone ~ ., data = airquality) %>%
#' step_naomit(Solar.R) %>%
#' prep(airquality, verbose = FALSE) %>%
#' bake(new_data = NULL)
#'
#' @seealso [recipe()] [prep.recipe()] [bake.recipe()]
step_naomit <- function(recipe, ..., role = NA, trained = FALSE,
                        columns = NULL, skip = FALSE,
                        id = rand_id("naomit")) {
  ## Validate the selector expressions and build the (untrained) step
  ## object, then append it to the recipe's step sequence.
  new_step <- step_naomit_new(
    terms = ellipse_check(...),
    role = role,
    trained = trained,
    columns = columns,
    skip = skip,
    id = id
  )
  add_step(recipe, new_step)
}
step_naomit_new <- function(terms, role, trained, columns, skip, id) {
  ## Bare constructor: wrap the fields in a `step` object of subclass
  ## "naomit". No validation happens here; `step_naomit()` is the
  ## user-facing entry point.
  step(subclass = "naomit",
       terms = terms, role = role, trained = trained,
       columns = columns, skip = skip, id = id)
}
#' @export
prep.step_naomit <- function(x, training, info = NULL, ...) {
  ## Training only resolves the selector expressions into concrete column
  ## names; no statistics need to be estimated for this step.
  resolved <- terms_select(x$terms, info = info)
  step_naomit_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = resolved,
    skip = x$skip,
    id = x$id
  )
}
#' @export
bake.step_naomit <- function(object, new_data, ...) {
  ## Drop any row with an NA in one of the selected columns, returning
  ## the result as a tibble.
  filtered <- tidyr::drop_na(new_data, object$columns)
  tibble::as_tibble(filtered)
}
print.step_naomit <-
  function(x, width = max(20, options()$width - 30), ...) {
    ## Describe the step and the selectors it applies to, e.g.
    ## "Removing rows with NA values in x, y".
    cat("Removing rows with NA values in ", sep = "")
    cat(format_selectors(x$terms, width = width))
    ## Trailing newline kept as a separate cat() so no extra space precedes it.
    cat("\n")
    invisible(x)
  }
#' @rdname step_naomit
#' @param x A `step_naomit` object.
#' @export
tidy.step_naomit <- function(x, ...) {
  ## One row per selected term, tagged with this step's id.
  out <- simple_terms(x, ...)
  out$id <- x$id
  out
}
|
# The aim of this code is to prepare the mortality data
# for use in estimating smoking transition probabilities
# and for use in the simulation model
library(data.table)
library(mort.tools)
library(readxl)
library(ggplot2)
library(magrittr)
# Load the mortality data
# This is stored on the university's X drive
# after having been processed into an aggregated form on the secure heta_study virtual machine
# Point to the location of the X drive
#root_dir <- "X:/"
root_dir <- "/Volumes/Shared/"
# Load the processed mortality data
#tob_mort_data <- fread(paste0(root_dir,
# "ScHARR/PR_Mortality_data_TA/Code/model_inputs/Output/tob_death_rates_national_2019-05-06_mort.tools_1.0.0.csv"))
#saveRDS(tob_mort_data, "intermediate_data/tob_mort_data.rds")
tob_mort_data <- readRDS("intermediate_data/tob_mort_data.rds")
# Filter data
tob_mort_data <- tob_mort_data[age %in% 11:89 & !is.na(cause) , c("age",
"sex",
"imd_quintile",
"year",
"cause",
"n_deaths",
"pops"), with = F]
# For the estimation of smoking transition probabilities -----------------
# Collapse data to remove stratification by cause
tob_mort_data_trans <- tob_mort_data[, list(n_deaths = sum(n_deaths, na.rm = T),
pops = unique(pops)), by = c("age", "sex", "imd_quintile", "year")]
# Recalculate the central death rates
tob_mort_data_trans[ , mx := n_deaths / pops]
# Remove variables not needed
tob_mort_data_trans[ , `:=`(n_deaths = NULL, pops = NULL)]
# Sort data
setorderv(tob_mort_data_trans, c("age", "year", "sex", "imd_quintile"), c(1, 1, 1, 1))
# Save the data for use in estimating smoking transition probabilities
saveRDS(tob_mort_data_trans, "intermediate_data/tob_mort_data_trans.rds")
rm(tob_mort_data_trans)
gc()
# For the simulation model -----------------
# Conduct a forecast of cause-specific mortality rates
# Load the parameters that control the smoothing and forecast methods for each cause
# these parameters have been tuned for each cause so that they produce a plausible looking forecast
params <- read_xlsx("tools/tobacco mortality forecasting parameters.xlsx") %>% setDT
# Create mx column
tob_mort_data[ , mx_cause := n_deaths / pops]
# Run the forecast
# This produces a cause-specific forecast and an all-cause forecast
# It writes a large folder of cause-specific diagnostics to the project folder
cforecast <- mort.tools::CombinedForecast(
data = tob_mort_data,
forecast_params = params,
n_years = 2100 - 2016 # time horizon - jumpoff year
)
# Grab the cause-specific forecast
tob_mort_data_cause <- copy(cforecast$mx_data_cause)
# Change variable names
setnames(tob_mort_data_cause, c("cause", "mx"), c("condition", "mix"))
# Save the data for use in the simulation model
saveRDS(tob_mort_data_cause, "intermediate_data/tob_mort_data_cause.rds")
rm(tob_mort_data_cause)
gc()
| /src/15_prep_mortality.R | no_license | VictimOfMaths/smoking_intervention_analysis | R | false | false | 3,264 | r |
# The aim of this code is to prepare the mortality data
# for use in estimating smoking transition probabilities
# and for use in the simulation model
library(data.table)
library(mort.tools)
library(readxl)
library(ggplot2)
library(magrittr)
# Load the mortality data
# This is stored on the university's X drive
# after having been processed into an aggregated form on the secure heta_study virtual machine
# Point to the location of the X drive
#root_dir <- "X:/"
root_dir <- "/Volumes/Shared/"
# Load the processed mortality data
#tob_mort_data <- fread(paste0(root_dir,
# "ScHARR/PR_Mortality_data_TA/Code/model_inputs/Output/tob_death_rates_national_2019-05-06_mort.tools_1.0.0.csv"))
#saveRDS(tob_mort_data, "intermediate_data/tob_mort_data.rds")
tob_mort_data <- readRDS("intermediate_data/tob_mort_data.rds")
# Filter data
tob_mort_data <- tob_mort_data[age %in% 11:89 & !is.na(cause) , c("age",
"sex",
"imd_quintile",
"year",
"cause",
"n_deaths",
"pops"), with = F]
# For the estimation of smoking transition probabilities -----------------
# Collapse data to remove stratification by cause
tob_mort_data_trans <- tob_mort_data[, list(n_deaths = sum(n_deaths, na.rm = T),
pops = unique(pops)), by = c("age", "sex", "imd_quintile", "year")]
# Recalculate the central death rates
tob_mort_data_trans[ , mx := n_deaths / pops]
# Remove variables not needed
tob_mort_data_trans[ , `:=`(n_deaths = NULL, pops = NULL)]
# Sort data
setorderv(tob_mort_data_trans, c("age", "year", "sex", "imd_quintile"), c(1, 1, 1, 1))
# Save the data for use in estimating smoking transition probabilities
saveRDS(tob_mort_data_trans, "intermediate_data/tob_mort_data_trans.rds")
rm(tob_mort_data_trans)
gc()
# For the esimulation model -----------------
# Conduct a forecast of cause-specific mortality rates
# Load the paramaters that control the smoothing and forecast methods for each cause
# these parameters have been tuned for each cause so that they produce a plausible looking forecast
params <- read_xlsx("tools/tobacco mortality forecasting parameters.xlsx") %>% setDT
# Create mx column
tob_mort_data[ , mx_cause := n_deaths / pops]
# Run the forecast
# This produces a cause-specific forecast and an all-cause forecast
# It writes a large folder of cause-specific diagnostics to the project folder
cforecast <- mort.tools::CombinedForecast(
data = tob_mort_data,
forecast_params = params,
n_years = 2100 - 2016 # time horizon - jumpoff year
)
# Grab the cause-specific forecast
tob_mort_data_cause <- copy(cforecast$mx_data_cause)
# Change variable names
setnames(tob_mort_data_cause, c("cause", "mx"), c("condition", "mix"))
# Save the data for use in the simulation model
saveRDS(tob_mort_data_cause, "intermediate_data/tob_mort_data_cause.rds")
rm(tob_mort_data_cause)
gc()
|
#' @import purrr
#' @import stats
#' @importFrom magrittr %>%
#' @details
#' Linear Regression with Little Bag of Bootstraps
utils::globalVariables(c("."))
#' Bag of Little Boostraps Linear Regression
#' @param formula formula
#' @param data dataset of interest
#' @param m int: number of groups to split data into, used in bootstrapping process
#' @param B int: number of bootstrap samples
#' @param parallel boolean: defines whether or not to use parallelization. Note: if TRUE, run furrr::plan(multisession, worker = desired_number_of_CPUs) in console before using function.
#' @param cl int: desired number of clusters
#' @return linear regression model
#' @export
blblm <- function(formula, data, m = 10, B = 5000, parallel = FALSE, cl = NULL) {
  ## Dispatch to the serial or cluster-parallel implementation.
  if (parallel == FALSE) {
    return(blblm_under(formula, data, m = m, B = B))
  }
  blblm_under_parallel(formula, data, clusters = cl, m = m, B = B)
}
blblm_under <- function(formula, data, m = 10, B = 5000) {
  ## Serial BLB: split the data into m random subsamples, then fit B
  ## bootstrap replicates of the linear model on each subsample.
  subsamples <- split_data(data, m)
  n_total <- nrow(data)
  estimates <- map(
    subsamples,
    ~ lm_each_subsample(formula = formula, data = ., n = n_total, B = B)
  )
  ## Package the per-subsample estimates with the formula as a blblm object.
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blblm"
  invisible(res)
}
blblm_under_parallel <- function(formula, data, clusters, m = 10, B = 5000) {
  ## Parallel BLB: same computation as blblm_under(), but the per-subsample
  ## work is distributed over a PSOCK cluster with `clusters` workers.
  data_list <- split_data(data, m)
  ## `parallel::` qualification: the package's roxygen header imports only
  ## purrr/stats/magrittr, so the parallel functions must be namespaced.
  cl <- parallel::makeCluster(clusters)
  ## FIX: tear the cluster down even if a worker errors, so worker
  ## processes are never leaked (originally stopCluster only ran on success).
  on.exit(parallel::stopCluster(cl), add = TRUE)
  estimates <- parallel::parLapply(cl, data_list,
    function(data, formula, n, B) {
      lm_each_subsample(formula, data = data, n = n, B = B)
    },
    formula = formula, n = nrow(data), B = B
  )
  ## Package the per-subsample estimates with the formula as a blblm object.
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blblm"
  invisible(res)
}
split_data <- function(data, m) {
  ## Assign each row to one of m groups uniformly at random, then split
  ## the data frame by group id (groups with no rows simply don't appear).
  group_id <- sample.int(m, nrow(data), replace = TRUE)
  split(data, group_id)
}
lm_each_subsample <- function(formula, data, n, B) {
  ## Fit B independent bootstrap replicates on this subsample,
  ## returned as a plain (unsimplified) list of length B.
  lapply(seq_len(B), function(rep_i) lm_each_boot(formula, data, n))
}
lm_each_boot <- function(formula, data, n) {
  ## Draw multinomial case weights that emulate resampling n rows (the size
  ## of the full data set) from this subsample, then fit one weighted model.
  boot_weights <- rmultinom(1, n, rep(1, nrow(data)))
  lm1(formula, data, boot_weights)
}
lm1 <- function(formula, data, freqs) {
  ## Fit one weighted least-squares bootstrap replicate.
  ## Reset the formula's environment so lm() resolves `freqs` (the bootstrap
  ## case weights) in this function's frame rather than wherever the formula
  ## was originally created. (Note: this is lm(), not glm(), despite what an
  ## earlier comment claimed.)
  environment(formula) <- environment()
  fit <- lm(formula, data, weights = freqs)
  ## Return the bootstrap statistics: coefficient vector and residual sigma.
  list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
blbcoef <- function(fit) {
  ## Extract the fitted regression coefficients for one bootstrap replicate.
  stats::coef(fit)
}
blbsigma <- function(fit) {
  ## Weighted residual standard deviation for one bootstrap replicate:
  ## sqrt( sum(w * e^2) / (sum(w) - rank) ), where w are the bootstrap
  ## case weights and e the residuals.
  rank <- fit$rank
  response <- model.extract(fit$model, "response")
  resid <- fitted(fit) - response
  wts <- fit$weights
  sqrt(sum(wts * resid^2) / (sum(wts) - rank))
}
#' Print
#' @param x blblm object
#' @param ... unused arguments
#' @return prints model
#' @export
#' @method print blblm
print.blblm <- function(x, ...) {
  ## Show the model formula; capture.output() renders it as text.
  cat("blblm model:", capture.output(x$formula))
  ## Trailing newline kept as a separate cat() so no extra space precedes it.
  cat("\n")
}
#' Standard Deviation
#' @param object blb_logreg object
#' @param confidence boolean: confidence interval
#' @param level int: confidence level
#' @param ... unused arguments
#' @return standard deviation
#' @export
#' @method sigma blblm
sigma.blblm <- function(object, confidence = FALSE, level = 0.95, ...) {
  ## Point estimate: average the bootstrap sigmas within each subsample,
  ## then average across subsamples.
  est <- object$estimates
  sigma <- mean(map_dbl(est, ~ mean(map_dbl(., "sigma"))))
  if (confidence) {
    ## BUG FIX: alpha was hard-coded as `1 - 0.95`, silently ignoring the
    ## user-supplied `level` argument; it now honours it.
    alpha <- 1 - level
    ## Equal-tailed percentile interval of the bootstrap sigmas, averaged
    ## across subsamples.
    limits <- est %>%
      map_mean(~ quantile(map_dbl(., "sigma"), c(alpha / 2, 1 - alpha / 2))) %>%
      set_names(NULL)
    return(c(sigma = sigma, lwr = limits[1], upr = limits[2]))
  } else {
    return(sigma)
  }
}
#' Coefficients
#' @param object blb_logreg object
#' @param ... unused arguments
#' @return coefficients of parameters in linear regression model
#' @export
#' @method coef blblm
coef.blblm <- function(object, ...) {
  ## Average the bootstrap coefficient vectors within each subsample
  ## (column-bind then rowMeans), then average across subsamples (map_mean).
  est <- object$estimates
  map_mean(est, ~ map_cbind(., "coef") %>% rowMeans())
}
#' Confidence interval of Mean
#' @param object blblm object
#' @param parm string/character vector: parameter(s) of interest
#' @param level numeric: confidence level
#' @param ... unused arguments
#' @return confidence interval of parameters of interest, all parameters by default
#' @export
#' @method confint blblm
confint.blblm <- function(object, parm = NULL, level = 0.95, ...) {
  ## Default to every term on the right-hand side of the model formula
  ## (note: this excludes the intercept).
  if (is.null(parm)) {
    parm <- attr(terms(object$formula), "term.labels")
  }
  alpha <- 1 - level
  est <- object$estimates
  ## For each parameter: percentile interval of its bootstrap coefficients
  ## within a subsample, averaged across subsamples; one row per parameter.
  out <- map_rbind(parm, function(p) {
    map_mean(est, ~ map_dbl(., list("coef", p)) %>% quantile(c(alpha / 2, 1 - alpha / 2)))
  })
  ## With a single parameter map_rbind yields a vector; coerce to a 1-row matrix.
  if (is.vector(out)) {
    out <- as.matrix(t(out))
  }
  dimnames(out)[[1]] <- parm
  out
}
#' Predict
#' @param object blblm object
#' @param new_data data set of interest
#' @param confidence boolean: confidence interval for predicted value
#' @param level numeric: confidence level
#' @param ... unused arguments
#' @return vector of predicted values of response variable
#' @export
#' @method predict blblm
predict.blblm <- function(object, new_data, confidence = FALSE, level = 0.95, ...) {
  est <- object$estimates
  ## Build the design matrix for new_data from the model's RHS terms.
  X <- model.matrix(reformulate(attr(terms(object$formula), "term.labels")), new_data)
  if (confidence) {
    ## Per subsample: one prediction column per bootstrap fit, then row-wise
    ## mean and percentile bounds; finally average over subsamples.
    map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>%
      apply(1, mean_lwr_upr, level = level) %>%
      t())
  } else {
    ## Point predictions: average over bootstrap fits, then over subsamples.
    map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>% rowMeans())
  }
}
# Point estimate plus the equal-tailed (1 - level) percentile interval of x,
# named c("fit", "lwr", "upr").
mean_lwr_upr <- function(x, level = 0.95) {
  alpha <- 1 - level
  bounds <- stats::setNames(quantile(x, c(alpha / 2, 1 - alpha / 2)),
                            c("lwr", "upr"))
  c(fit = mean(x), bounds)
}
# Apply .f to every element of .x (purrr mapper semantics, so `~`-style
# lambdas work) and return the elementwise mean of the results.
map_mean <- function(.x, .f, ...) {
  (map(.x, .f, ...) %>% reduce(`+`)) / length(.x)
}
# Apply .f over .x (purrr mapper semantics) and column-bind the results
# into one matrix.
map_cbind <- function(.x, .f, ...) {
  map(.x, .f, ...) %>% reduce(cbind)
}
# Apply .f over .x (purrr mapper semantics) and row-bind the results
# into one matrix.
map_rbind <- function(.x, .f, ...) {
  map(.x, .f, ...) %>% reduce(rbind)
}
| /R/blblm.R | permissive | jwebb1197/Bag_of_Little_Bootstraps | R | false | false | 7,270 | r | #' @import purrr
#' @import stats
#' @importFrom magrittr %>%
#' @details
#' Linear Regression with Little Bag of Bootstraps
utils::globalVariables(c("."))
#' Bag of Little Boostraps Linear Regression
#' @param formula formula
#' @param data dataset of interest
#' @param m int: number of groups to split data into, used in bootstrapping process
#' @param B int: number of bootstrap samples
#' @param parallel boolean: defines whether or not to use parallelization. Note: if TRUE, run furrr::plan(multisession, worker = desired_number_of_CPUs) in console before using function.
#' @param cl int: desired number of clusters
#' @return linear regression model
#' @export
blblm <- function(formula, data, m = 10, B = 5000, parallel = FALSE, cl = NULL) {
# function layout:
# i) if not using parallelization, run blblm_under(), otherwise run blblm_under_parallel()
if (parallel == FALSE) {
blblm_under(formula, data, m = m, B = B)
}
else {
blblm_under_parallel(formula, data, clusters = cl, m = m, B = B)
}
}
blblm_under <- function(formula, data, m = 10, B = 5000) {
# function layout:
# i) split data into m approximately equal parts, store in a list called data_list
# ii) map these sub-datasets to lm_each_subsample(), store results in nested list called estimates
# iii) store estimates, and formula in a list called res
data_list <- split_data(data, m)
estimates <- map(
data_list,
~ lm_each_subsample(formula = formula, data = ., n = nrow(data), B = B)
)
res <- list(estimates = estimates, formula = formula)
class(res) <- "blblm"
invisible(res)
}
blblm_under_parallel <- function(formula, data, clusters, m = 10, B = 5000) {
  ## Parallel BLB: same computation as blblm_under(), but the per-subsample
  ## work is distributed over a PSOCK cluster with `clusters` workers.
  data_list <- split_data(data, m)
  ## `parallel::` qualification: the package's roxygen header imports only
  ## purrr/stats/magrittr, so the parallel functions must be namespaced.
  cl <- parallel::makeCluster(clusters)
  ## FIX: tear the cluster down even if a worker errors, so worker
  ## processes are never leaked (originally stopCluster only ran on success).
  on.exit(parallel::stopCluster(cl), add = TRUE)
  estimates <- parallel::parLapply(cl, data_list,
    function(data, formula, n, B) {
      lm_each_subsample(formula, data = data, n = n, B = B)
    },
    formula = formula, n = nrow(data), B = B
  )
  ## Package the per-subsample estimates with the formula as a blblm object.
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blblm"
  invisible(res)
}
split_data <- function(data, m) {
  ## Assign each row to one of m groups uniformly at random, then split
  ## the data frame by group id (groups with no rows simply don't appear).
  group_id <- sample.int(m, nrow(data), replace = TRUE)
  split(data, group_id)
}
lm_each_subsample <- function(formula, data, n, B) {
# function layout:
# i) replicate lm_each_boot() B times
replicate(B, lm_each_boot(formula, data, n), simplify = FALSE)
}
lm_each_boot <- function(formula, data, n) {
freqs <- rmultinom(1, n, rep(1, nrow(data)))
lm1(formula, data, freqs)
}
lm1 <- function(formula, data, freqs) {
# function layout:
# i) set to current environmnet
# ii) use glm() to fit a linear regression model to data
# iii) store coefficients and sigma in a list
environment(formula) <- environment()
fit <- lm(formula, data, weights = freqs)
list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
blbcoef <- function(fit) {
  ## Extract the fitted regression coefficients for one bootstrap replicate.
  stats::coef(fit)
}
blbsigma <- function(fit) {
  ## Weighted residual standard deviation for one bootstrap replicate:
  ## sqrt( sum(w * e^2) / (sum(w) - rank) ), where w are the bootstrap
  ## case weights and e the residuals.
  rank <- fit$rank
  response <- model.extract(fit$model, "response")
  resid <- fitted(fit) - response
  wts <- fit$weights
  sqrt(sum(wts * resid^2) / (sum(wts) - rank))
}
#' Print
#' @param x blblm object
#' @param ... unused arguments
#' @return prints model
#' @export
#' @method print blblm
print.blblm <- function(x, ...) {
cat("blblm model:", capture.output(x$formula))
cat("\n")
}
#' Standard Deviation
#' @param object blb_logreg object
#' @param confidence boolean: confidence interval
#' @param level int: confidence level
#' @param ... unused arguments
#' @return standard deviation
#' @export
#' @method sigma blblm
sigma.blblm <- function(object, confidence = FALSE, level = 0.95, ...) {
  ## Point estimate: average the bootstrap sigmas within each subsample,
  ## then average across subsamples.
  est <- object$estimates
  sigma <- mean(map_dbl(est, ~ mean(map_dbl(., "sigma"))))
  if (confidence) {
    ## BUG FIX: alpha was hard-coded as `1 - 0.95`, silently ignoring the
    ## user-supplied `level` argument; it now honours it.
    alpha <- 1 - level
    ## Equal-tailed percentile interval of the bootstrap sigmas, averaged
    ## across subsamples.
    limits <- est %>%
      map_mean(~ quantile(map_dbl(., "sigma"), c(alpha / 2, 1 - alpha / 2))) %>%
      set_names(NULL)
    return(c(sigma = sigma, lwr = limits[1], upr = limits[2]))
  } else {
    return(sigma)
  }
}
#' Coefficients
#' @param object blb_logreg object
#' @param ... unused arguments
#' @return coefficients of parameters in linear regression model
#' @export
#' @method coef blblm
coef.blblm <- function(object, ...) {
# function layout
# i) obtain estimates
# ii) obtain means of coefficients
est <- object$estimates
map_mean(est, ~ map_cbind(., "coef") %>% rowMeans())
}
#' Confidence interval of Mean
#' @param object blblm object
#' @param parm string/character vector: parameter(s) of interest
#' @param level numeric: confidence level
#' @param ... unused arguments
#' @return confidence interval of parameters of interest, all parameters by default
#' @export
#' @method confint blblm
confint.blblm <- function(object, parm = NULL, level = 0.95, ...) {
# function layout:
# i) obtain parameters
# ii) use map_rbind to obtain a confidence interval for parameters
# iii) label confidence intervals with corresponding parameter
if (is.null(parm)) {
parm <- attr(terms(object$formula), "term.labels")
}
alpha <- 1 - level
est <- object$estimates
out <- map_rbind(parm, function(p) {
map_mean(est, ~ map_dbl(., list("coef", p)) %>% quantile(c(alpha / 2, 1 - alpha / 2)))
})
if (is.vector(out)) {
out <- as.matrix(t(out))
}
dimnames(out)[[1]] <- parm
out
}
#' Predict
#' @param object blblm object
#' @param new_data data set of interest
#' @param confidence boolean: confidence interval for predicted value
#' @param level numeric: confidence level
#' @param ... unused arguments
#' @return vector of predicted values of response variable
#' @export
#' @method predict blblm
predict.blblm <- function(object, new_data, confidence = FALSE, level = 0.95, ...) {
# function layout:
# i)obtain estimates
# ii) obtain design matrix of predictors
# iii) obtain predictions; include confidence intervals if confidence = TRUE in arguments
est <- object$estimates
X <- model.matrix(reformulate(attr(terms(object$formula), "term.labels")), new_data)
if (confidence) {
map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>%
apply(1, mean_lwr_upr, level = level) %>%
t())
} else {
map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>% rowMeans())
}
}
# Point estimate plus the equal-tailed (1 - level) percentile interval of x,
# named c("fit", "lwr", "upr").
mean_lwr_upr <- function(x, level = 0.95) {
  alpha <- 1 - level
  bounds <- stats::setNames(quantile(x, c(alpha / 2, 1 - alpha / 2)),
                            c("lwr", "upr"))
  c(fit = mean(x), bounds)
}
# map then find mean of results
map_mean <- function(.x, .f, ...) {
(map(.x, .f, ...) %>% reduce(`+`)) / length(.x)
}
# map and column bind
map_cbind <- function(.x, .f, ...) {
map(.x, .f, ...) %>% reduce(cbind)
}
# map and rowbind
map_rbind <- function(.x, .f, ...) {
map(.x, .f, ...) %>% reduce(rbind)
}
|
#Examine LST file
# Parse a HydroGeoSphere .lst run log: per-timestep solver diagnostics are
# extracted into df_lst (loop below) and plotted afterwards.
library(tidyverse)
library(HOAL)
library(magrittr)
# Run directory; assumes it contains a single .lst file -- TODO confirm
# (readLines() is given the full vector of matches).
dir <- "D:/PhD/HOAL/modelling/HGS_model/HOAL_PEST_PP_3zn_run"
lst <- readLines(con = list.files(path = dir, pattern = "(\\.lst)$", full.names = T))
# Anchor lines: each timestep record starts at "SOLUTION FOR TIMESTEP" and
# ends near the following "FLUID BALANCE, TIME:" line.
ln_sol <- grep("(SOLUTION FOR TIMESTEP)", x = lst)
ln_wb <- grep("(FLUID BALANCE, TIME:)", x = lst)
# Accumulator filled by the parsing loop below.
df_lst <- data.frame()
# Parse one record per solved timestep. Rows are collected in a
# pre-allocated list and bound once afterwards instead of growing df_lst
# with rbind() inside the loop (which copies the whole frame every pass).
step_rows <- vector("list", length(ln_sol))
for (ii in seq_along(ln_sol)) {  # seq_along() is safe when no timestep matched
  st <- ln_sol[ii] + 2  # first line of data (Global target time)
  ed <- ln_wb[ii] - 3   # last line of data (multiplier)
  # The following line numbers can be vectors of multiple values when the
  # timestep was repeated; only the last occurrence is used below.
  gt <- st - 1 + grep("(Global target time)", x = lst[st:ed])                 # global target time
  iter <- st - 1 + grep("(Summary of nonlinear iteration)", x = lst[st:ed])   # iteration summary start
  mp <- st - 1 + grep("(Dt multiplier)", x = lst[st:ed])                      # multiplier table start
  # Number of times the time step was cut (each cut adds an extra summary).
  dtcut <- length(iter) - 1
  # Global target time: the text between "Global target time:" and "(".
  t_target <- as.numeric(stringr::str_extract(lst[tail(gt, 1)], "(?<=Global target time:).+(?=\\()"))
  # Time stepping information: the 4 numbers three lines above the last
  # iteration summary, named by the header line after the target time.
  df_tstep <-
    trimws(lst[tail(iter, 1) - 3]) %>%
    strsplit(split = "\\s+", fixed = FALSE) %>%
    unlist() %>%
    .[1:4] %>%
    as.numeric() %>%
    t() %>%
    data.frame() %>%
    `colnames<-`(unlist(strsplit(trimws(lst[tail(gt, 1) + 1]), split = "\\s+", fixed = FALSE)))
  # Summary of nonlinear iteration (last occurrence only).
  df_iter <-
    trimws(lst[tail(mp, 1) - 2]) %>%
    strsplit(split = "\\s+", fixed = FALSE) %>%
    unlist() %>%
    t() %>%
    data.frame(stringsAsFactors = FALSE) %>%
    `colnames<-`(unlist(strsplit(trimws(lst[tail(iter, 1) + 1]), split = "\\s+", fixed = FALSE)))
  df_iter[, 1:9] <- sapply(df_iter[, 1:9], as.numeric)
  # Multiplier table, reshaped to one wide row per timestep with a
  # <Variable>_<Statistic> column for every combination.
  df_mp <-
    # read table lines and split them by runs of 2+ white spaces
    strsplit(trimws(lst[tail(mp, 1):(ed - 1)]), split = "\\s{2,}", fixed = FALSE) %>%
    unlist() %>%
    matrix(nrow = (ed - tail(mp, 1)), byrow = TRUE) %>%
    data.frame(stringsAsFactors = FALSE) %>%
    `colnames<-`(c("Variable", "Max.change", "Target.change", "Dt.multiplier", "At.node")) %>%
    # drop the header and "======" separator rows
    .[-2:-1, ] %>%
    mutate(Time = df_tstep[, 2]) %>%
    pivot_longer(cols = c("Max.change", "Target.change", "Dt.multiplier", "At.node"),
                 names_to = "Stat") %>%
    pivot_wider(id_cols = Time, names_from = c(Variable, Stat), values_from = value)
  df_mp[, ] <- sapply(df_mp[, ], as.numeric)
  # Fluid (water) balance (currently unused):
  #time_wb <- trimws(stringr::str_extract(lst[ln_wb[ii]], "(?<=TIME:).+"))
  # One combined record for this timestep.
  step_rows[[ii]] <- data.frame(T_target = t_target,
                                dT_cut = dtcut,
                                df_tstep,
                                df_iter,
                                df_mp, stringsAsFactors = FALSE)
}
# Bind all records at once; keep an empty data.frame when nothing matched.
df_lst <- if (length(step_rows) > 0) do.call(rbind, step_rows) else data.frame()
# --- Diagnostic plots from the parsed .lst records (df_lst) ---
# Time step
# log10 of the adaptive time-step length over simulation time.
ggplot(df_lst,
aes(Time, log10(delta_t))) +
geom_path()
# Max change
# Per-step maximum change of head, water depth and saturation
# (the latter two scaled by 100 onto a common axis).
ggplot(df_lst,
aes(Time)) +
geom_path(aes(y = Head_Max.change, color = "Head")) +
geom_path(aes(y = 100*Water.depth_Max.change, color = "Water Depth")) +
geom_path(aes(y = 100*Saturation_Max.change, color = "Saturation"))
# cutting timesteps
ggplot(df_lst,
aes(Time, dT_cut))+
geom_path()
# Simulated water balance from the tecplot output of the same run.
df_wb <- read.tecplot(list.files(path = dir, pattern = "(water_balance.dat)", full.names = T))
left_join(df_lst, df_wb) %>%
pivot_longer(cols = c(`outflow-boundary`,ET_PET, ET_AET, rain, dT_cut, delta_t)) %>%
ggplot(., aes(Time, value, color = name)) +
geom_path()+
facet_wrap(name~., scales = "free_y", ncol = 1)
# water balance error
left_join(df_lst, df_wb) %>%
ggplot(., aes(Time)) +
geom_path(aes(y =`Error percent`))+
geom_path(aes(y = -ET_AET/1000), color = "red") +
geom_path(aes(y = rain/1000), color = "blue")
# comparison to observed
# Observed discharge at gauge MW, aggregated to daily steps; values scaled
# by /1000*86400 (assumed l/s -> m3/day -- TODO confirm units).
MW.d <- (to_timestep(HOAL.data::HOAL.Discharge[, "MW"],"days"))
# NOTE(review): the origin used here is 1950-01-01 while the blocks below
# use 2013-01-01 -- confirm which epoch the simulation Time axis refers to.
fortify.zoo(MW.d["2016/2018"]) %>%
mutate(Time = as.double(Index - as.POSIXct("1950-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,df_wb) %T>% View %>%
ggplot(., aes(Time)) +
geom_path(aes(y=-`outflow-boundary`, color = "Sim")) +
geom_path(aes(y = MW, color = "Obs"))
MW.d <- (to_timestep(HOAL.data::HOAL.Discharge[, "MW"],"days"))
# NOTE(review): wb_mp is not defined anywhere in this script -- presumably a
# water-balance table from an earlier session; verify (df_wb may be intended).
fortify.zoo(MW.d["2013/2018"]) %>%
mutate(Time = as.double(Index - as.POSIXct("2013-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,wb_mp) %>%
ggplot(., aes(Time)) +
geom_path(aes(y=-`outflow-boundary`, color = "Sim")) +
geom_path(aes(y = MW, color = "Obs"))
# Goodness-of-fit statistics (hydroGOF) for 2013 only.
fortify.zoo(MW.d["2013"]) %>%
mutate(Time = as.double(Index - as.POSIXct("2013-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,wb_mp) %$%
hydroGOF::gof(sim = -`outflow-boundary`, obs = MW, na.rm=T)
| /HGSlst.R | no_license | pavlopavlin/RHGS | R | false | false | 4,928 | r | #Examine LST file
# Parse a HydroGeoSphere .lst run log: per-timestep solver diagnostics are
# extracted into df_lst (loop below) and plotted afterwards.
library(tidyverse)
library(HOAL)
library(magrittr)
# Run directory; assumes it contains a single .lst file -- TODO confirm
# (readLines() is given the full vector of matches).
dir <- "D:/PhD/HOAL/modelling/HGS_model/HOAL_PEST_PP_3zn_run"
lst <- readLines(con = list.files(path = dir, pattern = "(\\.lst)$", full.names = T))
# Anchor lines: each timestep record starts at "SOLUTION FOR TIMESTEP" and
# ends near the following "FLUID BALANCE, TIME:" line.
ln_sol <- grep("(SOLUTION FOR TIMESTEP)", x = lst)
ln_wb <- grep("(FLUID BALANCE, TIME:)", x = lst)
# Accumulator filled by the parsing loop below.
df_lst <- data.frame()
# Parse one record per solved timestep. Rows are collected in a
# pre-allocated list and bound once afterwards instead of growing df_lst
# with rbind() inside the loop (which copies the whole frame every pass).
step_rows <- vector("list", length(ln_sol))
for (ii in seq_along(ln_sol)) {  # seq_along() is safe when no timestep matched
  st <- ln_sol[ii] + 2  # first line of data (Global target time)
  ed <- ln_wb[ii] - 3   # last line of data (multiplier)
  # The following line numbers can be vectors of multiple values when the
  # timestep was repeated; only the last occurrence is used below.
  gt <- st - 1 + grep("(Global target time)", x = lst[st:ed])                 # global target time
  iter <- st - 1 + grep("(Summary of nonlinear iteration)", x = lst[st:ed])   # iteration summary start
  mp <- st - 1 + grep("(Dt multiplier)", x = lst[st:ed])                      # multiplier table start
  # Number of times the time step was cut (each cut adds an extra summary).
  dtcut <- length(iter) - 1
  # Global target time: the text between "Global target time:" and "(".
  t_target <- as.numeric(stringr::str_extract(lst[tail(gt, 1)], "(?<=Global target time:).+(?=\\()"))
  # Time stepping information: the 4 numbers three lines above the last
  # iteration summary, named by the header line after the target time.
  df_tstep <-
    trimws(lst[tail(iter, 1) - 3]) %>%
    strsplit(split = "\\s+", fixed = FALSE) %>%
    unlist() %>%
    .[1:4] %>%
    as.numeric() %>%
    t() %>%
    data.frame() %>%
    `colnames<-`(unlist(strsplit(trimws(lst[tail(gt, 1) + 1]), split = "\\s+", fixed = FALSE)))
  # Summary of nonlinear iteration (last occurrence only).
  df_iter <-
    trimws(lst[tail(mp, 1) - 2]) %>%
    strsplit(split = "\\s+", fixed = FALSE) %>%
    unlist() %>%
    t() %>%
    data.frame(stringsAsFactors = FALSE) %>%
    `colnames<-`(unlist(strsplit(trimws(lst[tail(iter, 1) + 1]), split = "\\s+", fixed = FALSE)))
  df_iter[, 1:9] <- sapply(df_iter[, 1:9], as.numeric)
  # Multiplier table, reshaped to one wide row per timestep with a
  # <Variable>_<Statistic> column for every combination.
  df_mp <-
    # read table lines and split them by runs of 2+ white spaces
    strsplit(trimws(lst[tail(mp, 1):(ed - 1)]), split = "\\s{2,}", fixed = FALSE) %>%
    unlist() %>%
    matrix(nrow = (ed - tail(mp, 1)), byrow = TRUE) %>%
    data.frame(stringsAsFactors = FALSE) %>%
    `colnames<-`(c("Variable", "Max.change", "Target.change", "Dt.multiplier", "At.node")) %>%
    # drop the header and "======" separator rows
    .[-2:-1, ] %>%
    mutate(Time = df_tstep[, 2]) %>%
    pivot_longer(cols = c("Max.change", "Target.change", "Dt.multiplier", "At.node"),
                 names_to = "Stat") %>%
    pivot_wider(id_cols = Time, names_from = c(Variable, Stat), values_from = value)
  df_mp[, ] <- sapply(df_mp[, ], as.numeric)
  # Fluid (water) balance (currently unused):
  #time_wb <- trimws(stringr::str_extract(lst[ln_wb[ii]], "(?<=TIME:).+"))
  # One combined record for this timestep.
  step_rows[[ii]] <- data.frame(T_target = t_target,
                                dT_cut = dtcut,
                                df_tstep,
                                df_iter,
                                df_mp, stringsAsFactors = FALSE)
}
# Bind all records at once; keep an empty data.frame when nothing matched.
df_lst <- if (length(step_rows) > 0) do.call(rbind, step_rows) else data.frame()
# --- Diagnostic plots from the parsed .lst records (df_lst) ---
# Time step
# log10 of the adaptive time-step length over simulation time.
ggplot(df_lst,
aes(Time, log10(delta_t))) +
geom_path()
# Max change
# Per-step maximum change of head, water depth and saturation
# (the latter two scaled by 100 onto a common axis).
ggplot(df_lst,
aes(Time)) +
geom_path(aes(y = Head_Max.change, color = "Head")) +
geom_path(aes(y = 100*Water.depth_Max.change, color = "Water Depth")) +
geom_path(aes(y = 100*Saturation_Max.change, color = "Saturation"))
# cutting timesteps
ggplot(df_lst,
aes(Time, dT_cut))+
geom_path()
# Simulated water balance from the tecplot output of the same run.
df_wb <- read.tecplot(list.files(path = dir, pattern = "(water_balance.dat)", full.names = T))
left_join(df_lst, df_wb) %>%
pivot_longer(cols = c(`outflow-boundary`,ET_PET, ET_AET, rain, dT_cut, delta_t)) %>%
ggplot(., aes(Time, value, color = name)) +
geom_path()+
facet_wrap(name~., scales = "free_y", ncol = 1)
# water balance error
left_join(df_lst, df_wb) %>%
ggplot(., aes(Time)) +
geom_path(aes(y =`Error percent`))+
geom_path(aes(y = -ET_AET/1000), color = "red") +
geom_path(aes(y = rain/1000), color = "blue")
# comparison to observed
# Observed discharge at gauge MW, aggregated to daily steps; values scaled
# by /1000*86400 (assumed l/s -> m3/day -- TODO confirm units).
MW.d <- (to_timestep(HOAL.data::HOAL.Discharge[, "MW"],"days"))
# NOTE(review): the origin used here is 1950-01-01 while the blocks below
# use 2013-01-01 -- confirm which epoch the simulation Time axis refers to.
fortify.zoo(MW.d["2016/2018"]) %>%
mutate(Time = as.double(Index - as.POSIXct("1950-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,df_wb) %T>% View %>%
ggplot(., aes(Time)) +
geom_path(aes(y=-`outflow-boundary`, color = "Sim")) +
geom_path(aes(y = MW, color = "Obs"))
MW.d <- (to_timestep(HOAL.data::HOAL.Discharge[, "MW"],"days"))
# NOTE(review): wb_mp is not defined anywhere in this script -- presumably a
# water-balance table from an earlier session; verify (df_wb may be intended).
fortify.zoo(MW.d["2013/2018"]) %>%
mutate(Time = as.double(Index - as.POSIXct("2013-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,wb_mp) %>%
ggplot(., aes(Time)) +
geom_path(aes(y=-`outflow-boundary`, color = "Sim")) +
geom_path(aes(y = MW, color = "Obs"))
# Goodness-of-fit statistics (hydroGOF) for 2013 only.
fortify.zoo(MW.d["2013"]) %>%
mutate(Time = as.double(Index - as.POSIXct("2013-01-01", tz = "Etc/GMT-1"), unit = "days"),
MW = MW/1000*86400) %>%
full_join(.,wb_mp) %$%
hydroGOF::gof(sim = -`outflow-boundary`, obs = MW, na.rm=T)
|
# Effect of high-wage job growth on housing demand on the San Francisco Bay Area
# Analysis using the Longitudinal Employer-Household Dynamics Origin-Destination
# Employment Statistics (LODES) data 2008-2013 and the American Community Survey
#
# Alex Karner, alex.karner@coa.gatech.edu
# Chris Benner, cbenner@ucsc.edu
#
# Purpose:
# Create and visualize metrics of internal capture and commute distance.
#
# Output:
# Visualizations of internal capture and commute distance by year and jurisdiction.
# Uncomment this line by removing the '#' in front..
# setwd("C:/My Directory/LEHD")
# .. in order to set your current working directory.
# setwd("D:/Dropbox/Work/high-wage job growth")
options(scipen = 999) # Suppress scientific notation so we can see census geocodes
library(plyr); library(dplyr)
library(R.utils)
library(ggmap) # To query driving distances
library(reshape2)
library(grid) # unit() functionality
library(rgdal) # interface for the Geospatial Abstraction Library
library(rgeos)
library(scales)
# Load previously saved dataset
# containing block level flows for California
# NOTE(review): these .RData files are assumed to supply the objects used
# below (od.<year>.place tables, bay.area.od, places.to.skim,
# years.to.download) -- confirm.
load("data/BayAreaLEHD_od_FINAL.RData")
load("data/MTCandCountySkims_Google.RData")
# Calculate weighted mean commute distance by place of work
# For every LODES year, compute the distance-weighted mean commute by
# workplace (w_plc) for each worker segment, and store the molten result
# as commutes.<year> (downstream plotting code refers to these by name).
# Key-column coercions are loop-invariant, so do them once up front.
places.to.skim$h_plc <- as.character(places.to.skim$h_plc)
places.to.skim$w_cty <- as.integer(places.to.skim$w_cty)
for (year in years.to.download) {
  # Look the per-year table up by name; get() replaces the fragile
  # eval(parse(text = ...)) construction.
  this.year <- get(paste0("od.", year, ".place"))
  # Remove ", CA" suffix to facilitate join
  this.year$h_plc <- gsub(", CA", "", this.year$h_plc)
  this.year$w_plc <- gsub(", CA", "", this.year$w_plc)
  # Join place-to-place distance skims to the table
  this.year <- left_join(this.year, bay.area.od, by = c("h_plc" = "o_place", "w_plc" = "d_place"))
  # The remaining places are counties of residence outside the Bay Area
  this.year <- left_join(this.year, select(places.to.skim, h_plc, w_cty, miles),
                         by = c("h_plc" = "h_plc", "w_cty" = "w_cty"))
  # Fill missing place-level skims with the county-level distance
  this.year$skim <- ifelse(is.na(this.year$skim), this.year$miles, this.year$skim)
  # Distance-weight every worker-count column
  this.year <- mutate(this.year,
    s000_d = S000 * skim,
    sa01_d = SA01 * skim,
    sa02_d = SA02 * skim,
    sa03_d = SA03 * skim,
    se01_d = SE01 * skim,
    se02_d = SE02 * skim,
    se03_d = SE03 * skim,
    t1t2_d = t1t2 * skim,
    si01_d = SI01 * skim,
    si02_d = SI02 * skim,
    si03_d = SI03 * skim)
  this.year <- group_by(this.year, w_plc)
  # Weighted mean distance = sum(workers * distance) / sum(workers)
  this.year <- summarize(this.year,
    S000 = sum(s000_d) / sum(S000),
    SA01 = sum(sa01_d) / sum(SA01), SA02 = sum(sa02_d) / sum(SA02), SA03 = sum(sa03_d) / sum(SA03),
    SE01 = sum(se01_d) / sum(SE01), SE02 = sum(se02_d) / sum(SE02), SE03 = sum(se03_d) / sum(SE03),
    SI01 = sum(si01_d) / sum(SI01), SI02 = sum(si02_d) / sum(SI02), SI03 = sum(si03_d) / sum(SI03),
    t1t2 = sum(t1t2_d) / sum(t1t2))
  # Add year identifier and reshape to long format for plotting
  this.year$year <- year
  this.year <- melt(this.year, id = c("w_plc", "year"))
  assign(paste0("commutes.", year), this.year)
  rm(this.year)
}
# Plot results
# identify top 25 workplaces for visualization ("places" is provided by the
# loaded .RData file -- TODO confirm).
load("data/Top25Places.RData")
places <- gsub(", CA", "", places)
# Stack the per-year long tables built above.
# NOTE(review): the hard-coded range 2008-2013 must stay in sync with
# years.to.download -- confirm.
commutes.plot <- rbind(commutes.2013, commutes.2012, commutes.2011, commutes.2010, commutes.2009, commutes.2008)
commutes.plot <- filter(commutes.plot, w_plc %in% places)
# Shorten labels for plotting.
commutes.plot$w_plc <- gsub(" city", "", commutes.plot$w_plc)
commutes.plot$w_plc[commutes.plot$w_plc == "Unincorporated Sonoma county"] <- "Uninc. Sonoma County"
# Income groups
# SE01-SE03 earnings tiers: mean commute distance per year, one facet per city.
inc <- ggplot(filter(commutes.plot, variable %in% c("SE01", "SE02", "SE03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Dark2",
labels = c("tier 1 jobs", "tier 2 jobs", "tier 3 jobs")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
inc + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_Income.png", width = 14, height = 15, scale = 0.6)
# industry categories
ind <- ggplot(filter(commutes.plot, variable %in% c("SI01", "SI02", "SI03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("goods producing", "trade, transport, utilities", "other services")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
ind + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_Industry.png", width = 14, height = 15, scale = 0.6)
# Age
age <- ggplot(filter(commutes.plot, variable %in% c("SA01", "SA02", "SA03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("< 29", "30-54", "> 55")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
age + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_age.png", width = 14, height = 15, scale = 0.6)
# Internal capture -------------------------------------------------------------
# Use the total number of jobs as the denominator
# We want jurisdictions to be punished for having too little housing
# For every year, compute the share of each place's jobs held by workers who
# also live there; stored as internal.cap.<year> for the plotting code below.
for (year in years.to.download) {
  # get() replaces the fragile eval(parse(text = ...)) construction.
  this.year <- get(paste0("od.", year, ".place"))
  this.year.total <- group_by(this.year, w_plc)
  # Total jobs by workplace (the denominator)
  this.year.total <- summarize(this.year.total,
    S000 = sum(S000),
    SA01 = sum(SA01), SA02 = sum(SA02), SA03 = sum(SA03),
    SE01 = sum(SE01), SE02 = sum(SE02), SE03 = sum(SE03),
    SI01 = sum(SI01), SI02 = sum(SI02), SI03 = sum(SI03))
  # Extract internal flows (home place == work place)
  this.year.internal <- this.year[this.year$h_plc == this.year$w_plc, ]
  # Columns 4:13 hold the internal worker counts; prefix them "i_"
  names(this.year.internal)[4:13] <- paste0("i_", names(this.year.internal)[4:13])
  this.year.internal <- left_join(this.year.internal, this.year.total, by = c("w_plc" = "w_plc"))
  # Joined totals land in columns 15:24; prefix them "t_"
  names(this.year.internal)[15:24] <- paste0("t_", names(this.year.internal)[15:24])
  # Internal capture = internal flows / total jobs, columnwise
  internal.cap <- cbind("plc" = this.year.internal$w_plc,
                        this.year.internal[, 4:13] / this.year.internal[, 15:24])
  internal.cap$year <- year
  internal.cap <- melt(internal.cap, id = c("plc", "year"))
  assign(paste0("internal.cap.", year), internal.cap)
  rm(internal.cap)
}
# Stack the per-year internal-capture tables and restrict to the top places.
# NOTE(review): values are 0-1 shares (no *100 anywhere upstream) although
# the axis label below says "(%)" -- confirm intended scale.
internal.plot <- rbind(internal.cap.2013, internal.cap.2012,
internal.cap.2011, internal.cap.2010, internal.cap.2009, internal.cap.2008)
internal.plot <- filter(internal.plot, plc %in% places)
# Shorten labels for plotting.
internal.plot$plc <- gsub(", CA", "", internal.plot$plc)
internal.plot$plc[internal.plot$plc == "Unincorporated Sonoma county"] <- "Uninc. Sonoma County"
internal.plot$plc <- gsub(" city", "", internal.plot$plc)
# Income groups
inc <- ggplot(filter(internal.plot, variable %in% c("i_SE01", "i_SE02", "i_SE03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Dark2",
labels = c("tier 1 jobs", "tier 2 jobs", "tier 3 jobs")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
inc + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_Income.png", width = 14, height = 15, scale = 0.6)
# Industry groups
ind <- ggplot(filter(internal.plot, variable %in% c("i_SI01", "i_SI02", "i_SI03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("goods producing", "trade, transport, utilities", "other services")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
ind + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_Industry.png", width = 14, height = 15, scale = 0.6)
# Age
age <- ggplot(filter(internal.plot, variable %in% c("i_SA01", "i_SA02", "i_SA03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("< 29", "30 - 54", "> 55")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
age + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_age.png", width = 14, height = 15, scale = 0.6) | /04_Analyze flow data.R | no_license | aakarner/job-growth-affordability | R | false | false | 9,006 | r | # Effect of high-wage job growth on housing demand on the San Francisco Bay Area
# Analysis using the Longitudinal Employer-Household Dynamics Origin-Destination
# Employment Statistics (LODES) data 2008-2013 and the American Community Survey
#
# Alex Karner, alex.karner@coa.gatech.edu
# Chris Benner, cbenner@ucsc.edu
#
# Purpose:
# Create and visualize metrics of internal capture and commute distance.
#
# Output:
# Visualizations of internal capture and commute distance by year and jurisdiction.
# Uncomment this line by removing the '#' in front..
# setwd("C:/My Directory/LEHD")
# .. in order to set your current working directory.
# setwd("D:/Dropbox/Work/high-wage job growth")
options(scipen = 999) # Suppress scientific notation so we can see census geocodes
library(plyr); library(dplyr)
library(R.utils)
library(ggmap) # To query driving distances
library(reshape2)
library(grid) # unit() functionality
library(rgdal) # interface for the Geospatial Abstraction Library
library(rgeos)
library(scales)
# Load previously saved dataset
# containing block level flows for California
# NOTE(review): these .RData files are assumed to supply the objects used
# below (od.<year>.place tables, bay.area.od, places.to.skim,
# years.to.download) -- confirm.
load("data/BayAreaLEHD_od_FINAL.RData")
load("data/MTCandCountySkims_Google.RData")
# Calculate weighted mean commute distance by place of work
# For every LODES year, compute the distance-weighted mean commute by
# workplace (w_plc) for each worker segment, and store the molten result
# as commutes.<year> (downstream plotting code refers to these by name).
# Key-column coercions are loop-invariant, so do them once up front.
places.to.skim$h_plc <- as.character(places.to.skim$h_plc)
places.to.skim$w_cty <- as.integer(places.to.skim$w_cty)
for (year in years.to.download) {
  # Look the per-year table up by name; get() replaces the fragile
  # eval(parse(text = ...)) construction.
  this.year <- get(paste0("od.", year, ".place"))
  # Remove ", CA" suffix to facilitate join
  this.year$h_plc <- gsub(", CA", "", this.year$h_plc)
  this.year$w_plc <- gsub(", CA", "", this.year$w_plc)
  # Join place-to-place distance skims to the table
  this.year <- left_join(this.year, bay.area.od, by = c("h_plc" = "o_place", "w_plc" = "d_place"))
  # The remaining places are counties of residence outside the Bay Area
  this.year <- left_join(this.year, select(places.to.skim, h_plc, w_cty, miles),
                         by = c("h_plc" = "h_plc", "w_cty" = "w_cty"))
  # Fill missing place-level skims with the county-level distance
  this.year$skim <- ifelse(is.na(this.year$skim), this.year$miles, this.year$skim)
  # Distance-weight every worker-count column
  this.year <- mutate(this.year,
    s000_d = S000 * skim,
    sa01_d = SA01 * skim,
    sa02_d = SA02 * skim,
    sa03_d = SA03 * skim,
    se01_d = SE01 * skim,
    se02_d = SE02 * skim,
    se03_d = SE03 * skim,
    t1t2_d = t1t2 * skim,
    si01_d = SI01 * skim,
    si02_d = SI02 * skim,
    si03_d = SI03 * skim)
  this.year <- group_by(this.year, w_plc)
  # Weighted mean distance = sum(workers * distance) / sum(workers)
  this.year <- summarize(this.year,
    S000 = sum(s000_d) / sum(S000),
    SA01 = sum(sa01_d) / sum(SA01), SA02 = sum(sa02_d) / sum(SA02), SA03 = sum(sa03_d) / sum(SA03),
    SE01 = sum(se01_d) / sum(SE01), SE02 = sum(se02_d) / sum(SE02), SE03 = sum(se03_d) / sum(SE03),
    SI01 = sum(si01_d) / sum(SI01), SI02 = sum(si02_d) / sum(SI02), SI03 = sum(si03_d) / sum(SI03),
    t1t2 = sum(t1t2_d) / sum(t1t2))
  # Add year identifier and reshape to long format for plotting
  this.year$year <- year
  this.year <- melt(this.year, id = c("w_plc", "year"))
  assign(paste0("commutes.", year), this.year)
  rm(this.year)
}
# Plot results
# identify top 25 workplaces for visualization ("places" is provided by the
# loaded .RData file -- TODO confirm).
load("data/Top25Places.RData")
places <- gsub(", CA", "", places)
# Stack the per-year long tables built above.
# NOTE(review): the hard-coded range 2008-2013 must stay in sync with
# years.to.download -- confirm.
commutes.plot <- rbind(commutes.2013, commutes.2012, commutes.2011, commutes.2010, commutes.2009, commutes.2008)
commutes.plot <- filter(commutes.plot, w_plc %in% places)
# Shorten labels for plotting.
commutes.plot$w_plc <- gsub(" city", "", commutes.plot$w_plc)
commutes.plot$w_plc[commutes.plot$w_plc == "Unincorporated Sonoma county"] <- "Uninc. Sonoma County"
# Income groups
# SE01-SE03 earnings tiers: mean commute distance per year, one facet per city.
inc <- ggplot(filter(commutes.plot, variable %in% c("SE01", "SE02", "SE03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Dark2",
labels = c("tier 1 jobs", "tier 2 jobs", "tier 3 jobs")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
inc + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_Income.png", width = 14, height = 15, scale = 0.6)
# industry categories
ind <- ggplot(filter(commutes.plot, variable %in% c("SI01", "SI02", "SI03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("goods producing", "trade, transport, utilities", "other services")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
ind + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_Industry.png", width = 14, height = 15, scale = 0.6)
# Age
age <- ggplot(filter(commutes.plot, variable %in% c("SA01", "SA02", "SA03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("< 29", "30-54", "> 55")) +
xlab(NULL) + ylab("distance for workers commuting into city (miles)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
age + facet_wrap(~ w_plc)
ggsave("output_2013/Commute_age.png", width = 14, height = 15, scale = 0.6)
# Internal capture -------------------------------------------------------------
# Use the total number of jobs as the denominator
# We want jurisdictions to be punished for having too little housing
# For every year, compute the share of each place's jobs held by workers who
# also live there; stored as internal.cap.<year> for the plotting code below.
for (year in years.to.download) {
  # get() replaces the fragile eval(parse(text = ...)) construction.
  this.year <- get(paste0("od.", year, ".place"))
  this.year.total <- group_by(this.year, w_plc)
  # Total jobs by workplace (the denominator)
  this.year.total <- summarize(this.year.total,
    S000 = sum(S000),
    SA01 = sum(SA01), SA02 = sum(SA02), SA03 = sum(SA03),
    SE01 = sum(SE01), SE02 = sum(SE02), SE03 = sum(SE03),
    SI01 = sum(SI01), SI02 = sum(SI02), SI03 = sum(SI03))
  # Extract internal flows (home place == work place)
  this.year.internal <- this.year[this.year$h_plc == this.year$w_plc, ]
  # Columns 4:13 hold the internal worker counts; prefix them "i_"
  names(this.year.internal)[4:13] <- paste0("i_", names(this.year.internal)[4:13])
  this.year.internal <- left_join(this.year.internal, this.year.total, by = c("w_plc" = "w_plc"))
  # Joined totals land in columns 15:24; prefix them "t_"
  names(this.year.internal)[15:24] <- paste0("t_", names(this.year.internal)[15:24])
  # Internal capture = internal flows / total jobs, columnwise
  internal.cap <- cbind("plc" = this.year.internal$w_plc,
                        this.year.internal[, 4:13] / this.year.internal[, 15:24])
  internal.cap$year <- year
  internal.cap <- melt(internal.cap, id = c("plc", "year"))
  assign(paste0("internal.cap.", year), internal.cap)
  rm(internal.cap)
}
# Stack the per-year internal-capture tables and restrict to the top places.
# NOTE(review): values are 0-1 shares (no *100 anywhere upstream) although
# the axis label below says "(%)" -- confirm intended scale.
internal.plot <- rbind(internal.cap.2013, internal.cap.2012,
internal.cap.2011, internal.cap.2010, internal.cap.2009, internal.cap.2008)
internal.plot <- filter(internal.plot, plc %in% places)
# Shorten labels for plotting.
internal.plot$plc <- gsub(", CA", "", internal.plot$plc)
internal.plot$plc[internal.plot$plc == "Unincorporated Sonoma county"] <- "Uninc. Sonoma County"
internal.plot$plc <- gsub(" city", "", internal.plot$plc)
# Income groups
inc <- ggplot(filter(internal.plot, variable %in% c("i_SE01", "i_SE02", "i_SE03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Dark2",
labels = c("tier 1 jobs", "tier 2 jobs", "tier 3 jobs")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
inc + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_Income.png", width = 14, height = 15, scale = 0.6)
# Industry groups
ind <- ggplot(filter(internal.plot, variable %in% c("i_SI01", "i_SI02", "i_SI03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("goods producing", "trade, transport, utilities", "other services")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
ind + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_Industry.png", width = 14, height = 15, scale = 0.6)
# Age
age <- ggplot(filter(internal.plot, variable %in% c("i_SA01", "i_SA02", "i_SA03")),
aes(x = year, y = value, col = variable)) + geom_point() + geom_line(aes(group = variable)) +
scale_color_brewer(palette = "Set1",
labels = c("< 29", "30 - 54", "> 55")) +
xlab(NULL) + ylab("internal capture (%)") +
theme_bw() +
theme(plot.title = element_text(face = "bold"), legend.title = element_blank(),
axis.text.x = element_text(angle=45, vjust = 0.5),
legend.position = "bottom")
age + facet_wrap(~ plc)
ggsave("output_2013/Internal capture_age.png", width = 14, height = 15, scale = 0.6) |
#Import Data and subset for the test set:
all.cesd <- fread("cesd_item_level.csv", header = T)
all.cesd<- unique(all.cesd, by='userid')
names(all.cesd)
cesd <- all.cesd[,-(2:7)]
cesd[cesd == -1] <- NA
cesd[cesd == 0] <- NA
cesd.noNA <- na.omit(cesd)
sapply(cesd.noNA[,q1:q20], function(x) table(is.na(x)))
cor(cesd.noNA[,-1])
reverse.cols<- c(5, 9, 13,17)
cesd.noNA[ ,reverse.cols] = 5 - cesd.noNA[ ,..reverse.cols]
cesd.noNA$score.cesd<- rowSums(cesd.noNA[,-1])
final.cesd<- cesd.noNA[,c("userid", "score.cesd")]
final.cesd<- unique(final.cesd, by= 'userid')
dim(final.cesd)
#Correlation between swl and cesd variable:
#cesd.swl<- merge(final.cesd, train.swl, by="userid")
#cor.cesd_swl<-cor(cesd.swl$score.cesd, cesd.swl$swl)
######---------------------------------------------------------########
cesd.big5<- unique(merge(big5.100Q,final.cesd, by="userid"))
dim(cesd.big5)
#Take 800 for the test set:
set.seed(1)
size_test <- 800
test_ind <- sample(seq_len(nrow(cesd.big5)),
size = size_test)
test.cesd <- data.table(cesd.big5[test_ind, ])
train.cesd <- data.table(cesd.big5[-test_ind, ])
dim(train.cesd)
cesd.train.100<- train.cesd[,-c("userid", "O","C","E","A","N")]
#Regression model:
cesd.model<- lm(score.cesd~.,cesd.train.100)
coef(cesd.model)
length( coef(cesd.model))
summary(cesd.model)
| /cesd.R | no_license | GalBenY/Predictive-Five | R | false | false | 1,333 | r |
#Import Data and subset for the test set:
# Item-level CES-D depression questionnaire responses (one row per userid,
# items q1..q20).
all.cesd <- fread("cesd_item_level.csv", header = T)
all.cesd<- unique(all.cesd, by='userid')
names(all.cesd)
# Drop columns 2:7 (non-item columns), keeping userid plus the 20 items.
cesd <- all.cesd[,-(2:7)]
# Recode -1 and 0 as missing; assumes valid item responses are 1..4 --
# TODO confirm against the questionnaire coding.
cesd[cesd == -1] <- NA
cesd[cesd == 0] <- NA
cesd.noNA <- na.omit(cesd)
# Sanity check: no NAs should remain in any item column after na.omit().
sapply(cesd.noNA[,q1:q20], function(x) table(is.na(x)))
cor(cesd.noNA[,-1])
# Reverse-code items in columns 5, 9, 13, 17 (i.e. q4, q8, q12, q16 after
# the userid column) via new = 5 - old; presumably the positively-worded
# CES-D items -- confirm.
reverse.cols<- c(5, 9, 13,17)
cesd.noNA[ ,reverse.cols] = 5 - cesd.noNA[ ,..reverse.cols]
# Total CES-D score = sum over all 20 (partly reverse-coded) items.
cesd.noNA$score.cesd<- rowSums(cesd.noNA[,-1])
final.cesd<- cesd.noNA[,c("userid", "score.cesd")]
final.cesd<- unique(final.cesd, by= 'userid')
dim(final.cesd)
#Correlation between swl and cesd variable:
#cesd.swl<- merge(final.cesd, train.swl, by="userid")
#cor.cesd_swl<-cor(cesd.swl$score.cesd, cesd.swl$swl)
######---------------------------------------------------------########
# Join scores to the Big Five questionnaire data; big5.100Q is assumed to
# be defined by an earlier script -- confirm.
cesd.big5<- unique(merge(big5.100Q,final.cesd, by="userid"))
dim(cesd.big5)
#Take 800 for the test set:
set.seed(1)
size_test <- 800
test_ind <- sample(seq_len(nrow(cesd.big5)),
size = size_test)
test.cesd <- data.table(cesd.big5[test_ind, ])
train.cesd <- data.table(cesd.big5[-test_ind, ])
dim(train.cesd)
# Regress the CES-D score on the individual items, excluding the id and
# the aggregate trait columns O/C/E/A/N.
cesd.train.100<- train.cesd[,-c("userid", "O","C","E","A","N")]
#Regression model:
cesd.model<- lm(score.cesd~.,cesd.train.100)
coef(cesd.model)
length( coef(cesd.model))
summary(cesd.model)
|
# Verify praise() interpolates the name, honours a custom terminator,
# and fails when no name is supplied.
test_that("test praise works", {
  expected_default <- glue::glue("You're the best, Jingxu!")
  expect_identical(praise("Jingxu"), expected_default)

  expected_custom <- glue::glue("You're the best, Jingxu;")
  expect_identical(praise("Jingxu", ";"), expected_custom)

  # The name argument has no default, so a bare call must error.
  expect_error(praise())
})
| /tests/testthat/test-praise.R | permissive | JingxuJ/praiseme | R | false | false | 258 | r | test_that("test praise works", {
expect_identical(praise("Jingxu"),
glue::glue("You're the best, Jingxu!"))
expect_identical(praise("Jingxu", ";"),
glue::glue("You're the best, Jingxu;"))
expect_error(praise())
})
|
context("Dictionary")

test_that("Dictionary", {
  # Toy R6 payload class; only the key-value mechanics of Dictionary matter.
  Foo <- R6::R6Class("Foo",
                     public = list(x = 0, id = NULL,
                                   initialize = function(x) self$x <- x),
                     cloneable = TRUE)
  d <- Dictionary$new()
  expect_identical(d$keys(), character(0L))

  f1 <- Foo
  f2 <- Foo

  # Adding a key makes it visible via keys()/has().
  expect_false(d$has("f1"))
  d$add("f1", f1)
  expect_identical(d$keys(), "f1")
  expect_true(d$has("f1"))

  # Retrieval constructs objects, singly and in batches.
  f1c <- d$get("f1", x = 1)
  expect_list(d$mget("f1", x = 1), names = "unique", len = 1, types = "Foo")

  d$add("f2", f2)
  expect_set_equal(d$keys(), c("f1", "f2"))
  expect_list(d$mget(c("f1", "f2"), x = 1), names = "unique", len = 2, types = "Foo")

  # Removal deletes exactly the requested key.
  d$remove("f2")
  expect_set_equal(d$keys(), "f1")
  expect_false(d$has("f2"))

  expect_data_table(as.data.table(d), nrow = 1L)
})
test_that("Dictionary: clone works", {
  # Two lookups of the same key must yield distinct (cloned) objects.
  t1 <- mlr_tasks$get("iris")
  expect_task(t1)
  t2 <- mlr_tasks$get("iris")
  expect_task(t2)
  expect_different_address(t1, t2)
})
test_that("$keys(pattern) works", {
  # Pattern-filtered keys must be a non-empty subset of all keys.
  classif_keys <- mlr_learners$keys("classif")
  expect_subset(classif_keys, mlr_learners$keys(), empty.ok = FALSE)
})
test_that("Dictionaries are populated", {
  # Every shipped dictionary holds at least one object of the advertised type.
  expect_dictionary(mlr_tasks, "Task", min.items = 1L)
  expect_dictionary(mlr_learners, "Learner", min.items = 1L)
  expect_dictionary(mlr_resamplings, "Resampling", min.items = 1L)
  expect_dictionary(mlr_measures, "Measure", min.items = 1L)

  # The data.table summary has exactly one row per registered key.
  for (dict in list(mlr_tasks, mlr_learners, mlr_resamplings, mlr_measures)) {
    expect_data_table(as.data.table(dict), nrow = length(dict$keys()))
  }
})
test_that("Error when a package containing the dataset is not installed", {
  # Register a task whose constructor needs a dataset from a package that
  # does not exist; retrieval must fail rather than return a broken task.
  test_task <- DictionaryTask$new()
  test_task$add("missing_package", function() {
    backend <- DataBackendDataTableVirtualKey$new(
      data = load_dataset("xxx", "missing_package_123"))
    TaskClassif$new("missing_package", backend, target = "x", positive = "y")
  })
  expect_error(test_task$get("missing_package"))
})
| /tests/testthat/test_Dictionary.R | permissive | Pinto-P/mlr3 | R | false | false | 2,057 | r | context("Dictionary")
test_that("Dictionary", {
Foo = R6::R6Class("Foo", public = list(x=0, id=NULL, initialize = function(x) self$x = x), cloneable = TRUE)
d = Dictionary$new()
expect_identical(d$keys(), character(0L))
f1 = Foo
f2 = Foo
expect_false(d$has("f1"))
d$add("f1", f1)
expect_identical(d$keys(), "f1")
expect_true(d$has("f1"))
f1c = d$get("f1", x = 1)
expect_list(d$mget("f1", x = 1), names = "unique", len = 1, types = "Foo")
d$add("f2", f2)
expect_set_equal(d$keys(), c("f1", "f2"))
expect_list(d$mget(c("f1", "f2"), x = 1), names = "unique", len = 2, types = "Foo")
d$remove("f2")
expect_set_equal(d$keys(), "f1")
expect_false(d$has("f2"))
expect_data_table(as.data.table(d), nrow = 1L)
})
test_that("Dictionary: clone works", {
t1 = mlr_tasks$get("iris")
expect_task(t1)
t2 = mlr_tasks$get("iris")
expect_task(t2)
expect_different_address(t1, t2)
})
test_that("$keys(pattern) works", {
expect_subset(mlr_learners$keys("classif"), mlr_learners$keys(), empty.ok = FALSE)
})
test_that("Dictionaries are populated", {
expect_dictionary(mlr_tasks, "Task", min.items = 1L)
expect_dictionary(mlr_learners, "Learner", min.items = 1L)
expect_dictionary(mlr_resamplings, "Resampling", min.items = 1L)
expect_dictionary(mlr_measures, "Measure", min.items = 1L)
expect_data_table(as.data.table(mlr_tasks), nrow = length(mlr_tasks$keys()))
expect_data_table(as.data.table(mlr_learners), nrow = length(mlr_learners$keys()))
expect_data_table(as.data.table(mlr_resamplings), nrow = length(mlr_resamplings$keys()))
expect_data_table(as.data.table(mlr_measures), nrow = length(mlr_measures$keys()))
})
test_that("Error when a package containing the dataset is not installed", {
test_task = DictionaryTask$new()
test_task$add("missing_package", function() {
b = DataBackendDataTableVirtualKey$new(data = load_dataset("xxx", "missing_package_123"))
TaskClassif$new("missing_package", b, target = "x", positive = "y")
})
expect_error(test_task$get("missing_package"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idealised-pathway-cost.R
\name{calcIdealisedTestCosts}
\alias{calcIdealisedTestCosts}
\title{Calculate NICE Idealised Test Costs}
\usage{
calcIdealisedTestCosts(data, BAL = FALSE, Dosanjh3TB = FALSE,
return = "matrix")
}
\arguments{
\item{data}{IDEA data set (perhaps bootstrap sampled or imputed)}
\item{return}{Return a list of matrix.}
}
\value{
Total test cost statistics
}
\description{
Uses the pathway frequencies for the NICE idealised pathway (chest X-ray, culture, smear, clinical judgement)
and whether the observed Dosanjh final diagnosis is what would be expected from the idealised pathway.
Assume that three sputum samples are taken so at least 3 culture and smear tests.
Inclusion of BAL would further include this procedure cost and the additional culture and smear tests.
Where the observed diagnosis is not as expected (usually when the 4 measurements are insufficient to discriminate) we use
the raw data and just sum the cost of the observed tests. The subjectivity comes in when deciding
exactly what the expected diagnosis is. In most cases it is clear.
We could remove patients who do not have all 3 tests, although this may introduce some bias;
alternatively, we can use an imputed completed sample (preferred).
}
\details{
This is the function used in \code{\link{boot.calcIdealisedTestCosts}}.
}
| /man/calcIdealisedTestCosts.Rd | no_license | n8thangreen/IDEAdectree | R | false | true | 1,358 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/idealised-pathway-cost.R
\name{calcIdealisedTestCosts}
\alias{calcIdealisedTestCosts}
\title{Calculate NICE Idealised Test Costs}
\usage{
calcIdealisedTestCosts(data, BAL = FALSE, Dosanjh3TB = FALSE,
return = "matrix")
}
\arguments{
\item{data}{IDEA data set (perhaps bootstrap sampled or imputed)}
\item{return}{Return a list of matrix.}
}
\value{
Total test cost statistics
}
\description{
Uses the pathway frequencies for the NICE idealised pathway (chest X-ray, culture, smear, clinical judgement)
and whether the observed Dosanjh final diagnosis is what would be expected from the idealised pathway.
Assume that three sputum samples are taken so at least 3 culture and smear tests.
Inclusion of BAL would further include this procedure cost and the additional culture and smear tests.
Where the observed diagnosis is not as expected (usually when the 4 measurements are insufficient to discriminate) we use
the raw data and just sum the cost of the observed tests. The subjectivity comes in when deciding
exactly what the expected diagnosis is. In most cases it is clear.
We could remove patients who do not have all 3 tests, although this may introduce some bias;
alternatively, we can use an imputed completed sample (preferred).
}
\details{
This is the function used in \code{\link{boot.calcIdealisedTestCosts}}.
}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{scale_size_area}
\alias{scale_size_area}
\title{Scale area instead of radius, for size.}
\usage{
scale_size_area(..., max_size = 6)
}
\arguments{
\item{...}{Other arguments passed on to
\code{\link{continuous_scale}} to control name, limits,
breaks, labels and so forth.}
\item{max_size}{Size of largest points.}
}
\description{
When \code{scale_size_area} is used, the default behavior is to scale the
area of points to be proportional to the value.
}
\details{
Note that this controls the size scale, so it will also control
the thickness of lines. Line thickness will be proportional to the square
root of the value, which is probably undesirable in most cases.
}
| /man/scale_size_area.Rd | no_license | nietzsche1993/ggplot2 | R | false | false | 736 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{scale_size_area}
\alias{scale_size_area}
\title{Scale area instead of radius, for size.}
\usage{
scale_size_area(..., max_size = 6)
}
\arguments{
\item{...}{Other arguments passed on to
\code{\link{continuous_scale}} to control name, limits,
breaks, labels and so forth.}
\item{max_size}{Size of largest points.}
}
\description{
When \code{scale_size_area} is used, the default behavior is to scale the
area of points to be proportional to the value.
}
\details{
Note that this controls the size scale, so it will also control
the thickness of lines. Line thickness will be proportional to the square
root of the value, which is probably undesirable in most cases.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{get_size_cpp}
\alias{get_size_cpp}
\title{File size}
\usage{
get_size_cpp(filename)
}
\arguments{
\item{filename}{a character string specifying the name of the file to be
processed with \code{pcadapt}.}
}
\value{
The returned value is a numeric vector of length 2.
}
\description{
\code{get_size_cpp} returns the number of genetic markers and the number of
individuals present in the data.
}
| /man/get_size_cpp.Rd | no_license | keurcien/pcadapt | R | false | true | 492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{get_size_cpp}
\alias{get_size_cpp}
\title{File size}
\usage{
get_size_cpp(filename)
}
\arguments{
\item{filename}{a character string specifying the name of the file to be
processed with \code{pcadapt}.}
}
\value{
The returned value is a numeric vector of length 2.
}
\description{
\code{get_size_cpp} returns the number of genetic markers and the number of
individuals present in the data.
}
|
# Description:
#   Run goseq to identify enriched Gene Ontology biological processes.
#   The analysis is repeated for the ARPE-19 and MIO-M1 data sets.
library(goseq)

# Output directory for the enrichment tables.
path <- '/analysis/arpe19_miom1/results'
dir.create(file.path(path, 'goseq'), showWarnings = FALSE)

# Gene length info: bias data for goseq's probability weighting function.
gene.length <- read.csv(file.path(path, 'gene_length.csv'), row.names = 1,
                        stringsAsFactors = FALSE)

# |log2FC| limit for being considered as DE.
low_log2fc_cutoff <- 0
# FDR cutoff for DE.
FDR_cutoff <- 0.05
# goseq statistical significance cutoff.
goseq_stat_cutoff <- 0.05

# biomaRt was used to extract the genes in each GO BP on 2019-06-26;
# the mapping was saved in an object named go.db and is restored here.
# (TRUE/FALSE are written out in full above: T/F are reassignable bindings.)
load('reference/go/go.db.RData')
# Run one goseq enrichment and write the significant GO terms to
# <path>/goseq/<de.name>/.
#   filtered.genes     - character vector of all detected genes
#   gene.length.tested - gene lengths (bias data), same order as filtered.genes
#   de.genes           - named logical vector flagging the genes to test (DE)
#   log2fc_cutoff      - label embedded in the output file name
#   de.name            - comparison name, used as the output sub-directory
# Relies on script-level globals: go.db, goseq_stat_cutoff, path and
# go.db.name. NOTE(review): go.db.name is not defined anywhere in this file
# -- presumably restored from go.db.RData; confirm, otherwise write.csv fails.
GOseq_DE_genes <- function(filtered.genes, gene.length.tested, de.genes, log2fc_cutoff,
                           de.name){
  # Probability weighting function; Wallenius approximation is goseq's default.
  pwf <- nullp(DEgenes = de.genes, bias.data = gene.length.tested, plot.fit = FALSE)
  # Only proceed when at least one DE gene is present in the GO mapping.
  if (any(names(de.genes)[de.genes] %in% names(go.db))) {
    enrich <- goseq(pwf, gene2cat = go.db)
    # BH-adjust the over-representation p-values.
    enrich$adj.pvalue <- p.adjust(enrich$over_represented_pvalue, method = "BH")
    # Rebuild the 'GO:<id><name>' string, then split it into id and name.
    enrich$proper.category <- sapply(enrich$category,
                                     function(x) paste0('GO:', x),
                                     USE.NAMES = FALSE)
    enrich$go.name <- sapply(enrich$proper.category, function(x) {
      paste0(toupper(substring(x, 12, 12)), substring(x, 13, nchar(x)))
    }, USE.NAMES = FALSE)
    enrich$go.id <- sapply(enrich$proper.category,
                           function(x) substring(x, 1, 10),
                           USE.NAMES = FALSE)
    # Keep the columns of interest under shorter names.
    enrich <- enrich[, c('go.name', 'go.id', 'adj.pvalue', 'over_represented_pvalue',
                         'numDEInCat', 'numInCat')]
    colnames(enrich) <- c('go.name', 'go.id', 'adj.pvalue', 'pvalue',
                          'num.de', 'num.category')
    # Order by significance and keep only the enriched terms.
    enrich <- enrich[order(enrich$adj.pvalue), ]
    enriched.only <- enrich[enrich$adj.pvalue < goseq_stat_cutoff, ]
    out.file <- file.path(path, 'goseq', de.name,
                          paste0('goseq_', log2fc_cutoff, '_',
                                 go.db.name, '.csv'))
    write.csv(enriched.only, out.file, row.names = FALSE)
  }
}
# Run the GO enrichment for one edgeR comparison table.
#   de      - edgeR result data.frame with columns id, FDR and logFC
#   de.name - comparison label; also the output sub-directory name
# Uses the script-level globals path, gene.length, FDR_cutoff and
# low_log2fc_cutoff. Both arguments are required: the original defaults
# (de = de, de.name = de.name) were self-referential and errored if relied
# upon, so they have been dropped.
Call_goseq <- function(de, de.name){
  # Create the per-comparison output folder.
  dir.create(file.path(path, 'goseq', de.name), showWarnings = FALSE)
  # All detected genes and their lengths, looked up by rowname in one
  # match() call (replaces an O(n^2) per-gene scan; missing ids yield NA
  # instead of a malformed list as before).
  filtered.genes <- de$id
  gene.length.tested <- gene.length$length[match(filtered.genes,
                                                 rownames(gene.length))]
  # Flag DE genes: significant FDR and |log2FC| above the cutoff.
  de.genes <- setNames(de$FDR < FDR_cutoff & abs(de$logFC) > low_log2fc_cutoff,
                       filtered.genes)
  GOseq_DE_genes(filtered.genes = filtered.genes,
                 gene.length.tested = gene.length.tested,
                 de.genes = de.genes,
                 log2fc_cutoff = paste0('log2FC=', low_log2fc_cutoff),
                 de.name = de.name)
}
# Loop goseq through each comparison: one edgeR result file per comparison.
# NOTE: the file vector was previously stored in a variable named
# `file.path`, masking base::file.path; renamed to avoid the shadowing.
de.files <- list.files(file.path(path, 'edgeR'), pattern = '*.csv$', full.names = TRUE)
de.labels <- list.files(file.path(path, 'edgeR'), pattern = '*.csv$', full.names = FALSE)
de.labels <- tools::file_path_sans_ext(de.labels)
# Drop the fixed 9-character prefix of each file name to get the comparison label.
de.labels <- sapply(de.labels, function(x) substring(x, 10),
                    USE.NAMES = FALSE)
# seq_along() is safe when no files match (seq()/1:n would misbehave on 0).
for (i in seq_along(de.labels)){
  de <- read.csv(de.files[i], stringsAsFactors = FALSE)
  Call_goseq(de = de, de.name = de.labels[i])
}
| /goseq.R | no_license | zshao1/RSG_H2O2 | R | false | false | 3,730 | r | # Description:
# run goseq to identify enriched gene ontology biological processes
library(goseq)
# Repeated for ARPE-19 and MIO-M1 analyses
path = '/analysis/arpe19_miom1/results'
dir.create(file.path(path, 'goseq'), showWarnings = F)
# load gene length info
gene.length = read.csv(file.path(path, 'gene_length.csv'), row.names = 1,
stringsAsFactors = F)
# |log2FC| limit for being considered as DE
low_log2fc_cutoff = 0
# FDR cutoff for DE
FDR_cutoff = 0.05
# goseq stat. significance cutoff
goseq_stat_cutoff = 0.05
# biomart was used to extract genes in each GO BP on 2019.06.26,
# which was saved in an object named go.db
load('reference/go/go.db.RData')
GOseq_DE_genes = function(filtered.genes, gene.length.tested, de.genes, log2fc_cutoff,
de.name){
# function that runs goseq with inputs
# 1. vector of detected genes (filtered.genes)
# 2. length of detected genes (gene.length.tested)
# 3. vector of 0/1 indicating whether the gene is to be tested,
# typically DE genes (de.genes)
# goseq PWF, wallenius approximation, BH adjust p-values
pwf=nullp(DEgenes = de.genes, bias.data=gene.length.tested, plot.fit = FALSE)
# check if at least one DE gene in the db
if (sum(names(de.genes)[de.genes] %in% names(go.db))>0){
# calculate p-value, adj. p-value, separate GO ID and GO name
go.wall = goseq(pwf, gene2cat=go.db)
go.wall$adj.pvalue = p.adjust(go.wall$over_represented_pvalue, method="BH")
go.wall$proper.category = sapply(go.wall$category, function(x)
paste0('GO:',x), USE.NAMES = F)
go.wall$go.name = sapply(go.wall$proper.category, function(x)
paste0(toupper(substring(x, 12, 12)),
substring(x, 13, nchar(x))),
USE.NAMES = F)
go.wall$go.id = sapply(go.wall$proper.category, function(x)
substring(x, 1, 10), USE.NAMES = F)
# select columns of interest and rename column names
go.wall = go.wall[,c('go.name','go.id','adj.pvalue','over_represented_pvalue',
'numDEInCat','numInCat')]
colnames(go.wall) = c('go.name','go.id','adj.pvalue','pvalue',
'num.de','num.category')
# select stat. significant GO
go.wall = go.wall[order(go.wall$adj.pvalue),]
go.enriched = go.wall[go.wall$adj.pvalue<goseq_stat_cutoff,]
# write to drive
write.csv(go.enriched, file.path(path, 'goseq', de.name,
paste0('goseq_', log2fc_cutoff, '_',
go.db.name,'.csv')),
row.names = F)
}
}
Call_goseq = function(de=de, de.name=de.name){
# create comparison folder
dir.create(file.path(path, 'goseq', de.name), showWarnings = F)
# parse all filtered genes and their length
filtered.genes = de$id
gene.length.tested = sapply(filtered.genes, function(x)
gene.length$length[rownames(gene.length)==x],
USE.NAMES = F)
# run goseq
de.genes = setNames(de$FDR<FDR_cutoff & abs(de$logFC)>low_log2fc_cutoff, filtered.genes)
GOseq_DE_genes(filtered.genes = filtered.genes,
gene.length.tested = gene.length.tested,
de.genes = de.genes,
log2fc_cutoff = paste0('log2FC=',low_log2fc_cutoff),
de.name = de.name)
}
# Loop goseq through each comparison: one edgeR result file per comparison.
# NOTE: the file vector was previously stored in a variable named
# `file.path`, masking base::file.path; renamed to avoid the shadowing.
de.files <- list.files(file.path(path, 'edgeR'), pattern = '*.csv$', full.names = TRUE)
de.labels <- list.files(file.path(path, 'edgeR'), pattern = '*.csv$', full.names = FALSE)
de.labels <- tools::file_path_sans_ext(de.labels)
# Drop the fixed 9-character prefix of each file name to get the comparison label.
de.labels <- sapply(de.labels, function(x) substring(x, 10),
                    USE.NAMES = FALSE)
# seq_along() is safe when no files match (seq()/1:n would misbehave on 0).
for (i in seq_along(de.labels)){
  de <- read.csv(de.files[i], stringsAsFactors = FALSE)
  Call_goseq(de = de, de.name = de.labels[i])
}
|
# Map-style minimal theme: blank axes and grid, warm off-white canvas,
# Hanalei display fonts, legend along the bottom. Extra theme() settings
# can be supplied through `...` (applied last, so callers can override).
theme_depth <- function(...) {
  canvas <- "#f5f5f2"
  ink <- "#5e644f"
  accent <- "#A77B56"

  theme_minimal() +
  theme(
    # Typography.
    text = element_text(family = "Hanalei", color = ink),
    legend.text = element_text(family = "Hanalei Fill", colour = accent, size = 12),
    legend.title = element_text(family = "Hanalei Fill", colour = accent),
    plot.title = element_text(family = "Hanalei", color = ink, size = 40,
                              hjust = 0.49, vjust = -2),
    # Hide all axis furniture and grid lines.
    axis.line = element_blank(),
    axis.text.x = element_blank(),
    axis.text.y = element_blank(),
    axis.ticks = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    panel.border = element_blank(),
    # One uniform background everywhere.
    plot.background = element_rect(fill = canvas, color = NA),
    panel.background = element_rect(fill = canvas, color = NA),
    legend.background = element_rect(fill = canvas, color = NA),
    legend.position = "bottom",
    ...
  )
}
theme_minimal() +
theme(
text = element_text(family = "Hanalei", color = "#5e644f"),
axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = "#f5f5f2", color = NA),
panel.background = element_rect(fill = "#f5f5f2", color = NA),
legend.background = element_rect(fill = "#f5f5f2", color = NA),
panel.border = element_blank(),
legend.text = element_text(family = "Hanalei Fill", colour="#A77B56", size=12),
legend.title = element_text(family = "Hanalei Fill", colour="#A77B56"),
legend.position="bottom",
plot.title = element_text(family = "Hanalei", color = "#5e644f", size = 40, hjust = 0.49, vjust = -2),
...
)
} |
#' Check model function
#'
#' Generates a synthetic time series with the given properties and compares its sample statistics with the theoretically expected values.
#'
#' @inheritParams generateTS
#' @param plot logical - plot the result? (if TRUE, saved as an attribute 'plot')
#' @param returnTS logical - return timeseries generated by the check function? (if TRUE, saved as an attribute 'TS')
#'
#' @import moments reshape2
#' @export
#'
#' @keywords internal
#'
#' @examples
#'
#' library(CoSMoS)
#'
#' \donttest{
#' ## check your model
#' checkmodel(margdist = 'ggamma',
#' margarg = list(shape1 = 3,
#' shape2 = .5,
#' scale = 10),
#' n = 100000,
#' p = 30,
#' p0 = 0,
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' shape = .5,
#' scale = 10))
#' }
#'
#' \dontshow{
#' checkmodel(margdist = 'ggamma',
#' margarg = list(shape1 = 3,
#' shape2 = .5,
#' scale = 10),
#' n = 1000,
#' p = 30,
#' p0 = 0,
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' shape = .5,
#' scale = 10))
#' }
#'
checkmodel <- function(margdist, margarg, n, p = NULL, p0 = .6, acsvalue, TSn = 1, distbounds = c(-Inf, Inf), plot = FALSE, returnTS = FALSE) {
  ## Generate TSn synthetic series and tabulate their sample statistics
  ## (mean, sd, skewness, probability of zero, lag 1-3 autocorrelation)
  ## next to the theoretically expected values. Returns a rounded
  ## data.frame with one 'expected' row plus one row per simulation; the
  ## plot (or NA) and the generated series (or NA) travel as attributes.
  ## Warnings from generation and the summary statistics are deliberately
  ## silenced, matching the original behaviour.
  suppressWarnings({
    x <- generateTS(margdist = margdist,
                    margarg = margarg,
                    n = n, p = p, p0 = p0, TSn = TSn,
                    acsvalue = acsvalue)
    ## Sample ACF per series; column j holds the ACF of series j.
    ## (FALSE spelled out: the F shortcut is a reassignable binding.)
    a <- sapply(x, function(y) acf(y, plot = FALSE)$acf)
    ## First row: population values; remaining rows: per-series samples.
    out <- data.frame(mean = c(popmean(margdist,
                                       margarg,
                                       distbounds = distbounds,
                                       p0 = p0),
                               sapply(x, mean)),
                      sd = c(popsd(margdist,
                                   margarg,
                                   distbounds = distbounds,
                                   p0 = p0),
                             sapply(x, sd)),
                      skew = c(popskew(margdist,
                                       margarg,
                                       distbounds = distbounds,
                                       p0 = p0),
                               sapply(x, moments::skewness)),
                      p0 = c(p0,
                             sapply(x, function(y) length(which(y == 0))/length(y))),
                      acf_t1 = c(acsvalue[2],
                                 a[2, ]),
                      acf_t2 = c(acsvalue[3],
                                 a[3, ]),
                      acf_t3 = c(acsvalue[4],
                                 a[4, ]))
    ## seq_len() instead of 1:TSn guards against a zero-length edge case.
    row.names(out) <- c('expected', paste0('simulation', seq_len(TSn)))
  })
  out <- round(out, 2)
  ## Build the comparison plot on demand; use a fresh name instead of
  ## clobbering the function argument `p` as the original code did.
  if (plot) {
    plt <- checkplot(x = out,
                     margdist = margdist,
                     margarg = margarg,
                     p0 = p0)
  } else {
    plt <- NA
  }
  if (!returnTS) {
    x <- NA
  }
  structure(.Data = out,
            plot = plt,
            TS = x)
}
#' Check generated timeseries
#'
#' Compares generated time series sample statistics with the theoretically expected values.
#'
#' @param TS generated timeseries
#' @inheritParams checkmodel
#'
#' @export
#'
#' @examples
#'
#' library(CoSMoS)
#'
#' ## check your generated timeseries
#' x <- generateTS(margdist = 'burrXII',
#' margarg = list(scale = 1,
#' shape1 = .75,
#' shape2 = .25),
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' scale = 10,
#' shape = .75),
#' n = 1000, p = 30, p0 = .5, TSn = 5)
#'
#' checksimulation(x)
#'
checksimulation <- function(TS, distbounds = c(-Inf, Inf), plot = FALSE) {
  ## Compare the sample statistics of already-generated series with the
  ## theoretical values implied by the model specification that generateTS
  ## stored as attributes on each series.
  ## Accept a single series as well as a list of series.
  if (!is.list(TS)) {
    TS <- list(TS)
  }
  ## Model specification, read off the first series' attributes.
  att <- attributes(TS[[1]])
  margdist <- att$margdist
  margarg <- att$margarg
  p0 <- att$p0
  acsvalue <- att$acsvalue
  ## Sample ACF per series; column j holds the ACF of series j.
  ## (FALSE spelled out: the F shortcut is a reassignable binding.)
  ac <- sapply(TS, function(x) acf(x, plot = FALSE)$acf)
  ## First row: population values; remaining rows: per-series samples.
  out <- data.frame(mean = c(popmean(margdist,
                                     margarg,
                                     distbounds = distbounds,
                                     p0 = p0),
                             sapply(TS, mean)),
                    sd = c(popsd(margdist,
                                 margarg,
                                 distbounds = distbounds,
                                 p0 = p0),
                           sapply(TS, sd)),
                    skew = c(popskew(margdist,
                                     margarg,
                                     distbounds = distbounds,
                                     p0 = p0),
                             sapply(TS, moments::skewness)),
                    p0 = c(p0,
                           sapply(TS, function(x) length(which(x == 0))/length(x))),
                    acf_t1 = c(acsvalue[2],
                               ac[2, ]),
                    acf_t2 = c(acsvalue[3],
                               ac[3, ]),
                    acf_t3 = c(acsvalue[4],
                               ac[4, ]))
  row.names(out) <- c('expected', paste0('simulation', seq_along(TS)))
  out <- round(out, 2)
  ## Build the comparison plot on demand (distinct local name for clarity).
  if (plot) {
    plt <- checkplot(x = out,
                     margdist = margdist,
                     margarg = margarg,
                     p0 = p0)
  } else {
    plt <- NA
  }
  structure(.Data = out,
            plot = plt)
}
#' Plot function for check results
#'
#' @param x check result
#'
#' @keywords internal
#' @export
#'
# Build the summary figure for checkmodel()/checksimulation(): one panel
# per statistic, simulated values as a boxplot with the expected value
# overlaid as a red point.
checkplot <- function(x, margdist, margarg, p0) {
  # Long format: Var1 = row label (expected/simulationN), Var2 = statistic.
  long <- melt(as.matrix(x))
  expected <- long[which(long$Var1 == 'expected'), ]
  simulated <- long[which(long$Var1 != 'expected'), ]

  # "name = value; ..." parameter summary plus the probability-zero line.
  subtitle_txt <- paste(paste(names(margarg),
                              '=',
                              margarg,
                              collapse = '; '),
                        paste('\np0 =',
                              p0),
                        collapse = ' ')

  out <- ggplot() +
    geom_boxplot(data = simulated,
                 aes_string(x = 'Var2',
                            y = 'value',
                            group = 'Var2')) +
    geom_point(data = expected,
               aes_string(x = 'Var2',
                          y = 'value',
                          group = 'Var2'),
               size = 2,
               colour = 'red1') +
    facet_wrap('Var2',
               scales = 'free',
               nrow = 1) +
    labs(x = '',
         y = '',
         title = paste('Marginal =', margdist),
         subtitle = subtitle_txt) +
    theme_bw() +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
  return(out)
}
| /R/check.R | no_license | SalmaHobbi/CoSMoS | R | false | false | 7,037 | r | #' Check model function
#'
#' Generates a synthetic time series with the given properties and compares its sample statistics with the theoretically expected values.
#'
#' @inheritParams generateTS
#' @param plot logical - plot the result? (if TRUE, saved as an attribute 'plot')
#' @param returnTS logical - return timeseries generated by the check function? (if TRUE, saved as an attribute 'TS')
#'
#' @import moments reshape2
#' @export
#'
#' @keywords internal
#'
#' @examples
#'
#' library(CoSMoS)
#'
#' \donttest{
#' ## check your model
#' checkmodel(margdist = 'ggamma',
#' margarg = list(shape1 = 3,
#' shape2 = .5,
#' scale = 10),
#' n = 100000,
#' p = 30,
#' p0 = 0,
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' shape = .5,
#' scale = 10))
#' }
#'
#' \dontshow{
#' checkmodel(margdist = 'ggamma',
#' margarg = list(shape1 = 3,
#' shape2 = .5,
#' scale = 10),
#' n = 1000,
#' p = 30,
#' p0 = 0,
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' shape = .5,
#' scale = 10))
#' }
#'
checkmodel <- function(margdist, margarg, n, p = NULL, p0 = .6, acsvalue, TSn = 1, distbounds = c(-Inf, Inf), plot = FALSE, returnTS = FALSE) {
suppressWarnings({x <- generateTS(margdist = margdist,
margarg = margarg,
n = n, p = p, p0 = p0, TSn = TSn,
acsvalue = acsvalue)
a <- sapply(x, function(y) acf(y, plot = F)$acf)
out <- data.frame(mean = c(popmean(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(x, mean)),
sd = c(popsd(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(x, sd)),
skew = c(popskew(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(x, moments::skewness)),
p0 = c(p0,
sapply(x, function(y) length(which(y == 0))/length(y))),
acf_t1 = c(acsvalue[2],
a[2,]),
acf_t2 = c(acsvalue[3],
a[3,]),
acf_t3 = c(acsvalue[4],
a[4,]))
row.names(out) <- c('expected', paste0('simulation', 1:TSn))})
out <- round(out, 2)
if(plot) {
p <- checkplot(x = out,
margdist = margdist,
margarg = margarg,
p0 = p0)
} else {
p <- NA
}
if(!returnTS) {
x <- NA
}
structure(.Data = out,
plot = p,
TS = x)
}
#' Check generated timeseries
#'
#' Compares generated time series sample statistics with the theoretically expected values.
#'
#' @param TS generated timeseries
#' @inheritParams checkmodel
#'
#' @export
#'
#' @examples
#'
#' library(CoSMoS)
#'
#' ## check your generated timeseries
#' x <- generateTS(margdist = 'burrXII',
#' margarg = list(scale = 1,
#' shape1 = .75,
#' shape2 = .25),
#' acsvalue = acs(id = 'weibull',
#' t = 0:30,
#' scale = 10,
#' shape = .75),
#' n = 1000, p = 30, p0 = .5, TSn = 5)
#'
#' checksimulation(x)
#'
checksimulation <- function(TS, distbounds = c(-Inf, Inf), plot = FALSE) {
if (!is.list(TS)) {
TS <- list(TS)
}
att <- attributes(TS[[1]])
margdist <- att$margdist
margarg <- att$margarg
p0 <- att$p0
acsvalue <- att$acsvalue
ac <- sapply(TS, function(x) acf(x, plot = F)$acf)
out <- data.frame(mean = c(popmean(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(TS, mean)),
sd = c(popsd(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(TS, sd)),
skew = c(popskew(margdist,
margarg,
distbounds = distbounds,
p0 = p0),
sapply(TS, moments::skewness)),
p0 = c(p0,
sapply(TS, function(x) length(which(x == 0))/length(x))),
acf_t1 = c(acsvalue[2],
ac[2,]),
acf_t2 = c(acsvalue[3],
ac[3,]),
acf_t3 = c(acsvalue[4],
ac[4,]))
row.names(out) <- c('expected', paste0('simulation', seq_along(TS)))
out <- round(out, 2)
if(plot) {
p <- checkplot(x = out,
margdist = margdist,
margarg = margarg,
p0 = p0)
} else {
p <- NA
}
structure(.Data = out,
plot = p)
}
#' Plot function for check results
#'
#' @param x check result
#'
#' @keywords internal
#' @export
#'
checkplot <- function(x, margdist, margarg, p0) {
dta <- melt(as.matrix(x))
dta.e <- dta[which(dta$Var1 == 'expected'),]
dta.s <- dta[which(dta$Var1 != 'expected'),]
p <- ggplot() +
geom_boxplot(data = dta.s,
aes_string(x = 'Var2',
y = 'value',
group = 'Var2')) +
geom_point(data = dta.e,
aes_string(x = 'Var2',
y = 'value',
group = 'Var2'),
size = 2,
colour = 'red1') +
facet_wrap('Var2',
scales = 'free',
nrow = 1) +
labs(x = '',
y = '',
title = paste('Marginal =', margdist),
subtitle = paste(paste(names(margarg),
'=',
margarg,
collapse = '; '),
paste('\np0 =',
p0),
collapse = ' ')) +
theme_bw() +
theme(axis.title.x = element_blank(),
axis.text.x = element_blank(),
axis.ticks.x = element_blank())
return(p)
}
|
#
# gene_enhancer_correlation.R -- correlates gene expression with enhancer initiation levels.
#
# Combined table across the three prefixes (H, C, M); V5 and V7 are the
# gene-expression and summed-enhancer columns used throughout below.
# NOTE: the variable was previously named `as`, masking base::as; renamed.
enh_expr <- rbind(read.table("tmp/H.change-U.tsspc.enh"),
                  read.table("tmp/C.change-U.tsspc.enh"),
                  read.table("tmp/M.change-U.tsspc.enh"))
#enh_expr <- enh_expr[abs(enh_expr$V7)>1,]
print(cor.test(enh_expr$V5, enh_expr$V7))
print(cor.test(enh_expr$V5, enh_expr$V7, method="spearman"))
plot(enh_expr$V5, enh_expr$V7, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
# Re-draw the scatterplots into the PDF device.
pdf("gene_enhancer_correlation.pdf")
plot(enh_expr$V5, enh_expr$V7, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
source("../lib/densScatterplot.R")
densScatterplot(enh_expr$V5, enh_expr$V7, xlab= "Gene Expression", ylab="Sum Enhancers")
dev.off()
####################################################################################################
## Not sure why promoters are not being picked up in some cases using bedmap/ bedops?! Repeat in R.
source("../lib/getOverlap.R")
source("../lib/densScatterplot.R")

## For each changed protein-coding gene, average the change values of the
## dREG regulatory elements within +/- dist of its TSS (excluding elements
## that overlap the TSS itself). Returns a data.frame with columns enh
## (mean nearby RE change) and pro (the gene's own change), or NULL when no
## gene has nearby REs -- matching the original behaviour.
getEnhNear <- function(prefix="H", column=21, dist=50000, post_pro= ".change-U.tsv", post_enh= ".change-U.all.tsv") {
  ## Get TSS of changed, annotated protein coding genes.
  HC <- read.table(paste0("../annotations/chage_expr/", prefix, post_pro))
  genes <- HC[HC$V7 == "protein_coding", ]
  ## Collapse each gene to a 1 bp TSS anchor; the +/- strand offsets
  ## reproduce the original 250 bp shift exactly.
  tss <- genes
  tss[tss[, 6] == "+", 2] <- tss[tss[, 6] == "+", 2] - 250
  tss[tss[, 6] == "+", 3] <- tss[tss[, 6] == "+", 2] + 1
  tss[tss[, 6] == "-", 3] <- tss[tss[, 6] == "-", 3] + 251
  tss[tss[, 6] == "-", 2] <- tss[tss[, 6] == "-", 3] - 1
  ## dREG sites only.
  tres <- read.table(paste0("../annotations/chage_expr/", prefix, post_enh))
  tres <- tres[grep("dREG", tres$V10), ]
  ## Window of +/- dist around each TSS.
  nearby <- tss
  nearby[, 2] <- tss[, 2] - dist
  nearby[, 3] <- tss[, 3] + dist
  ## Collect one row per gene with nearby REs; build the result once at the
  ## end instead of growing it with rbind() inside the loop (O(n^2) -> O(n)).
  rows <- vector("list", NROW(tss))
  for (i in seq_len(NROW(tss))) {  # seq_len() is safe when tss has 0 rows
    ## Nearby REs, excluding any that overlap the TSS itself.
    indxtss <- getOverlap(tss[i, ], tres)
    indx <- getOverlap(nearby[i, ], tres)
    keep <- rep(FALSE, NROW(tres))
    keep[indx] <- TRUE
    keep[indxtss] <- FALSE
    indx <- which(keep)
    ## If REs are left over, include them.
    if (NROW(indx) > 0) {
      rows[[i]] <- data.frame(enh = mean(tres[indx, column]), pro = tss[i, column])
    }
  }
  ## rbind() drops the NULL entries; all-NULL yields NULL as before.
  do.call(rbind, rows)
}
# Mean enhancer change near each changed gene, one call per prefix.
# NOTE(review): columns 21-23 apparently hold the H/C/M change values
# respectively -- confirm against the input tables.
enh_pro_change <- rbind(getEnhNear("H", 21),
                        getEnhNear("C", 22),
                        getEnhNear("M", 23))
# Correlate each gene's own change (pro) with the mean nearby enhancer change (enh).
cor.test(enh_pro_change$pro, enh_pro_change$enh)
plot(enh_pro_change$pro, enh_pro_change$enh, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
densScatterplot(enh_pro_change$pro, enh_pro_change$enh, xlab= "Gene Expression", ylab="Sum Enhancers")
| /gene_enhancer/gene_enhancer_correlation.R | no_license | Danko-Lab/CD4-Cell-Evolution | R | false | false | 2,586 | r | #
# gene_enhancer_correlation.R -- correlates gene expression with enhancer initiation levels.
#
as <- rbind(read.table("tmp/H.change-U.tsspc.enh"), read.table("tmp/C.change-U.tsspc.enh"), read.table("tmp/M.change-U.tsspc.enh"))
#as <- as[abs(as$V7)>1,]
print(cor.test(as$V5, as$V7))
print(cor.test(as$V5, as$V7, method="spearman"))
plot(as$V5, as$V7, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
pdf("gene_enhancer_correlation.pdf")
plot(as$V5, as$V7, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
source("../lib/densScatterplot.R")
densScatterplot(as$V5, as$V7, xlab= "Gene Expression", ylab="Sum Enhancers")
dev.off()
####################################################################################################
## Not sure why promoters are not being picked up in some cases using bedmap/ bedops?! Repeat in R.
source("../lib/getOverlap.R")
source("../lib/densScatterplot.R")
getEnhNear <- function(prefix="H", column=21, dist=50000, post_pro= ".change-U.tsv", post_enh= ".change-U.all.tsv") {
## Get TSS of changed, annotated protein coding genes.
HC <- read.table(paste("../annotations/chage_expr/",prefix,post_pro, sep=""))
genes <- HC[HC$V7 == "protein_coding",]
tss <- genes
tss[tss[,6] == "+",2] <- tss[tss[,6] == "+",2]-250; tss[tss[,6] == "+",3] <- tss[tss[,6] == "+",2]+1
tss[tss[,6] == "-",3] <- tss[tss[,6] == "-",3]+251; tss[tss[,6] == "-",2] <- tss[tss[,6] == "-",3]-1
## dREG sites.
tres <- read.table(paste("../annotations/chage_expr/",prefix, post_enh, sep=""))
tres <- tres[grep("dREG", tres$V10),]
## nearby ...
nearby <- tss
nearby[,2] <- tss[,2]-dist
nearby[,3] <- tss[,3]+dist
## Find out which loops intersect.
enh_pro_change <- NULL
for(i in c(1:NROW(tss))) {
## Get all nearby REs, excluding overlap with the TSS.
indxtss <- getOverlap(tss[i,], tres)
indx <- getOverlap(nearby[i,], tres)
truth_ <- rep(FALSE, NROW(tres)); truth_[indx] <- TRUE; truth_[indxtss] <- FALSE
indx <- which(truth_)
## If REs are left over, include them.
if(NROW(indx)>0) {
enh_pro_change <- rbind(enh_pro_change, data.frame(enh=mean(tres[indx, column]), pro=tss[i,column]))
}
}
return(enh_pro_change)
}
enh_pro_change <- rbind(getEnhNear("H", 21),
getEnhNear("C", 22),
getEnhNear("M", 23))
cor.test(enh_pro_change$pro, enh_pro_change$enh)
plot(enh_pro_change$pro, enh_pro_change$enh, xlab= "Gene Expression", ylab="Sum Enhancers", pch=19)
densScatterplot(enh_pro_change$pro, enh_pro_change$enh, xlab= "Gene Expression", ylab="Sum Enhancers")
|
#library(Rcpp); library(mvtnorm); library(msm); sourceCpp ("../src/cid.cpp"); source("CID-basefunctions.R");
# Latent Vector Model: Reference Class
# Y_ij = q_i'q_j + e_ij
#This version: no multiplicative factor, just yet. Too much bother at this point to get it right.
# Reference class implementing the Latent Vector Model (LVM) CID component.
# Each node i carries a latent vector q_i in R^dimension; the dyadic mean for
# edge (i,j) is a similarity of q_i and q_j (value() delegates to
# cosine.closeness(), defined in the package's base functions), and the
# observed outcome is Gaussian around that mean with variance
# residual.variance.
LVMcid <-
setRefClass(
"LVMcid",
fields = list(
# Dimension of the latent vectors.
dimension="numeric",
# n.nodes x dimension matrix of current latent positions (one row per node).
latent.vector.pos="matrix",
#mult.factor="numeric",
#mult.factor.m="numeric",
#mult.factor.v="numeric",
# Gaussian prior on each latent position: mean vector, covariance matrix,
# and a cached precision (inverse covariance) used by the Gibbs draw.
latent.vector.pos.m="numeric",
latent.vector.pos.V="matrix",
latent.vector.pos.P="matrix",
#latent.vector.tune="numeric",
##inherited from main. Must fix later, but OK for now.
node.names="character",
n.nodes="numeric",
# Observed dyadic outcomes, aligned with the rows of edge.list.
outcome="numeric",
edge.list="matrix",
residual.variance="numeric",
edge.list.rows="list" #,
),
methods=list(
# Constructor: records sizes and priors, caches the prior precision, and
# either simulates outcomes (generate=TRUE) or stores the supplied ones.
initialize = function (
dimension=1,
n.nodes=10,
edge.list=make.edge.list(n.nodes),
edge.list.rows=row.list.maker(edge.list),
residual.variance=1,
outcome=numeric(0),
latent.vector.pos=matrix(rnorm(dimension*n.nodes), nrow=n.nodes),
#mult.factor=-1,
#mult.factor.m=0,
#mult.factor.v=10000,
latent.vector.pos.m=rep(0, dimension),
latent.vector.pos.V=diag(10000, dimension),
#latent.vector.tune=0.1,
generate=FALSE
) {
.self$n.nodes <<- n.nodes
.self$edge.list <<- edge.list
.self$edge.list.rows <<- edge.list.rows
.self$residual.variance <<- residual.variance
.self$node.names <<- as.character(1:.self$n.nodes)
.self$dimension <<- dimension
.self$latent.vector.pos <<- latent.vector.pos
.self$latent.vector.pos.m <<- latent.vector.pos.m
.self$latent.vector.pos.V <<- latent.vector.pos.V
# Cache the prior precision once so draw() does not re-invert every sweep.
.self$latent.vector.pos.P <<- solve(latent.vector.pos.V)
#.self$mult.factor <<- mult.factor
#.self$mult.factor.m <<- mult.factor.m
#.self$mult.factor.v <<- mult.factor.v
#.self$latent.vector.tune <<- latent.vector.tune
#adjust.lsp()
if (generate) .self$generate() else .self$outcome <<- outcome
},
#adjust.lsp = function (mult.up=TRUE) {
# mft <- mean(edge.list.distance(latent.vector.pos, edge.list))
# mult.factor <<- mult.factor*mft
# latent.vector.pos <<- latent.vector.pos/mft
#},
# Re-sync this component after the parent model changes its node count or
# edge list; re-draws the latent positions if their shape no longer matches.
reinitialize = function (n.nodes=NULL,
edge.list=NULL, node.names=NULL) {
if (!is.null(n.nodes)) n.nodes <<- n.nodes #.self$
if (!is.null(edge.list)) {
edge.list <<- edge.list
edge.list.rows <<- row.list.maker(edge.list)
}
if (nrow(latent.vector.pos) != .self$n.nodes) {
message ("Reinitializing LVM Vectors")
latent.vector.pos <<- matrix(rnorm(dimension*n.nodes), nrow=n.nodes)
#adjust.lsp()
}
if (!is.null(node.names)) {
if (length(node.names) == .self$n.nodes) node.names <<- node.names
} else node.names <<- as.character(1:.self$n.nodes)
},
# Bundle the sampled parameters into a list (class "LVMout") for storage
# in the Gibbs output.
pieces = function (include.name=FALSE) {
out <- list (latent.vector.pos=latent.vector.pos) #, mult.factor=mult.factor)
class(out) <- "LVMout"
out
},
# Print the current latent positions (transposed: one column per node).
show = function () {
message("t(latent.vector.pos):"); print(t(latent.vector.pos))
# message("mult.factor:"); print(mult.factor)
},
# Scatterplot of latent positions via latent.space.plot().
plot = function (pos=latent.vector.pos, ...) {
latent.space.plot (pos, arrowlines=TRUE, labels=node.names, ...)
},
# Sociomatrix-style image of the network, colored by outcome.
plot.network = function (color=outcome, ...) {
image.netplot (edge.list, color, node.labels=node.names, ...)
},
# Dyadic mean for every edge under the current latent positions.
value = function () {cosine.closeness(latent.vector.pos, edge.list)},
# Same as value(), but for an arbitrary parameter set / edge subset
# (used when evaluating stored Gibbs draws).
value.ext = function (parameters=pieces(), edges=1:nrow(edge.list)) { #slightly slower.
cosine.closeness(parameters[[1]], rbind(edge.list[edges,])) },
# Simulate outcomes from the model at the current parameter values.
generate = function () {outcome <<- rnorm(nrow(edge.list), value(), sqrt(residual.variance))},
# Gaussian log-likelihood of (a subset of) the outcomes.
log.likelihood = function(parameters=pieces(), edges=1:nrow(edge.list)) {
meanpart <- value.ext (parameters, edges)
sum(dnorm(outcome[edges], meanpart, sqrt(residual.variance), log=TRUE))
},
# Overdispersed random restart of the latent positions.
random.start = function () {
latent.vector.pos <<- matrix(rnorm(dimension*n.nodes), nrow=n.nodes)
#mult.factor <<- rnorm(1, mult.factor.m, sqrt(mult.factor.v))
},
# One full Gibbs sweep: each node's latent vector is drawn directly from a
# conjugate-normal conditional given all other nodes' current vectors, then
# the whole configuration is rotated to a canonical orientation to remove
# rotational non-identifiability.
# NOTE(review): the conditional below is exact for a linear (inner-product)
# mean; if cosine.closeness() normalizes the vectors, this draw is an
# approximation -- confirm against the package's derivation.
draw = function (verbose=0) { # tune=latent.vector.tune
#d1 <- LSMcid$new(); latent.vector.pos <- d1$latent.vector.pos; mult.factor <- d1$mult.factor; edge.list <- d1$edge.list; edge.list.rows <- d1$edge.list.rows; n.nodes <- d1$n.nodes; mult.factor.m=0; mult.factor.v=10000; latent.vector.tune=0.1
lsdim <- dim(latent.vector.pos)[2]
latent.vector.pos.hold <- latent.vector.pos
for (dd in 1:n.nodes) {
#Gibbs draw for one node given others. Direct!
row1 <- which(edge.list[,1] == dd)
row2 <- which(edge.list[,2] == dd)
#get the counterpart: latent vectors of every node adjacent to dd.
xx.mat <- rbind(matrix(latent.vector.pos.hold[edge.list[row1,2],], ncol=lsdim),
matrix(latent.vector.pos.hold[edge.list[row2,1],], ncol=lsdim))
# Conjugate update: posterior covariance and mean for node dd's vector.
cls.VV <- solve(t(xx.mat)%*%xx.mat/residual.variance + latent.vector.pos.P)
cls.mean <- cls.VV%*%(t(xx.mat)%*%outcome[c(row1,row2)]/residual.variance + latent.vector.pos.P%*%latent.vector.pos.m)
latent.vector.pos.hold[dd,] <- c(rmvnorm(1, cls.mean, cls.VV))
}
#Rotate back.
latent.vector.pos.hold <-
postprocess.latent.positions(latent.vector.pos.hold, recenter=FALSE)
rownames(latent.vector.pos.hold) <- node.names
latent.vector.pos <<- latent.vector.pos.hold
},
# Run the Gibbs sampler for draws*thin+burnin sweeps, keeping every thin-th
# draw after burnin. Returns a list of pieces() + log-likelihood snapshots.
gibbs.full = function (report.interval=0, draws=100, burnin=0, thin=1, make.random.start=FALSE) {
out <- list()
if (make.random.start) random.start()
for (kk in 1:(draws*thin+burnin)) {
draw();
index <- (kk-burnin)/thin
if (kk > burnin & round(index)==index) {
out[[index]] <- c(pieces(), list(log.likelihood=log.likelihood()))
if (report.interval > 0) if (index %% report.interval == 0) message("LVM ",index)
} else if (round(index)==index) {
if (report.interval > 0) if (index %% report.interval == 0) message("LVM burnin ",index)
}
}
return(out)
},
# Per-draw dyadic means for each saved Gibbs sample.
gibbs.value = function (gibbs.out) sapply(gibbs.out, function(gg) {
value.ext (gg)
}),
# Posterior-mean latent positions (n.nodes x dimension) across saved draws.
gibbs.summary = function (gibbs.out) {
lsp.all <- sapply(gibbs.out, function(gg) gg$latent.vector.pos)
output <- matrix(apply(lsp.all, 1, mean), nrow=n.nodes)
rownames(output) <- node.names
colnames(output) <- paste0("pos",1:ncol(output))
return(output)
},
# Print and invisibly return the posterior-mean latent positions.
print.gibbs.summary = function (gibbs.out) {
get.sum <- gibbs.summary(gibbs.out)
message ("Mean Latent Vector Positions:")
print(get.sum)
return(invisible(get.sum))
},
# Build a new LVM component whose positions are the posterior means.
# NOTE(review): relies on a constructor LVM() defined elsewhere in the
# package (not the LVMcid generator itself) -- confirm it exists.
gibbs.mean = function(gibbs.out){
get.sum <- gibbs.summary(gibbs.out)
return(LVM(dimension=dimension,n.nodes=n.nodes,
edge.list=edge.list,
edge.list.rows=edge.list.rows,
residual.variance=residual.variance,
outcome=outcome,
latent.vector.pos=get.sum,
latent.vector.pos.m=latent.vector.pos.m,
latent.vector.pos.V=latent.vector.pos.V))
},
# Plot the posterior-mean latent positions.
gibbs.plot = function (gibbs.out, ...) {
get.sum <- gibbs.summary(gibbs.out)
plot (get.sum, main = "Mean Latent Vector Positions from Gibbs Sampler", ...)
},
# Uniform node color used by the parent model's plotting routines.
gibbs.node.colors = function (gibbs.out) {
rep("#DDDDFF", n.nodes)
}
)
)
| /R/LVM-reference.R | no_license | bdabbs13/CIDnetworks | R | false | false | 8,008 | r |
#library(Rcpp); library(mvtnorm); library(msm); sourceCpp ("../src/cid.cpp"); source("CID-basefunctions.R");
# Latent Vector Model: Reference Class
# Y_ij = q_i'q_j + e_ij
#This version: no multiplicative factor, just yet. Too much bother at this point to get it right.
LVMcid <-
setRefClass(
"LVMcid",
fields = list(
dimension="numeric",
latent.vector.pos="matrix",
#mult.factor="numeric",
#mult.factor.m="numeric",
#mult.factor.v="numeric",
latent.vector.pos.m="numeric",
latent.vector.pos.V="matrix",
latent.vector.pos.P="matrix",
#latent.vector.tune="numeric",
##inherited from main. Must fix later, but OK for now.
node.names="character",
n.nodes="numeric",
outcome="numeric",
edge.list="matrix",
residual.variance="numeric",
edge.list.rows="list" #,
),
methods=list(
initialize = function (
dimension=1,
n.nodes=10,
edge.list=make.edge.list(n.nodes),
edge.list.rows=row.list.maker(edge.list),
residual.variance=1,
outcome=numeric(0),
latent.vector.pos=matrix(rnorm(dimension*n.nodes), nrow=n.nodes),
#mult.factor=-1,
#mult.factor.m=0,
#mult.factor.v=10000,
latent.vector.pos.m=rep(0, dimension),
latent.vector.pos.V=diag(10000, dimension),
#latent.vector.tune=0.1,
generate=FALSE
) {
.self$n.nodes <<- n.nodes
.self$edge.list <<- edge.list
.self$edge.list.rows <<- edge.list.rows
.self$residual.variance <<- residual.variance
.self$node.names <<- as.character(1:.self$n.nodes)
.self$dimension <<- dimension
.self$latent.vector.pos <<- latent.vector.pos
.self$latent.vector.pos.m <<- latent.vector.pos.m
.self$latent.vector.pos.V <<- latent.vector.pos.V
.self$latent.vector.pos.P <<- solve(latent.vector.pos.V)
#.self$mult.factor <<- mult.factor
#.self$mult.factor.m <<- mult.factor.m
#.self$mult.factor.v <<- mult.factor.v
#.self$latent.vector.tune <<- latent.vector.tune
#adjust.lsp()
if (generate) .self$generate() else .self$outcome <<- outcome
},
#adjust.lsp = function (mult.up=TRUE) {
# mft <- mean(edge.list.distance(latent.vector.pos, edge.list))
# mult.factor <<- mult.factor*mft
# latent.vector.pos <<- latent.vector.pos/mft
#},
reinitialize = function (n.nodes=NULL,
edge.list=NULL, node.names=NULL) {
if (!is.null(n.nodes)) n.nodes <<- n.nodes #.self$
if (!is.null(edge.list)) {
edge.list <<- edge.list
edge.list.rows <<- row.list.maker(edge.list)
}
if (nrow(latent.vector.pos) != .self$n.nodes) {
message ("Reinitializing LVM Vectors")
latent.vector.pos <<- matrix(rnorm(dimension*n.nodes), nrow=n.nodes)
#adjust.lsp()
}
if (!is.null(node.names)) {
if (length(node.names) == .self$n.nodes) node.names <<- node.names
} else node.names <<- as.character(1:.self$n.nodes)
},
pieces = function (include.name=FALSE) {
out <- list (latent.vector.pos=latent.vector.pos) #, mult.factor=mult.factor)
class(out) <- "LVMout"
out
},
show = function () {
message("t(latent.vector.pos):"); print(t(latent.vector.pos))
# message("mult.factor:"); print(mult.factor)
},
plot = function (pos=latent.vector.pos, ...) {
latent.space.plot (pos, arrowlines=TRUE, labels=node.names, ...)
},
plot.network = function (color=outcome, ...) {
image.netplot (edge.list, color, node.labels=node.names, ...)
},
value = function () {cosine.closeness(latent.vector.pos, edge.list)},
value.ext = function (parameters=pieces(), edges=1:nrow(edge.list)) { #slightly slower.
cosine.closeness(parameters[[1]], rbind(edge.list[edges,])) },
generate = function () {outcome <<- rnorm(nrow(edge.list), value(), sqrt(residual.variance))},
log.likelihood = function(parameters=pieces(), edges=1:nrow(edge.list)) {
meanpart <- value.ext (parameters, edges)
sum(dnorm(outcome[edges], meanpart, sqrt(residual.variance), log=TRUE))
},
random.start = function () {
latent.vector.pos <<- matrix(rnorm(dimension*n.nodes), nrow=n.nodes)
#mult.factor <<- rnorm(1, mult.factor.m, sqrt(mult.factor.v))
},
draw = function (verbose=0) { # tune=latent.vector.tune
#d1 <- LSMcid$new(); latent.vector.pos <- d1$latent.vector.pos; mult.factor <- d1$mult.factor; edge.list <- d1$edge.list; edge.list.rows <- d1$edge.list.rows; n.nodes <- d1$n.nodes; mult.factor.m=0; mult.factor.v=10000; latent.vector.tune=0.1
lsdim <- dim(latent.vector.pos)[2]
latent.vector.pos.hold <- latent.vector.pos
for (dd in 1:n.nodes) {
#Gibbs draw for one node given others. Direct!
row1 <- which(edge.list[,1] == dd)
row2 <- which(edge.list[,2] == dd)
#get the counterpart.
xx.mat <- rbind(matrix(latent.vector.pos.hold[edge.list[row1,2],], ncol=lsdim),
matrix(latent.vector.pos.hold[edge.list[row2,1],], ncol=lsdim))
cls.VV <- solve(t(xx.mat)%*%xx.mat/residual.variance + latent.vector.pos.P)
cls.mean <- cls.VV%*%(t(xx.mat)%*%outcome[c(row1,row2)]/residual.variance + latent.vector.pos.P%*%latent.vector.pos.m)
latent.vector.pos.hold[dd,] <- c(rmvnorm(1, cls.mean, cls.VV))
}
#Rotate back.
latent.vector.pos.hold <-
postprocess.latent.positions(latent.vector.pos.hold, recenter=FALSE)
rownames(latent.vector.pos.hold) <- node.names
latent.vector.pos <<- latent.vector.pos.hold
},
gibbs.full = function (report.interval=0, draws=100, burnin=0, thin=1, make.random.start=FALSE) {
out <- list()
if (make.random.start) random.start()
for (kk in 1:(draws*thin+burnin)) {
draw();
index <- (kk-burnin)/thin
if (kk > burnin & round(index)==index) {
out[[index]] <- c(pieces(), list(log.likelihood=log.likelihood()))
if (report.interval > 0) if (index %% report.interval == 0) message("LVM ",index)
} else if (round(index)==index) {
if (report.interval > 0) if (index %% report.interval == 0) message("LVM burnin ",index)
}
}
return(out)
},
gibbs.value = function (gibbs.out) sapply(gibbs.out, function(gg) {
value.ext (gg)
}),
gibbs.summary = function (gibbs.out) {
lsp.all <- sapply(gibbs.out, function(gg) gg$latent.vector.pos)
output <- matrix(apply(lsp.all, 1, mean), nrow=n.nodes)
rownames(output) <- node.names
colnames(output) <- paste0("pos",1:ncol(output))
return(output)
},
print.gibbs.summary = function (gibbs.out) {
get.sum <- gibbs.summary(gibbs.out)
message ("Mean Latent Vector Positions:")
print(get.sum)
return(invisible(get.sum))
},
gibbs.mean = function(gibbs.out){
get.sum <- gibbs.summary(gibbs.out)
return(LVM(dimension=dimension,n.nodes=n.nodes,
edge.list=edge.list,
edge.list.rows=edge.list.rows,
residual.variance=residual.variance,
outcome=outcome,
latent.vector.pos=get.sum,
latent.vector.pos.m=latent.vector.pos.m,
latent.vector.pos.V=latent.vector.pos.V))
},
gibbs.plot = function (gibbs.out, ...) {
get.sum <- gibbs.summary(gibbs.out)
plot (get.sum, main = "Mean Latent Vector Positions from Gibbs Sampler", ...)
},
gibbs.node.colors = function (gibbs.out) {
rep("#DDDDFF", n.nodes)
}
)
)
|
#' run.harmony - run Harmony alignment on a data.table
#'
#' This function allows you to run the 'Harmony' data alignment algorithm on
#' single cell or cytometry data stored in a data.table.
#'
#' @param dat NO DEFAULT. A data.table with all of the data you wish to align
#' @param align.cols NO DEFAULT. The columns you wish to align. For cytometry data, this can be the markers themselves or principal components. For single-cell seq data, principal components are recommended.
#' @param batch.col NO DEFAULT. The column that denotes the batch or dataset that each cell belongs to
#' @param append.name DEFAULT = '_aligned'. Text that will be appended to the new columns containing aligned data
#' @param do_pca DEFAULT = FALSE. Whether to perform PCA on input matrix.
#' @param npcs If doing PCA on input matrix, number of PCs to compute.
#' @param theta Diversity clustering penalty parameter. Specify for each
#'   variable in vars_use. Default theta=2. theta=0 does not encourage any
#'   diversity. Larger values of theta result in more diverse clusters.
#' @param lambda Ridge regression penalty parameter. Specify for each variable
#'   in vars_use. Default lambda=1. Lambda must be strictly positive. Smaller
#'   values result in more aggressive correction.
#' @param sigma Width of soft kmeans clusters. Default sigma=0.1. Sigma scales
#'   the distance from a cell to cluster centroids. Larger values of sigma
#'   result in cells assigned to more clusters. Smaller values of sigma make
#'   soft kmeans cluster approach hard clustering.
#' @param nclust Number of clusters in model. nclust=1 equivalent to simple
#'   linear regression.
#' @param tau Protection against overclustering small datasets with large ones.
#'   tau is the expected number of cells per cluster.
#' @param block.size What proportion of cells to update during clustering.
#'   Between 0 to 1, default 0.05. Larger values may be faster but less accurate
#' @param max.iter.cluster Maximum number of rounds to run clustering at each
#'   round of Harmony.
#' @param epsilon.cluster Convergence tolerance for clustering round of
#'   Harmony. Set to -Inf to never stop early.
#' @param max.iter.harmony Maximum number of rounds to run Harmony. One round
#'   of Harmony involves one clustering and one correction step.
#' @param epsilon.harmony Convergence tolerance for Harmony. Set to -Inf to
#'   never stop early.
#' @param plot_convergence Whether to print the convergence plot of the
#'   clustering objective function. TRUE to plot, FALSE to suppress. This can
#'   be useful for debugging.
#' @param return_object (Advanced Usage) Whether to return the Harmony object
#'   or only the corrected PCA embeddings.
#' @param verbose DEFAULT = FALSE. Whether to print progress messages. TRUE to
#'   print, FALSE to suppress.
#' @param reference_values (Advanced Usage) Defines reference dataset(s).
#'   Cells that have batch variables values matching reference_values will not
#'   be moved.
#' @param cluster_prior (Advanced Usage) Provides user defined clusters for
#'   cluster initialization. If the number of provided clusters C is less than
#'   K, Harmony will initialize K-C clusters with kmeans. C cannot exceed K.
#'
#' @return Returns a data.table with aligned data added in new columns (one
#'   per aligned column, suffixed with \code{append.name}). If
#'   \code{return_object = TRUE}, returns the Harmony object instead.
#'
#' @author Thomas M Ashhurst, \email{thomas.ashhurst@@sydney.edu.au}
#'
#' @examples
#' \dontrun{
#' cell.dat <- run.harmony(dat = cell.dat,
#'                         align.cols = c("PC1", "PC2", "PC3"),
#'                         batch.col = "Batch")
#' }
#'
#' @import data.table
#'
#' @export
run.harmony <- function(dat,
                        align.cols,
                        batch.col,
                        append.name = '_aligned',
                        do_pca = FALSE,
                        npcs = 20,
                        theta = NULL,
                        lambda = NULL,
                        sigma = 0.1,
                        nclust = NULL,
                        tau = 0,
                        block.size = 0.05,
                        max.iter.harmony = 10,
                        max.iter.cluster = 200,
                        epsilon.cluster = 1e-5,
                        epsilon.harmony = 1e-4,
                        plot_convergence = FALSE,
                        return_object = FALSE,
                        verbose = FALSE,
                        reference_values = NULL,
                        cluster_prior = NULL){

  ### Packages
  if(!is.element('Spectre', installed.packages()[,1])) stop('Spectre is required but not installed')
  if(!is.element('data.table', installed.packages()[,1])) stop('data.table is required but not installed')
  if(!is.element('harmony', installed.packages()[,1])) stop('harmony is required but not installed. You can install harmony by running devtools::install_github("immunogenomics/harmony")')

  ### Require packages
  require(Spectre)
  require(data.table)
  require(harmony)

  ### Data prep
  message("run.harmony - preparing data (1/4)")

  # Keep the untouched input so the aligned columns can be bound back on at
  # the end, and extract just the columns to align as a numeric matrix.
  start.dat <- dat
  dat <- dat[,align.cols, with = FALSE]
  nms <- names(dat)
  dat <- as.matrix(dat)

  # Cell-level metadata: a unique character ID per row plus the batch column.
  meta <- data.table()
  meta$CellID <- c(1:nrow(dat))
  meta$CellID <- as.character(meta$CellID)
  meta <- cbind(meta, start.dat[,batch.col, with = FALSE])
  # HarmonyMatrix() expects a data.frame for meta_data. Base as.data.frame()
  # avoids the previous dependency on tibble::as_tibble(), which was never
  # loaded by this function and could fail with "could not find function".
  meta <- as.data.frame(meta)

  ### Run harmony
  message("run.harmony - running harmony (2/4)")

  hrm.res <- harmony::HarmonyMatrix(data_mat = dat,
                                    meta_data = meta,
                                    vars_use = batch.col,
                                    do_pca = do_pca,
                                    npcs = npcs,
                                    theta = theta,
                                    lambda = lambda,
                                    sigma = sigma,
                                    nclust = nclust,
                                    tau = tau,
                                    block.size = block.size,
                                    max.iter.harmony = max.iter.harmony,
                                    max.iter.cluster = max.iter.cluster,
                                    epsilon.cluster = epsilon.cluster,
                                    epsilon.harmony = epsilon.harmony,
                                    plot_convergence = plot_convergence,
                                    return_object = return_object,
                                    verbose = verbose,
                                    reference_values = reference_values,
                                    cluster_prior = cluster_prior)

  ### Final preparation and return
  message("run.harmony - harmony complete, finalising data (3/4)")

  if (return_object) {
    # The Harmony object cannot be coerced to a data.table and bound onto the
    # input; return it directly as documented. (The previous version crashed
    # on as.data.table() in this case.)
    message("run.harmony - returning harmony object (4/4)")
    return(hrm.res)
  }

  # Rename the corrected embeddings (e.g. "PC1" -> "PC1_aligned") and append
  # them to the original, untouched data.
  hrm.res <- as.data.table(hrm.res)
  names(hrm.res) <- paste0(names(hrm.res), append.name)

  final.res <- cbind(start.dat, hrm.res)

  message("run.harmony - returning data (4/4)")
  return(final.res)
}
| /R/run.harmony.R | permissive | denvercal1234GitHub/Spectre | R | false | false | 7,088 | r | #' run.harmony - dun Harmony alignment on a data.table
#'
#' This function allows you to run the 'Harmony' data alignment algorithm on single cell or cytometry data stored in a data.table
#'
#' @usage run.harmony()
#'
#' @param dat NO DEFAULT. A data.table with all of the data you wish to align
#' @param align.cols NO default. The columns you wish to align. For cytometry data, this can be the markers themselves or principle components. For single-cell seq data, principle components are recommended.
#' @param batch.col NO default. The column that denotes the batch or dataset that each cell belongs to
#' @param append.name DEFAULT = '_aligned'. Text that will be appended to the new columns containing aligned data
#' @param do_pca DEFAULT = FALSE. Whether to perform PCA on input matrix.
#' @param npcs If doing PCA on input matrix, number of PCs to compute.
#' @param theta Diversity clustering penalty parameter. Specify for each
#' variable in vars_use Default theta=2. theta=0 does not encourage any
#' diversity. Larger values of theta result in more diverse clusters.
#' @param lambda Ridge regression penalty parameter. Specify for each variable
#' in vars_use.
#' Default lambda=1. Lambda must be strictly positive. Smaller values result
#' in more aggressive correction.
#' @param sigma Width of soft kmeans clusters. Default sigma=0.1. Sigma scales
#' the distance from a cell to cluster centroids. Larger values of sigma
#' result in cells assigned to more clusters. Smaller values of sigma make
#' soft kmeans cluster approach hard clustering.
#' @param nclust Number of clusters in model. nclust=1 equivalent to simple
#' linear regression.
#' @param tau Protection against overclustering small datasets with large ones.
#' tau is the expected number of cells per cluster.
#' @param block.size What proportion of cells to update during clustering.
#' Between 0 to 1, default 0.05. Larger values may be faster but less accurate
#' @param max.iter.cluster Maximum number of rounds to run clustering at each
#' round of Harmony.
#' @param epsilon.cluster Convergence tolerance for clustering round of
#' Harmony. Set to -Inf to never stop early.
#' @param max.iter.harmony Maximum number of rounds to run Harmony. One round
#' of Harmony involves one clustering and one correction step.
#' @param epsilon.harmony Convergence tolerance for Harmony. Set to -Inf to
#' never stop early.
#' @param plot_convergence Whether to print the convergence plot of the
#' clustering objective function. TRUE to plot, FALSE to suppress. This can be
#' useful for debugging.
#' @param return_object (Advanced Usage) Whether to return the Harmony object
#' or only the corrected PCA embeddings.
#' @param verbose DEFAULT = FALSE. Whether to print progress messages. TRUE to print,
#' FALSE to suppress.
#' @param reference_values (Advanced Usage) Defines reference dataset(s).
#' Cells that have batch variables values matching reference_values will not
#' be moved.
#' @param cluster_prior (Advanced Usage) Provides user defined clusters for
#' cluster initialization. If the number of provided clusters C is less than K,
#' Harmony will initialize K-C clusters with kmeans. C cannot exceed K.
#'
#'
#' @return Returns a data.table with aligned data added in new columns.
#'
#' @author Thomas M Ashhurst, \email{thomas.ashhurst@@sydney.edu.au}
#'
#' @examples
#' cell.dat <- run.harmony()
#'
#' @import data.table
#'
#' @export
run.harmony <- function(dat,
align.cols,
batch.col,
append.name = '_aligned',
do_pca = FALSE,
npcs = 20,
theta = NULL,
lambda = NULL,
sigma = 0.1,
nclust = NULL,
tau = 0,
block.size = 0.05,
max.iter.harmony = 10,
max.iter.cluster = 200,
epsilon.cluster = 1e-5,
epsilon.harmony = 1e-4,
plot_convergence = FALSE,
return_object = FALSE,
verbose = FALSE,
reference_values = NULL,
cluster_prior = NULL){
### Packages
if(!is.element('Spectre', installed.packages()[,1])) stop('Spectre is required but not installed')
if(!is.element('data.table', installed.packages()[,1])) stop('data.table is required but not installed')
if(!is.element('harmony', installed.packages()[,1])) stop('harmony is required but not installed. You can install harmony by running devtools::install_github("immunogenomics/harmony")')
### Require packages
require(Spectre)
require(data.table)
require(harmony)
### Data prep
message("run.harmony - preparing data (1/4)")
start.dat <- dat
dat <- dat[,align.cols, with = FALSE]
nms <- names(dat)
dat <- as.matrix(dat)
meta <- data.table()
meta$CellID <- c(1:nrow(dat))
meta$CellID <- as.character(meta$CellID)
meta <- cbind(meta, start.dat[,batch.col, with = FALSE])
meta <- as_tibble(meta)
### Run harmony
message("run.harmony - running harmony (2/4)")
hrm.res <- harmony::HarmonyMatrix(data_mat = dat,
meta_data = meta,
vars_use = batch.col,
do_pca = do_pca,
npcs = npcs,
theta = theta,
lambda = lambda,
sigma = sigma,
nclust = nclust,
tau = tau,
block.size = block.size,
max.iter.harmony = max.iter.harmony,
max.iter.cluster = max.iter.cluster,
epsilon.cluster = epsilon.cluster,
epsilon.harmony = epsilon.harmony,
plot_convergence = plot_convergence,
return_object = return_object,
verbose = verbose,
reference_values = reference_values,
cluster_prior = cluster_prior)
### Final preparation and return
message("run.harmony - harmony complete, finalising data (3/4)")
hrm.res <- as.data.table(hrm.res)
names(hrm.res) <- paste0(names(hrm.res), append.name)
hrm.res
final.res <- cbind(start.dat, hrm.res)
message("run.harmony - returning data (4/4)")
return(final.res)
}
|
#######################################
# Stats course lab assignment -- Part 4
# Pierre Dragicevic, Oct 2014
# Updated Sep 2015
#######################################
# The last part of this assignment consists in finishing analyzing the salesman
# dataset seen during the class. Remember that we were able to produce a bar chart
# with means, but could not figure out how much trust we should put in it. Adding
# confidence intervals will address the issue and we will be able to report to our
# manager.
# The data file is sales.csv.
# Tip: use the "head" instruction to get a textual preview on large tables.
# Feel free to reuse the code seen in the parts 1, 2 and 3. Since there are 6
# columns, the code may be a bit repetitive (and error-prone), so you can use the
# for instruction if you prefer, but you don't have to. Syntax is:
# for (i in seq(1, 6)) {
# ...
# }
# Note that for the poll datasets, people were on rows. Now they are in columns.
# Still, you should compute confidence intervals for each column separately, like
# before. Only the unit of statistical analysis has changed.
#
# For the poll, we needed to analyze people's responses for each of the two policies
# separately, and generalize them to the entire population of people. We say that
# the unit of analysis is the person. This is almost always the case in
# psychology and medical research.
#
# For the salesmen scenario, the problem is different. We are only interested in those
# six specific salesmen, not the entire population of possible salesmen.
# We need to examine sales performances across days for each salesman separately,
# and generalize these performances to the entire population of all possible days.
# We say that the unit of analysis is the day.
#
# It is always very important to clearly identify the unit of analysis. The inferences
# made can be very different.
###################################################################################
# -----> [write the analysis and graphing code here]
library(boot)
library(ggplot2)
salesData <- read.table("sales.csv", header = TRUE, sep = ",")
#function to calculate each seller's performance
# Point estimate plus a bootstrap BCa confidence interval for the mean.
# Returns c(mean, ci.lower, ci.upper). Requires the `boot` package.
bootstrapMeanCI <- function(data) {
  # Statistic handed to boot(): mean of the resampled observations.
  samplemean <- function(x, d) {
    return(mean(x[d]))
  }
  # With `d` missing, x[d] selects every observation, so this is mean(data).
  estimate <- samplemean(data)
  boot_out <- boot(data = data, statistic = samplemean, R = 1000)
  ci <- boot.ci(boot_out, type = "bca")
  # bca[4] and bca[5] hold the lower and upper interval endpoints.
  c(estimate, ci$bca[4], ci$bca[5])
}
#use bootstrapMeanCI function get each seller's mean,ci.lower and ci.upper
result1<-bootstrapMeanCI(salesData$Seller.1)
result2<-bootstrapMeanCI(salesData$Seller.2)
result3<-bootstrapMeanCI(salesData$Seller.3)
result4<-bootstrapMeanCI(salesData$Seller.4)
result5<-bootstrapMeanCI(salesData$Seller.5)
result6<-bootstrapMeanCI(salesData$Seller.6)
#merge all the results to one data frame
results<-data.frame(result1,result2,result3,result4,result5,result6)
seller<-c("seller.1","seller.2","seller.3","seller.4","seller.5","seller.6")
results<-data.frame(seller,t(results))
colnames(results) <- c("seller","mean","ci.lower","ci.upper")
#plotting the confidence intervals using ggplot
# Bar chart of per-seller mean sales with bootstrap CIs drawn as point ranges.
# Expects columns: seller, mean, ci.lower, ci.upper. Requires ggplot2.
plotBarchart<- function(data) {
  ggplot(data = data, aes(x = seller, y = mean)) +
    geom_bar(stat = "identity", width = 0.5, fill = "#00000010") +
    theme_bw() +
    ylab("Mean support") +
    theme(axis.title.x = element_blank(),
          axis.ticks.x = element_blank(),
          panel.grid.major.x = element_blank(),
          panel.grid.minor.y = element_blank()) +
    geom_pointrange(aes(ymin = ci.lower, ymax = ci.upper), size = 0.9)
}
dev.new(width=3, height=3)
plotBarchart(results)
###################################################################################
# -----> [write your interpretations and your conclusions here. What are you going to
# report to the manager?]
# We judge the performance of each seller according to the following 3 indicators:
# the lower and upper bounds of the confidence interval, and the point estimate
# (mean). From the data set, it is clear to see that:
# seller 6's performances are the highest for all 3 indicators;
# seller 3's performances are the lowest for all 3 indicators;
# for sellers 1, 2 and 5, their performances on each indicator are quite close
# to each other, better than seller 4's but worse than seller 6's. In
# conclusion, we rank the total performance of the sellers as:
# seller 6 > seller 1,2, and 5 > seller 4 > seller 3 | /part4-salesmen.R | no_license | wangcan04/Ranalysis-visualize | R | false | false | 4,514 | r | #######################################
# Stats course lab assignment -- Part 4
# Pierre Dragicevic, Oct 2014
# Updated Sep 2015
#######################################
# The last part of this assignment consists in finishing analyzing the salesman
# dataset seen during the class. Remember that we were able to produce a bar chart
# with means, but could not figure out how much trust we should put in it. Adding
# confidence intervals will address the issue and we will be able to report to our
# manager.
# The data file is sales.csv.
# Tip: use the "head" instruction to get a textual preview on large tables.
# Feel free to reuse the code seen in the parts 1, 2 and 3. Since there are 6
# columns, the code may be a bit repetitive (and error-prone), so you can use the
# for instruction if you prefer, but you don't have to. Syntax is:
# for (i in seq(1, 6)) {
# ...
# }
# Note that for the poll datasets, people were on rows. Now they are in columns.
# Still, you should compute confidence intervals for each column separately, like
# before. Only the unit of statistical analysis has changed.
#
# For the poll, we needed to analyze people's responses for each of the two policies
# separately, and generalize them to the entire population of people. We say that
# the unit of analysis is the person. This is almost always the case in
# psychology and medical research.
#
# For the salesmen scenario, the problem is different. We are only interested in those
# six specific salesmen, not the entire population of possible salesmen.
# We need to examine sales performances across days for each salesman separately,
# and generalize these performances to the entire population of all possible days.
# We say that the unit of analysis is the day.
#
# It is always very important to clearly identify the unit of analysis. The inferences
# made can be very different.
###################################################################################
# -----> [write the analysis and graphing code here]
library(boot)
library(ggplot2)
salesData <- read.table("sales.csv", header = TRUE, sep = ",")
#function to calculate each seller's performance
bootstrapMeanCI <- function(data) {
samplemean <- function(x, d) {
return(mean(x[d]))
}
point_Estimate<-samplemean(data)
bootstrap_data<- boot(data = data, statistic = samplemean, R = 1000)
bootci <- boot.ci(bootstrap_data, type = "bca")
result<-c(point_Estimate,bootci$bca[4],bootci$bca[5])
return(result)
}
#use bootstrapMeanCI function get each seller's mean,ci.lower and ci.upper
result1<-bootstrapMeanCI(salesData$Seller.1)
result2<-bootstrapMeanCI(salesData$Seller.2)
result3<-bootstrapMeanCI(salesData$Seller.3)
result4<-bootstrapMeanCI(salesData$Seller.4)
result5<-bootstrapMeanCI(salesData$Seller.5)
result6<-bootstrapMeanCI(salesData$Seller.6)
#merge all the results to one data frame
results<-data.frame(result1,result2,result3,result4,result5,result6)
seller<-c("seller.1","seller.2","seller.3","seller.4","seller.5","seller.6")
results<-data.frame(seller,t(results))
colnames(results) <- c("seller","mean","ci.lower","ci.upper")
# Bar chart of each seller's mean daily sales, with the bootstrap confidence
# interval drawn as a point range on top of the translucent bars.
plotBarchart <- function(data) {
  bare_axes <- theme(axis.title.x = element_blank(),
                     axis.ticks.x = element_blank(),
                     panel.grid.major.x = element_blank(),
                     panel.grid.minor.y = element_blank())
  ggplot(data = data, aes(x = seller, y = mean)) +
    geom_bar(stat = "identity", width = 0.5, fill = "#00000010") +
    theme_bw() +
    ylab("Mean support") +
    bare_axes +
    geom_pointrange(aes(ymin = ci.lower, ymax = ci.upper), size = 0.9)
}
# Open a 3x3-inch device, then draw the chart.
dev.new(width = 3, height = 3)
plotBarchart(results)
###################################################################################
# -----> [write your interpretations and your conclusions here. What are you going to
# report to the manager?]
# we judge the performance of each seller according to the following 3 indicators:
# the lower and upper bounds of the confidence interval and the point
# estimate (the mean). From the data set, it is clear that:
# seller 6's performance is the highest on all 3 indicators;
# seller 3's performance is the lowest on all 3 indicators;
# for sellers 1, 2 and 5, their performance on each indicator is quite close
# to one another -- better than seller 4 but worse than seller 6. In
# conclusion, we rank the total performance of the sellers as:
# seller 6 > seller 1,2, and 5 > seller 4 > seller 3 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/G.RESULT-data.R
\docType{data}
\name{G_RESULT}
\alias{G_RESULT}
\title{G_RESULT data set}
\format{An object of class \code{CompressedGRangesList} of length 5397.}
\usage{
data("G_RESULT")
}
\description{
G_RESULT data set
}
\references{
The "Zwyx" package, 2018 (in press).
}
\keyword{datasets}
| /man/G_RESULT.Rd | no_license | bioboatr/Zwyx | R | false | true | 390 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/G.RESULT-data.R
\docType{data}
\name{G_RESULT}
\alias{G_RESULT}
\title{G_RESULT data set}
\format{An object of class \code{CompressedGRangesList} of length 5397.}
\usage{
data("G_RESULT")
}
\description{
G_RESULT data set
}
\references{
The "Zwyx" package, 2018 (in press).
}
\keyword{datasets}
|
###### TCGA Clinical Data Analysis - BRCA #######
# cleaning and exploring dataset
#### Loading Libraries ####
# NOTE(review): unconditional install.packages() re-installs the packages on
# every run -- consider guarding with requireNamespace() checks.
install.packages("tidyverse", dependencies = TRUE)
install.packages("skimr", dependencies = TRUE)
install.packages("finalfit", dependencies = TRUE)
library(tidyverse)
library(skimr)
library(finalfit)
library(dplyr)    # already attached by tidyverse; redundant but harmless
library(ggplot2)  # already attached by tidyverse; redundant but harmless
#### Download clinical data at combined_studies from cbioportal for Breast Cancer and TCGAA ####
# setwd()
# Clinical table (tab-separated) and ancestry calls (comma-separated).
brca_clin_raw <- read.delim("combined_study_clinical_brca.tsv")
brca_ancestry <- read.delim("tcga_ancestry_brca.csv", sep = ",", na.strings = "Not Available")
# Changing variables names: upper-case, dots -> underscores, then collapse the
# double underscores that produces.
colnames(brca_clin_raw) <- gsub("\\_\\_", "\\_", gsub("\\.", "\\_", toupper(colnames(brca_clin_raw))))
colnames(brca_ancestry) <- gsub("\\_\\_", "\\_", gsub("\\.", "\\_", toupper(colnames(brca_ancestry))))
# Selecting the important variables
brca_clin_raw <- brca_clin_raw[, c("PATIENT_ID", "ONCOTREE_CODE", "CANCER_TYPE_DETAILED", "AGE_AT_DIAGNOSIS", "DISEASE_FREE_EVENT", "DISEASE_FREE_MONTHS_", "OVERALL_SURVIVAL_MONTHS_", "OVERALL_SURVIVAL_STATUS", "DISEASE_FREE_STATUS", "TIME_TO_DEATH_MONTHS_", "TIME_TO_METASTASIS_MONTHS_", "METASTATIC_RECURRENCE_TIME", "PATIENT_S_VITAL_STATUS", "MEDR_TIME_TO_METASTATIC_DIAGNOSIS_CALCULATED_MONTHS_", "TYPE_OF_BREAST_SURGERY")]
# Keep only patients with a recorded disease-free time.
brca_clin_raw <- brca_clin_raw[!is.na(brca_clin_raw$DISEASE_FREE_MONTHS_),]
# Combined studies: merge() with default all = FALSE keeps only patients
# present in both tables (inner join on PATIENT_ID).
studies_brca_merge <- merge(brca_clin_raw, brca_ancestry, by = "PATIENT_ID")
# Remove duplicates (keeps the first row per patient ID)
studies_brca_merge <- studies_brca_merge[!duplicated(studies_brca_merge$PATIENT_ID), ]
# Visualizing
class(studies_brca_merge)
dim(studies_brca_merge)
names(studies_brca_merge)
glimpse(studies_brca_merge)   # dplyr column-wise preview
skim(studies_brca_merge)      # skimr summary statistics
#### Analyzing clinical data ####
# The three frequency plots below (disease-free status, genetic ancestry,
# oncotree code) were copy-pasted with only the column name changed, so the
# counting and plotting logic is factored into two helpers. Output matches
# the originals: bars ordered by decreasing frequency, percentage labels
# above the bars, raw counts printed inside them.
# Count the levels of one column of `data`; returns a data frame holding the
# column itself, N (count) and Perc (proportion of all rows).
count_freq <- function(data, var) {
  freq <- data %>%
    count(.data[[var]]) %>%
    mutate(perc = n / nrow(data))
  colnames(freq) <- c(var, "N", "Perc")
  freq
}
# Frequency bar chart for the table produced by count_freq().
# Fixes vs. the original chunks: the long-deprecated `show_guide` argument is
# replaced by `show.legend`, and the fragile df$col references inside aes()
# are replaced by the `.data` pronoun.
plot_freq <- function(freq, var, xlabel) {
  suppressWarnings(
    ggplot(freq, aes(x = reorder(.data[[var]], -.data$Perc), y = .data$Perc)) +
      scale_fill_brewer() +
      geom_bar(aes(y = .data$Perc, fill = factor(.data$Perc)),
               colour = "black", stat = "identity") +
      geom_text(aes(label = scales::percent(.data$Perc), fill = factor(..x..)),
                stat = "identity", vjust = -.4) +
      scale_y_continuous(labels = scales::percent) +
      geom_text(data = freq,
                aes(x = .data[[var]], y = .data$Perc, label = .data$N),
                vjust = 1.8, colour = "brown", size = 4, show.legend = FALSE) +
      guides(fill = FALSE) +
      ylab("Frequencies") +
      xlab(xlabel))
}
# Frequencies of Each Status of Disease Status of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "DISEASE_FREE_STATUS")
plot_freq(subtype_brca, "DISEASE_FREE_STATUS", "Disease Status of Breast Cancer")
# Frequencies of Each Status of EIGENSTRAT of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "EIGENSTRAT")
plot_freq(subtype_brca, "EIGENSTRAT", "Ancestry of Breast Cancer")
# Frequencies of Each Status of SUBTYPE of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "ONCOTREE_CODE")
plot_freq(subtype_brca, "ONCOTREE_CODE", "Oncotree of Breast Cancer")
# Cumulative incidence of Disease Free/Recurred in BRCA Samples by Ancestry
library(survival)
library(survminer)
# Cumulative Incidence Function (competing risks)
library(cmprsk)  # was require(); library() fails loudly if the package is missing
data <- data.frame(studies_brca_merge[, c("DISEASE_FREE_MONTHS_",
                                          "DISEASE_FREE_STATUS",
                                          "EIGENSTRAT")],
                   check.names = TRUE, check.rows = TRUE)
# Fix: use the real column name DISEASE_FREE_MONTHS_ (trailing underscore).
# The original wrote data$DISEASE_FREE_MONTHS, which only worked through
# `$` partial matching on data frames.
fit_subtype <- cuminc(ftime = data$DISEASE_FREE_MONTHS_,
                      fstatus = data$DISEASE_FREE_STATUS,
                      group = data$EIGENSTRAT)
fit_subtype
ggcompetingrisks(fit_subtype, palette = "Dark2",
                 legend = "top",
                 ggtheme = theme_bw())
# Downloading BRCA Samples by Normal Tissue and Tumour
# NOTE(review): GDCquery/GDCdownload/GDCprepare/TCGAquery_SampleTypes come
# from the Bioconductor package TCGAbiolinks, which is never loaded in this
# script -- confirm library(TCGAbiolinks) is called before this point.
query.brca <- GDCquery(project = "TCGA-BRCA",
                       data.category = "Gene expression",
                       data.type = "Gene expression quantification",
                       platform = "Illumina HiSeq",
                       file.type = "normalized_results",
                       experimental.strategy = "RNA-Seq",
                       legacy = TRUE)
GDCdownload(query.brca, method = "api", files.per.chunk = 10)
data.brca <- GDCprepare(query.brca)
# Patient IDs split by EIGENSTRAT ancestry group ("EA" / "AA" codes from the
# ancestry file).
listPatient_EA <- as.vector(studies_brca_merge$PATIENT_ID[studies_brca_merge$EIGENSTRAT == "EA"])
listPatient_AA <- as.vector(studies_brca_merge$PATIENT_ID[studies_brca_merge$EIGENSTRAT == "AA"])
# substr(barcode, 1, 12) extracts the patient-level portion of each sample
# barcode so it can be matched against PATIENT_ID.
samples_EA <- query.brca$results[[1]]$cases[substr(query.brca$results[[1]]$cases,1,12) %in% listPatient_EA]
samples_AA <- query.brca$results[[1]]$cases[substr(query.brca$results[[1]]$cases,1,12) %in% listPatient_AA]
# Tumor
samples_EA_TP <- TCGAquery_SampleTypes(barcode = samples_EA, typesample = "TP")
samples_AA_TP <- TCGAquery_SampleTypes(barcode = samples_AA, typesample = "TP")
# Normal Tissue
samples_EA_NT <- TCGAquery_SampleTypes(barcode = samples_EA, typesample = "NT")
samples_AA_NT <- TCGAquery_SampleTypes(barcode = samples_AA, typesample = "NT") | /TCGAClinic_BRCA.R | no_license | raquelpontesm/BRCA-TCGAA | R | false | false | 6,903 | r | ###### TCGA Clinical Data Analysis - BRCA #######
# cleaning and exploring dataset
#### Loading Libraries ####
# NOTE(review): unconditional install.packages() re-installs the packages on
# every run -- consider guarding with requireNamespace() checks.
install.packages("tidyverse", dependencies = TRUE)
install.packages("skimr", dependencies = TRUE)
install.packages("finalfit", dependencies = TRUE)
library(tidyverse)
library(skimr)
library(finalfit)
library(dplyr)    # already attached by tidyverse; redundant but harmless
library(ggplot2)  # already attached by tidyverse; redundant but harmless
#### Download clinical data at combined_studies from cbioportal for Breast Cancer and TCGAA ####
# setwd()
# Clinical table (tab-separated) and ancestry calls (comma-separated).
brca_clin_raw <- read.delim("combined_study_clinical_brca.tsv")
brca_ancestry <- read.delim("tcga_ancestry_brca.csv", sep = ",", na.strings = "Not Available")
# Changing variables names: upper-case, dots -> underscores, then collapse the
# double underscores that produces.
colnames(brca_clin_raw) <- gsub("\\_\\_", "\\_", gsub("\\.", "\\_", toupper(colnames(brca_clin_raw))))
colnames(brca_ancestry) <- gsub("\\_\\_", "\\_", gsub("\\.", "\\_", toupper(colnames(brca_ancestry))))
# Selecting the important variables
brca_clin_raw <- brca_clin_raw[, c("PATIENT_ID", "ONCOTREE_CODE", "CANCER_TYPE_DETAILED", "AGE_AT_DIAGNOSIS", "DISEASE_FREE_EVENT", "DISEASE_FREE_MONTHS_", "OVERALL_SURVIVAL_MONTHS_", "OVERALL_SURVIVAL_STATUS", "DISEASE_FREE_STATUS", "TIME_TO_DEATH_MONTHS_", "TIME_TO_METASTASIS_MONTHS_", "METASTATIC_RECURRENCE_TIME", "PATIENT_S_VITAL_STATUS", "MEDR_TIME_TO_METASTATIC_DIAGNOSIS_CALCULATED_MONTHS_", "TYPE_OF_BREAST_SURGERY")]
# Keep only patients with a recorded disease-free time.
brca_clin_raw <- brca_clin_raw[!is.na(brca_clin_raw$DISEASE_FREE_MONTHS_),]
# Combined studies: merge() with default all = FALSE keeps only patients
# present in both tables (inner join on PATIENT_ID).
studies_brca_merge <- merge(brca_clin_raw, brca_ancestry, by = "PATIENT_ID")
# Remove duplicates (keeps the first row per patient ID)
studies_brca_merge <- studies_brca_merge[!duplicated(studies_brca_merge$PATIENT_ID), ]
# Visualizing
class(studies_brca_merge)
dim(studies_brca_merge)
names(studies_brca_merge)
glimpse(studies_brca_merge)   # dplyr column-wise preview
skim(studies_brca_merge)      # skimr summary statistics
#### Analyzing clinical data ####
# The three frequency plots below (disease-free status, genetic ancestry,
# oncotree code) were copy-pasted with only the column name changed, so the
# counting and plotting logic is factored into two helpers. Output matches
# the originals: bars ordered by decreasing frequency, percentage labels
# above the bars, raw counts printed inside them.
# Count the levels of one column of `data`; returns a data frame holding the
# column itself, N (count) and Perc (proportion of all rows).
count_freq <- function(data, var) {
  freq <- data %>%
    count(.data[[var]]) %>%
    mutate(perc = n / nrow(data))
  colnames(freq) <- c(var, "N", "Perc")
  freq
}
# Frequency bar chart for the table produced by count_freq().
# Fixes vs. the original chunks: the long-deprecated `show_guide` argument is
# replaced by `show.legend`, and the fragile df$col references inside aes()
# are replaced by the `.data` pronoun.
plot_freq <- function(freq, var, xlabel) {
  suppressWarnings(
    ggplot(freq, aes(x = reorder(.data[[var]], -.data$Perc), y = .data$Perc)) +
      scale_fill_brewer() +
      geom_bar(aes(y = .data$Perc, fill = factor(.data$Perc)),
               colour = "black", stat = "identity") +
      geom_text(aes(label = scales::percent(.data$Perc), fill = factor(..x..)),
                stat = "identity", vjust = -.4) +
      scale_y_continuous(labels = scales::percent) +
      geom_text(data = freq,
                aes(x = .data[[var]], y = .data$Perc, label = .data$N),
                vjust = 1.8, colour = "brown", size = 4, show.legend = FALSE) +
      guides(fill = FALSE) +
      ylab("Frequencies") +
      xlab(xlabel))
}
# Frequencies of Each Status of Disease Status of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "DISEASE_FREE_STATUS")
plot_freq(subtype_brca, "DISEASE_FREE_STATUS", "Disease Status of Breast Cancer")
# Frequencies of Each Status of EIGENSTRAT of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "EIGENSTRAT")
plot_freq(subtype_brca, "EIGENSTRAT", "Ancestry of Breast Cancer")
# Frequencies of Each Status of SUBTYPE of Breast Cancer
subtype_brca <- count_freq(studies_brca_merge, "ONCOTREE_CODE")
plot_freq(subtype_brca, "ONCOTREE_CODE", "Oncotree of Breast Cancer")
# Cumulative incidence of Disease Free/Recurred in BRCA Samples by Ancestry
library(survival)
library(survminer)
# Cumulative Incidence Function (competing risks)
library(cmprsk)  # was require(); library() fails loudly if the package is missing
data <- data.frame(studies_brca_merge[, c("DISEASE_FREE_MONTHS_",
                                          "DISEASE_FREE_STATUS",
                                          "EIGENSTRAT")],
                   check.names = TRUE, check.rows = TRUE)
# Fix: use the real column name DISEASE_FREE_MONTHS_ (trailing underscore).
# The original wrote data$DISEASE_FREE_MONTHS, which only worked through
# `$` partial matching on data frames.
fit_subtype <- cuminc(ftime = data$DISEASE_FREE_MONTHS_,
                      fstatus = data$DISEASE_FREE_STATUS,
                      group = data$EIGENSTRAT)
fit_subtype
ggcompetingrisks(fit_subtype, palette = "Dark2",
                 legend = "top",
                 ggtheme = theme_bw())
# Downloading BRCA Samples by Normal Tissue and Tumour
# NOTE(review): GDCquery/GDCdownload/GDCprepare/TCGAquery_SampleTypes come
# from the Bioconductor package TCGAbiolinks, which is never loaded in this
# script -- confirm library(TCGAbiolinks) is called before this point.
query.brca <- GDCquery(project = "TCGA-BRCA",
                       data.category = "Gene expression",
                       data.type = "Gene expression quantification",
                       platform = "Illumina HiSeq",
                       file.type = "normalized_results",
                       experimental.strategy = "RNA-Seq",
                       legacy = TRUE)
GDCdownload(query.brca, method = "api", files.per.chunk = 10)
data.brca <- GDCprepare(query.brca)
# Patient IDs split by EIGENSTRAT ancestry group ("EA" / "AA" codes from the
# ancestry file).
listPatient_EA <- as.vector(studies_brca_merge$PATIENT_ID[studies_brca_merge$EIGENSTRAT == "EA"])
listPatient_AA <- as.vector(studies_brca_merge$PATIENT_ID[studies_brca_merge$EIGENSTRAT == "AA"])
# substr(barcode, 1, 12) extracts the patient-level portion of each sample
# barcode so it can be matched against PATIENT_ID.
samples_EA <- query.brca$results[[1]]$cases[substr(query.brca$results[[1]]$cases,1,12) %in% listPatient_EA]
samples_AA <- query.brca$results[[1]]$cases[substr(query.brca$results[[1]]$cases,1,12) %in% listPatient_AA]
# Tumor
samples_EA_TP <- TCGAquery_SampleTypes(barcode = samples_EA, typesample = "TP")
samples_AA_TP <- TCGAquery_SampleTypes(barcode = samples_AA, typesample = "TP")
# Normal Tissue
samples_EA_NT <- TCGAquery_SampleTypes(barcode = samples_EA, typesample = "NT")
samples_AA_NT <- TCGAquery_SampleTypes(barcode = samples_AA, typesample = "NT") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised-model-deployment.R
\docType{class}
\name{SupervisedModelDeployment}
\alias{SupervisedModelDeployment}
\title{Deploy predictive models, created on your data}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SupervisedModelDeployment(object)
}
\arguments{
\item{object}{of SupervisedModelDeploymentParams class for $new() constructor}
}
\description{
This step allows one to deploy models created on your data
and helps determine which performs best.
}
\references{
\url{http://healthcare.ai}
}
\seealso{
\code{\link{healthcareai}}
}
\keyword{datasets}
| /man/SupervisedModelDeployment.Rd | permissive | jamesjsommer/healthcareai-r | R | false | true | 665 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supervised-model-deployment.R
\docType{class}
\name{SupervisedModelDeployment}
\alias{SupervisedModelDeployment}
\title{Deploy predictive models, created on your data}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SupervisedModelDeployment(object)
}
\arguments{
\item{object}{of SupervisedModelDeploymentParams class for $new() constructor}
}
\description{
This step allows one to deploy models created on your data
and helps determine which performs best.
}
\references{
\url{http://healthcare.ai}
}
\seealso{
\code{\link{healthcareai}}
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-deprecated.R
\name{getDefaultNodeReverseSelectionColor}
\alias{getDefaultNodeReverseSelectionColor}
\alias{getDefaultNodeReverseSelectionColordefunct}
\title{DEFUNCT: getDefaultNodeReverseSelectionColor}
\usage{
getDefaultNodeReverseSelectionColordefunct
}
\value{
None
}
\description{
This function is defunct and will be removed in the next release.
}
| /man/getDefaultNodeReverseSelectionColor-defunct.Rd | permissive | olbeimarton/RCy3 | R | false | true | 437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RCy3-deprecated.R
\name{getDefaultNodeReverseSelectionColor}
\alias{getDefaultNodeReverseSelectionColor}
\alias{getDefaultNodeReverseSelectionColordefunct}
\title{DEFUNCT: getDefaultNodeReverseSelectionColor}
\usage{
getDefaultNodeReverseSelectionColordefunct
}
\value{
None
}
\description{
This function is defunct and will be removed in the next release.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/promillo.R
\name{tell_me_how_drunk}
\alias{tell_me_how_drunk}
\title{Tell me how drunk}
\usage{
tell_me_how_drunk(
age,
sex = c("male", "female"),
height,
weight,
drinking_time,
drinks
)
}
\arguments{
\item{age}{person's age in years}
\item{sex}{person's sex (male or female)}
\item{height}{person's height in cm}
\item{weight}{person's weight in Kg}
\item{drinking_time}{drinking time in hours}
\item{drinks}{drinks vector e.g., c("schnaps", "wein")}
}
\value{
promille Per mille blood alcohol value
}
\description{
Tell me how drunk
}
| /promillo/man/tell_me_how_drunk.Rd | permissive | fort-w2021/promillo-ex-muskuloes | R | false | true | 628 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/promillo.R
\name{tell_me_how_drunk}
\alias{tell_me_how_drunk}
\title{Tell me how drunk}
\usage{
tell_me_how_drunk(
age,
sex = c("male", "female"),
height,
weight,
drinking_time,
drinks
)
}
\arguments{
\item{age}{person's age in years}
\item{sex}{person's sex (male or female)}
\item{height}{person's height in cm}
\item{weight}{person's weight in Kg}
\item{drinking_time}{drinking time in hours}
\item{drinks}{drinks vector e.g., c("schnaps", "wein")}
}
\value{
promille Per mille blood alcohol value
}
\description{
Tell me how drunk
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11590
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11170
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11170
c
c Input Parameter (command line, file):
c input filename QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4790
c no.of clauses 11590
c no.of taut cls 196
c
c Output Parameters:
c remaining no.of clauses 11170
c
c QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-32.qdimacs 4790 11590 E1 [81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 121 122 123 124 125 126 127 128 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 297 298 299 300 301 302 303 304 448 449 466 467 484 485 502 503 520 521 538 539 556 557 574 575 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 1238 1239 1240 1241 1242 1243 1244 1245 1319 1355 1357 1360 1364 1368 1370 1378 1390 1392 1393 1424 1425 1434 1435 1461 1470 1474 1491 1501 1506 1507 1514 1515 1516 1531 1536 1541 1544 1547 1551 1564 1567 1572 1593 1594 1656 1657 1682 1683 1708 1709 1734 1735 1923 1924 1941 1942 1959 1960 1977 1978 1995 1996 2013 2014 2031 2032 2049 2050 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2392 2428 2430 2433 2437 2441 2443 2451 2463 2465 2466 2497 2498 2507 2508 2534 2543 2547 2564 2574 2579 2580 2587 2588 2589 2604 2609 2614 2617 2620 2624 2637 2640 2645 2666 2667 2679 2680 2705 2706 2731 2732 2757 2758 2783 2784 2809 2810 2835 2836 2861 2862 2887 2888 2913 2914 2939 2940 2965 2966 2987 2988 2989 2990 2991 2992 2993 2994 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3551 3587 3589 3592 3596 3600 3602 3610 3622 3624 3625 3656 3657 3666 3667 3693 3702 3706 3723 3733 3738 3739 3746 3747 3748 3763 3768 3773 3776 3779 3783 3796 3799 3804 3825 3826 3838 3839 3864 3865 3890 3891 3916 3917 3942 3943 3968 3969 3994 3995 4020 4021 4046 4047 4072 4073 4098 4099 4124 4125 4171 4172 4173 4174 4175 4176 4177 4178] 196 160 4290 11170 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Seidl/ASP_Program_Inclusion/T-adeu-32/T-adeu-32.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 2,312 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11590
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11170
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11170
c
c Input Parameter (command line, file):
c input filename QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4790
c no.of clauses 11590
c no.of taut cls 196
c
c Output Parameters:
c remaining no.of clauses 11170
c
c QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-32.qdimacs 4790 11590 E1 [81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 121 122 123 124 125 126 127 128 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 297 298 299 300 301 302 303 304 448 449 466 467 484 485 502 503 520 521 538 539 556 557 574 575 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 1238 1239 1240 1241 1242 1243 1244 1245 1319 1355 1357 1360 1364 1368 1370 1378 1390 1392 1393 1424 1425 1434 1435 1461 1470 1474 1491 1501 1506 1507 1514 1515 1516 1531 1536 1541 1544 1547 1551 1564 1567 1572 1593 1594 1656 1657 1682 1683 1708 1709 1734 1735 1923 1924 1941 1942 1959 1960 1977 1978 1995 1996 2013 2014 2031 2032 2049 2050 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2392 2428 2430 2433 2437 2441 2443 2451 2463 2465 2466 2497 2498 2507 2508 2534 2543 2547 2564 2574 2579 2580 2587 2588 2589 2604 2609 2614 2617 2620 2624 2637 2640 2645 2666 2667 2679 2680 2705 2706 2731 2732 2757 2758 2783 2784 2809 2810 2835 2836 2861 2862 2887 2888 2913 2914 2939 2940 2965 2966 2987 2988 2989 2990 2991 2992 2993 2994 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3551 3587 3589 3592 3596 3600 3602 3610 3622 3624 3625 3656 3657 3666 3667 3693 3702 3706 3723 3733 3738 3739 3746 3747 3748 3763 3768 3773 3776 3779 3783 3796 3799 3804 3825 3826 3838 3839 3864 3865 3890 3891 3916 3917 3942 3943 3968 3969 3994 3995 4020 4021 4046 4047 4072 4073 4098 4099 4124 4125 4171 4172 4173 4174 4175 4176 4177 4178] 196 160 4290 11170 RED
|
## -----------------------------------------------------------------
## IGraph R package
## Copyright (C) 2005-2014 Gabor Csardi <csardi.gabor@gmail.com>
## 334 Harvard street, Cambridge, MA 02139 USA
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301 USA
##
## -----------------------------------------------------------------
#' Generate scale-free graphs according to the Barabasi-Albert model
#'
#' The BA-model is a very simple stochastic algorithm for building a graph.
#'
#' This is a simple stochastic algorithm to generate a graph. It is a discrete
#' time step model and in each time step a single vertex is added.
#'
#' We start with a single vertex and no edges in the first time step. Then we
#' add one vertex in each time step and the new vertex initiates some edges to
#' old vertices. The probability that an old vertex is chosen is given by
#' \deqn{P[i] \sim k_i^\alpha+a}{P[i] ~ k[i]^alpha + a} where \eqn{k_i}{k[i]}
#' is the in-degree of vertex \eqn{i} in the current time step (more precisely
#' the number of adjacent edges of \eqn{i} which were not initiated by \eqn{i}
#' itself) and \eqn{\alpha}{alpha} and \eqn{a} are parameters given by the
#' \code{power} and \code{zero.appeal} arguments.
#'
#' The number of edges initiated in a time step is given by the \code{m},
#' \code{out.dist} and \code{out.seq} arguments. If \code{out.seq} is given and
#' not NULL then it gives the number of edges to add in a vector, the first
#' element is ignored, the second is the number of edges to add in the second
#' time step and so on. If \code{out.seq} is not given or null and
#' \code{out.dist} is given and not NULL then it is used as a discrete
#' distribution to generate the number of edges in each time step. Its first
#' element is the probability that no edges will be added, the second is the
#' probability that one edge is added, etc. (\code{out.dist} does not need to
#' sum up to one, it normalized automatically.) \code{out.dist} should contain
#' non-negative numbers and at east one element should be positive.
#'
#' If both \code{out.seq} and \code{out.dist} are omitted or NULL then \code{m}
#' will be used, it should be a positive integer constant and \code{m} edges
#' will be added in each time step.
#'
#' \code{sample_pa} generates a directed graph by default, set
#' \code{directed} to \code{FALSE} to generate an undirected graph. Note that
#' even if an undirected graph is generated \eqn{k_i}{k[i]} denotes the number
#' of adjacent edges not initiated by the vertex itself and not the total (in-
#' + out-) degree of the vertex, unless the \code{out.pref} argument is set to
#' \code{TRUE}.
#'
#' @aliases sample_pa barabasi.game ba.game
#' @param n Number of vertices.
#' @param power The power of the preferential attachment, the default is one,
#' ie. linear preferential attachment.
#' @param m Numeric constant, the number of edges to add in each time step This
#' argument is only used if both \code{out.dist} and \code{out.seq} are omitted
#' or NULL.
#' @param out.dist Numeric vector, the distribution of the number of edges to
#' add in each time step. This argument is only used if the \code{out.seq}
#' argument is omitted or NULL.
#' @param out.seq Numeric vector giving the number of edges to add in each time
#' step. Its first element is ignored as no edges are added in the first time
#' step.
#' @param out.pref Logical, if true the total degree is used for calculating
#' the citation probability, otherwise the in-degree is used.
#' @param zero.appeal The \sQuote{attractiveness} of the vertices with no
#' adjacent edges. See details below.
#' @param directed Whether to create a directed graph.
#' @param algorithm The algorithm to use for the graph generation.
#' \code{psumtree} uses a partial prefix-sum tree to generate the graph, this
#' algorithm can handle any \code{power} and \code{zero.appeal} values and
#' never generates multiple edges. \code{psumtree-multiple} also uses a
#' partial prefix-sum tree, but the generation of multiple edges is allowed.
#' Before the 0.6 version igraph used this algorithm if \code{power} was not
#' one, or \code{zero.appeal} was not one. \code{bag} is the algorithm that
#' was previously (before version 0.6) used if \code{power} was one and
#' \code{zero.appeal} was one as well. It works by putting the ids of the
#' vertices into a bag (mutliset, really), exactly as many times as their
#' (in-)degree, plus once more. Then the required number of cited vertices are
#' drawn from the bag, with replacement. This method might generate multiple
#' edges. It only works if \code{power} and \code{zero.appeal} are equal one.
#' @param start.graph \code{NULL} or an igraph graph. If a graph, then the
#' supplied graph is used as a starting graph for the preferential attachment
#' algorithm. The graph should have at least one vertex. If a graph is supplied
#' here and the \code{out.seq} argument is not \code{NULL}, then it should
#' contain the out degrees of the new vertices only, not the ones in the
#' \code{start.graph}.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}
#' @references Barabasi, A.-L. and Albert R. 1999. Emergence of scaling in
#' random networks \emph{Science}, 286 509--512.
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_pa(10000)
#' degree_distribution(g)
#'
sample_pa <- function(n, power=1, m=NULL, out.dist=NULL, out.seq=NULL,
                      out.pref=FALSE, zero.appeal=1,
                      directed=TRUE, algorithm=c("psumtree",
                        "psumtree-multiple", "bag"),
                      start.graph=NULL) {
  ## A user-supplied seed graph must itself be an igraph object.
  if (!is.null(start.graph) && !is_igraph(start.graph)) {
    stop("`start.graph' not an `igraph' object")
  }
  # Checks
  ## Argument precedence: `out.seq` wins over both `m` and `out.dist`, and
  ## `out.dist` wins over `m`. Conflicting extras are dropped with a warning.
  if (! is.null(out.seq) && (!is.null(m) || !is.null(out.dist))) {
    warning("if `out.seq' is given `m' and `out.dist' should be NULL")
    m <- out.dist <- NULL
  }
  if (is.null(out.seq) && !is.null(out.dist) && !is.null(m)) {
    warning("if `out.dist' is given `m' will be ignored")
    m <- NULL
  }
  if (!is.null(m) && m==0) {
    warning("`m' is zero, graph will be empty")
  }
  if (power < 0) {
    warning("`power' is negative")
  }
  ## Default: add a single edge per time step when none of m / out.dist /
  ## out.seq was supplied.
  if (is.null(m) && is.null(out.dist) && is.null(out.seq)) {
    m <- 1
  }
  ## Coerce arguments to the types expected by the C entry point.
  n <- as.numeric(n)
  power <- as.numeric(power)
  if (!is.null(m)) { m <- as.numeric(m) }
  if (!is.null(out.dist)) { out.dist <- as.numeric(out.dist) }
  if (!is.null(out.seq)) { out.seq <- as.numeric(out.seq) }
  out.pref <- as.logical(out.pref)
  ## Turn a discrete out-degree distribution into a concrete out-degree
  ## sequence. With a start graph, only the newly added vertices need
  ## out-degrees, hence n - vcount(start.graph) draws.
  if (!is.null(out.dist)) {
    nn <- if (is.null(start.graph)) n else n-vcount(start.graph)
    out.seq <- as.numeric(sample(0:(length(out.dist)-1), nn,
                                 replace=TRUE, prob=out.dist))
  }
  ## The C code expects a (possibly empty) numeric vector, never NULL.
  if (is.null(out.seq)) {
    out.seq <- numeric()
  }
  ## Map the algorithm name onto the integer code used by the C
  ## implementation: bag = 0, psumtree = 1, psumtree-multiple = 2.
  algorithm <- igraph.match.arg(algorithm)
  algorithm1 <- switch(algorithm,
                       "psumtree"=1, "psumtree-multiple"=2,
                       "bag"=0)
  ## on.exit() guarantees the C-level finalizer runs even if the call errors.
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  res <- .Call("R_igraph_barabasi_game", n, power, m, out.seq, out.pref,
               zero.appeal, directed, algorithm1, start.graph,
               PACKAGE="igraph")
  ## Optionally record the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    res$name <- "Barabasi graph"
    res$power <- power
    res$m <- m
    res$zero.appeal <- zero.appeal
    res$algorithm <- algorithm
  }
  res
}
# Wrapper that packages sample_pa() and its arguments via constructor_spec()
# (defined elsewhere in the package) -- presumably for deferred graph
# construction; verify against constructor_spec()'s documentation.
#' @rdname sample_pa
#' @param ... Passed to \code{sample_pa}.
#' @export
pa <- function(...) constructor_spec(sample_pa, ...)
## -----------------------------------------------------------------
#' Generate random graphs according to the G(n,p) Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#'
#' The graph has \sQuote{n} vertices and for each edge the
#' probability that it is present in the graph is \sQuote{p}.
#'
#' @param n The number of vertices in the graph.
#' @param p The probability for drawing an edge between two
#' arbitrary vertices (G(n,p) graph).
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnm}}, \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_gnp(1000, 1/1000)
#' degree_distribution(g)
sample_gnp <- function(n, p, directed = FALSE, loops = FALSE) {
  ## This function shares its C entry point with sample_gnm(); the second
  ## numeric argument selects the model: 0 = G(n,p), 1 = G(n,m). `type` is
  ## kept as a string only for the graph attributes recorded below.
  type <- "gnp"
  type1 <- 0  # was: switch(type, "gnp"=0, "gnm"=1) -- dead branch, `type` is constant here
  ## on.exit() guarantees the C-level finalizer runs even if the call errors.
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  res <- .Call("R_igraph_erdos_renyi_game", as.numeric(n), as.numeric(type1),
               as.numeric(p), as.logical(directed), as.logical(loops),
               PACKAGE="igraph")
  ## Optionally record the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    res$name <- sprintf("Erdos renyi (%s) graph", type)
    res$type <- type
    res$loops <- loops
    res$p <- p
  }
  res
}
#' @rdname sample_gnp
#' @param ... Passed to \code{sample_gnp}.
#' @export
gnp <- function(...) {
  # Wrap sample_gnp in a constructor specification for deferred construction.
  constructor_spec(sample_gnp, ...)
}
## -----------------------------------------------------------------
#' Generate random graphs according to the G(n,m) Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#' The graph has \sQuote{n} vertices and \sQuote{m} edges,
#' and the \sQuote{m} edges are chosen uniformly randomly from the set of all
#' possible edges. This set includes loop edges as well if the \code{loops}
#' parameter is TRUE.
#'
#' @param n The number of vertices in the graph.
#' @param m The number of edges in the graph.
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_gnm(1000, 1000)
#' degree_distribution(g)
sample_gnm <- function(n, m, directed = FALSE, loops = FALSE) {
  ## This function always builds a G(n,m) graph; the C backend encodes
  ## that model variant as type code 1.
  type <- "gnm"
  type.code <- 1
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_erdos_renyi_game",
                 as.numeric(n), as.numeric(type.code), as.numeric(m),
                 as.logical(directed), as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    # Record the construction parameters as graph attributes.
    graph$name <- sprintf("Erdos renyi (%s) graph", type)
    graph$type <- type
    graph$loops <- loops
    graph$m <- m
  }
  graph
}
#' @rdname sample_gnm
#' @param ... Passed to \code{sample_gnm}.
#' @export
gnm <- function(...) {
  # Wrap sample_gnm in a constructor specification for deferred construction.
  constructor_spec(sample_gnm, ...)
}
## -----------------------------------------------------------------
#' Generate random graphs according to the Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#' In G(n,p) graphs, the graph has \sQuote{n} vertices and for each edge the
#' probability that it is present in the graph is \sQuote{p}.
#'
#' In G(n,m) graphs, the graph has \sQuote{n} vertices and \sQuote{m} edges,
#' and the \sQuote{m} edges are chosen uniformly randomly from the set of all
#' possible edges. This set includes loop edges as well if the \code{loops}
#' parameter is TRUE.
#'
#' \code{random.graph.game} is an alias to this function.
#'
#' @section Deprecated:
#'
#' Since igraph version 0.8.0, both \code{erdos.renyi.game} and
#' \code{random.graph.game} are deprecated, and \code{\link{sample_gnp}} and
#' \code{\link{sample_gnm}} should be used instead.
#'
#' @aliases erdos.renyi.game random.graph.game
#' @param n The number of vertices in the graph.
#' @param p.or.m Either the probability for drawing an edge between two
#' arbitrary vertices (G(n,p) graph), or the number of edges in the graph (for
#' G(n,m) graphs).
#' @param type The type of the random graph to create, either \code{gnp}
#' (G(n,p) graph) or \code{gnm} (G(n,m) graph).
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @param \dots Additional arguments, ignored.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- erdos.renyi.game(1000, 1/1000)
#' degree_distribution(g)
#'
erdos.renyi.game <- function(n, p.or.m, type=c("gnp", "gnm"),
                             directed=FALSE, loops=FALSE, ...) {
  # Deprecated entry point; sample_gnp()/sample_gnm() are the preferred API.
  type <- igraph.match.arg(type)
  # The C backend encodes G(n,p) as 0 and G(n,m) as 1.
  type.code <- if (type == "gnp") 0 else 1
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_erdos_renyi_game",
                 as.numeric(n), as.numeric(type.code), as.numeric(p.or.m),
                 as.logical(directed), as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    graph$name <- sprintf("Erdos renyi (%s) graph", type)
    graph$type <- type
    graph$loops <- loops
    # Store p.or.m under the attribute name matching the model variant.
    if (type == "gnp") {
      graph$p <- p.or.m
    } else {
      graph$m <- p.or.m
    }
  }
  graph
}
#' @export
random.graph.game <- erdos.renyi.game  # deprecated alias; prefer sample_gnp()/sample_gnm()
## -----------------------------------------------------------------
#' Generate random graphs with a given degree sequence
#'
#' It is often useful to create a graph with given vertex degrees. This is
#' exactly what \code{sample_degseq} does.
#'
#' The \dQuote{simple} method connects the out-stubs of the edges (undirected
#' graphs) or the out-stubs and in-stubs (directed graphs) together. This way
#' loop edges and also multiple edges may be generated. This method is not
#' adequate if one needs to generate simple graphs with a given degree
#' sequence. The multiple and loop edges can be deleted, but then the degree
#' sequence is distorted and there is nothing to ensure that the graphs are
#' sampled uniformly.
#'
#' The \dQuote{simple.no.multiple} method is similar to \dQuote{simple}, but
#' tries to avoid multiple and loop edges and restarts the generation from
#' scratch if it gets stuck. It is not guaranteed to sample uniformly from the
#' space of all possible graphs with the given sequence, but it is relatively
#' fast and it will eventually succeed if the provided degree sequence is
#' graphical, but there is no upper bound on the number of iterations.
#'
#' The \dQuote{vl} method is a more sophisticated generator. The algorithm and
#' the implementation was done by Fabien Viger and Matthieu Latapy. This
#' generator always generates undirected, connected simple graphs, it is an
#' error to pass the \code{in.deg} argument to it. The algorithm relies on
#' first creating an initial (possibly unconnected) simple undirected graph
#' with the given degree sequence (if this is possible at all). Then some
#' rewiring is done to make the graph connected. Finally a Monte-Carlo
#' algorithm is used to randomize the graph. The \dQuote{vl} samples from the
#' undirected, connected simple graphs uniformly. See
#' \url{http://www-rp.lip6.fr/~latapy/FV/generation.html} for details.
#'
#' @aliases degree.sequence.game
#' @param out.deg Numeric vector, the sequence of degrees (for undirected
#' graphs) or out-degrees (for directed graphs). For undirected graphs its sum
#' should be even. For directed graphs its sum should be the same as the sum of
#' \code{in.deg}.
#' @param in.deg For directed graph, the in-degree sequence. By default this is
#' \code{NULL} and an undirected graph is created.
#' @param method Character, the method for generating the graph. Right now the
#' \dQuote{simple}, \dQuote{simple.no.multiple} and \dQuote{vl} methods are
#' implemented.
#' @return The new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_pa}},
#' \code{\link{simplify}} to get rid of the multiple and/or loops edges.
#' @export
#' @keywords graphs
#' @examples
#'
#' ## The simple generator
#' g <- sample_degseq(rep(2,100))
#' degree(g)
#' is_simple(g) # sometimes TRUE, but can be FALSE
#' g2 <- sample_degseq(1:10, 10:1)
#' degree(g2, mode="out")
#' degree(g2, mode="in")
#'
#' ## The vl generator
#' g3 <- sample_degseq(rep(2,100), method="vl")
#' degree(g3)
#' is_simple(g3) # always TRUE
#'
#' ## Exponential degree distribution
#' ## Note, that we correct the degree sequence if its sum is odd
#' degs <- sample(1:100, 100, replace=TRUE, prob=exp(-0.5*(1:100)))
#' if (sum(degs) %% 2 != 0) { degs[1] <- degs[1] + 1 }
#' g4 <- sample_degseq(degs, method="vl")
#' all(degree(g4) == degs)
#'
#' ## Power-law degree distribution
#' ## Note, that we correct the degree sequence if its sum is odd
#' degs <- sample(1:100, 100, replace=TRUE, prob=(1:100)^-2)
#' if (sum(degs) %% 2 != 0) { degs[1] <- degs[1] + 1 }
#' g5 <- sample_degseq(degs, method="vl")
#' all(degree(g5) == degs)
sample_degseq <- function(out.deg, in.deg=NULL,
                          method=c("simple", "vl",
                            "simple.no.multiple")) {
  # Map the method name onto the integer code used by the C implementation.
  method <- igraph.match.arg(method)
  method.code <- switch(method,
                        "simple" = 0,
                        "vl" = 1,
                        "simple.no.multiple" = 2)
  if (!is.null(in.deg)) {
    in.deg <- as.numeric(in.deg)
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_degree_sequence_game", as.numeric(out.deg),
                 in.deg, as.numeric(method.code),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    graph$name <- "Degree sequence random graph"
    graph$method <- method
  }
  graph
}
#' @rdname sample_degseq
#' @param ... Passed to \code{sample_degseq}.
#' @export
degseq <- function(...) {
  # Wrap sample_degseq in a constructor specification for deferred construction.
  constructor_spec(sample_degseq, ...)
}
## -----------------------------------------------------------------
#' Growing random graph generation
#'
#' This function creates a random graph by simulating its stochastic evolution.
#'
#' This is discrete time step model, in each time step a new vertex is added to
#' the graph and \code{m} new edges are created. If \code{citation} is
#' \code{FALSE} these edges are connecting two uniformly randomly chosen
#' vertices, otherwise the edges are connecting new vertex to uniformly
#' randomly chosen old vertices.
#'
#' @aliases growing.random.game
#' @param n Numeric constant, number of vertices in the graph.
#' @param m Numeric constant, number of edges added in each time step.
#' @param directed Logical, whether to create a directed graph.
#' @param citation Logical. If \code{TRUE} a citation graph is created, ie. in
#' each time step the added edges are originating from the new vertex.
#' @return A new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}, \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_growing(500, citation=FALSE)
#' g2 <- sample_growing(500, citation=TRUE)
#'
sample_growing <- function(n, m=1, directed=TRUE, citation=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # Simulate the growth process in C: one new vertex and m new edges per step.
  graph <- .Call("R_igraph_growing_random_game",
                 as.numeric(n), as.numeric(m),
                 as.logical(directed), as.logical(citation),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    graph$name <- "Growing random graph"
    graph$m <- m
    graph$citation <- citation
  }
  graph
}
#' @rdname sample_growing
#' @param ... Passed to \code{sample_growing}.
#' @export
growing <- function(...) {
  # Wrap sample_growing in a constructor specification for deferred construction.
  constructor_spec(sample_growing, ...)
}
## -----------------------------------------------------------------
#' Generate an evolving random graph with preferential attachment and aging
#'
#' This function creates a random graph by simulating its evolution. Each time
#' a new vertex is added it creates a number of links to old vertices and the
#' probability that an old vertex is cited depends on its in-degree
#' (preferential attachment) and age.
#'
#' This is a discrete time step model of a growing graph. We start with a
#' network containing a single vertex (and no edges) in the first time step.
#' Then in each time step (starting with the second) a new vertex is added and
#' it initiates a number of edges to the old vertices in the network. The
#' probability that an old vertex is connected to is proportional to \deqn{P[i]
#' \sim (c\cdot k_i^\alpha+a)(d\cdot l_i^\beta+b)\cdot }{% P[i] ~ (c k[i]^alpha
#' + a) (d l[i]^beta + b)}
#'
#' Here \eqn{k_i}{k[i]} is the in-degree of vertex \eqn{i} in the current time
#' step and \eqn{l_i}{l[i]} is the age of vertex \eqn{i}. The age is simply
#' defined as the number of time steps passed since the vertex is added, with
#' the extension that vertex age is divided to be in \code{aging.bin} bins.
#'
#' \eqn{c}, \eqn{\alpha}{alpha}, \eqn{a}, \eqn{d}, \eqn{\beta}{beta} and
#' \eqn{b} are parameters and they can be set via the following arguments:
#' \code{pa.exp} (\eqn{\alpha}{alpha}, mandatory argument), \code{aging.exp}
#' (\eqn{\beta}{beta}, mandatory argument), \code{zero.deg.appeal} (\eqn{a},
#' optional, the default value is 1), \code{zero.age.appeal} (\eqn{b},
#' optional, the default is 0), \code{deg.coef} (\eqn{c}, optional, the default
#' is 1), and \code{age.coef} (\eqn{d}, optional, the default is 1).
#'
#' The number of edges initiated in each time step is governed by the \code{m},
#' \code{out.seq} and \code{out.pref} parameters. If \code{out.seq} is given
#' then it is interpreted as a vector giving the number of edges to be added in
#' each time step. It should be of length \code{n} (the number of vertices),
#' and its first element will be ignored. If \code{out.seq} is not given (or
#' NULL) and \code{out.dist} is given then it will be used as a discrete
#' probability distribution to generate the number of edges. Its first element
#' gives the probability that zero edges are added at a time step, the second
#' element is the probability that one edge is added, etc. (\code{out.seq}
#' should contain non-negative numbers, but if they don't sum up to 1, they
#' will be normalized to sum up to 1. This behavior is similar to the
#' \code{prob} argument of the \code{sample} command.)
#'
#' By default a directed graph is generated, but if \code{directed} is set to
#' \code{FALSE} then an undirected one is created. Even if an undirected graph
#' is generated, \eqn{k_i}{k[i]} denotes only the adjacent edges not initiated by
#' the vertex itself except if \code{out.pref} is set to \code{TRUE}.
#'
#' If the \code{time.window} argument is given (and not NULL) then
#' \eqn{k_i}{k[i]} means only the adjacent edges added in the previous
#' \code{time.window} time steps.
#'
#' This function might generate graphs with multiple edges.
#'
#' @aliases sample_pa_age aging.prefatt.game aging.barabasi.game aging.ba.game
#' @param n The number of vertices in the graph.
#' @param pa.exp The preferential attachment exponent, see the details below.
#' @param aging.exp The exponent of the aging, usually a non-positive number,
#' see details below.
#' @param m The number of edges each new vertex creates (except the very first
#' vertex). This argument is used only if both the \code{out.dist} and
#' \code{out.seq} arguments are NULL.
#' @param aging.bin The number of bins to use for measuring the age of
#' vertices, see details below.
#' @param out.dist The discrete distribution to generate the number of edges to
#' add in each time step if \code{out.seq} is NULL. See details below.
#' @param out.seq The number of edges to add in each time step, a vector
#' containing as many elements as the number of vertices. See details below.
#' @param out.pref Logical constant, whether to include edges not initiated by
#' the vertex as a basis of preferential attachment. See details below.
#' @param directed Logical constant, whether to generate a directed graph. See
#' details below.
#' @param zero.deg.appeal The degree-dependent part of the
#' \sQuote{attractiveness} of the vertices with no adjacent edges. See also
#' details below.
#' @param zero.age.appeal The age-dependent part of the \sQuote{attractiveness}
#' of the vertices with age zero. It is usually zero, see details below.
#' @param deg.coef The coefficient of the degree-dependent
#' \sQuote{attractiveness}. See details below.
#' @param age.coef The coefficient of the age-dependent part of the
#' \sQuote{attractiveness}. See details below.
#' @param time.window Integer constant; if not NULL, only adjacent edges added
#' in the last \code{time.window} time steps are counted as a basis of the
#' preferential
#' attachment. See also details below.
#' @return A new graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}, \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' # The maximum degree for graph with different aging exponents
#' g1 <- sample_pa_age(10000, pa.exp=1, aging.exp=0, aging.bin=1000)
#' g2 <- sample_pa_age(10000, pa.exp=1, aging.exp=-1, aging.bin=1000)
#' g3 <- sample_pa_age(10000, pa.exp=1, aging.exp=-3, aging.bin=1000)
#' max(degree(g1))
#' max(degree(g2))
#' max(degree(g3))
sample_pa_age <- function(n, pa.exp, aging.exp, m=NULL, aging.bin=300,
                          out.dist=NULL, out.seq=NULL,
                          out.pref=FALSE, directed=TRUE,
                          zero.deg.appeal=1, zero.age.appeal=0,
                          deg.coef=1, age.coef=1,
                          time.window=NULL) {
  # Checks
  # The three ways of specifying per-step edge counts are mutually
  # exclusive, with precedence out.seq > out.dist > m; conflicting extra
  # arguments are dropped with a warning rather than an error.
  if (! is.null(out.seq) && (!is.null(m) || !is.null(out.dist))) {
    warning("if `out.seq' is given `m' and `out.dist' should be NULL")
    m <- out.dist <- NULL
  }
  if (is.null(out.seq) && !is.null(out.dist) && !is.null(m)) {
    warning("if `out.dist' is given `m' will be ignored")
    m <- NULL
  }
  # out.seq must supply one out-degree per vertex, and degrees cannot be
  # negative.
  if (!is.null(out.seq) && length(out.seq) != n) {
    stop("`out.seq' should be of length `n'")
  }
  if (!is.null(out.seq) && min(out.seq)<0) {
    stop("negative elements in `out.seq'");
  }
  if (!is.null(m) && m<0) {
    stop("`m' is negative")
  }
  if (!is.null(time.window) && time.window <= 0) {
    stop("time window size should be positive")
  }
  if (!is.null(m) && m==0) {
    warning("`m' is zero, graph will be empty")
  }
  # Unusual-but-allowed parameter values only trigger warnings.
  if (pa.exp < 0) {
    warning("preferential attachment is negative")
  }
  if (aging.exp > 0) {
    warning("aging exponent is positive")
  }
  if (zero.deg.appeal <=0 ) {
    warning("initial attractiveness is not positive")
  }
  # Default: one new edge per time step when nothing else was specified.
  if (is.null(m) && is.null(out.dist) && is.null(out.seq)) {
    m <- 1
  }
  # Coerce everything to the types expected by the C code.
  n <- as.numeric(n)
  if (!is.null(m)) { m <- as.numeric(m) }
  if (!is.null(out.dist)) { out.dist <- as.numeric(out.dist) }
  if (!is.null(out.seq)) { out.seq <- as.numeric(out.seq) }
  out.pref <- as.logical(out.pref)
  # If a distribution was given, draw the per-step out-degree sequence from
  # it here (values 0, 1, ..., length(out.dist)-1); `sample` normalizes
  # the probabilities.
  if (!is.null(out.dist)) {
    out.seq <- as.numeric(sample(0:(length(out.dist)-1), n,
                                 replace=TRUE, prob=out.dist))
  }
  # The C code expects an empty numeric vector, not NULL, when `m' is used.
  if (is.null(out.seq)) {
    out.seq <- numeric()
  }
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  # Two distinct C generators: the plain aging model, or the
  # recent-degree variant when a time window limits which edges count.
  res <- if (is.null(time.window)) {
    .Call("R_igraph_barabasi_aging_game", as.numeric(n),
          as.numeric(pa.exp), as.numeric(aging.exp),
          as.numeric(aging.bin), m, out.seq,
          out.pref, as.numeric(zero.deg.appeal), as.numeric(zero.age.appeal),
          as.numeric(deg.coef), as.numeric(age.coef), directed,
          PACKAGE="igraph")
  } else {
    .Call("R_igraph_recent_degree_aging_game", as.numeric(n),
          as.numeric(pa.exp), as.numeric(aging.exp),
          as.numeric(aging.bin), m, out.seq, out.pref, as.numeric(zero.deg.appeal),
          directed,
          time.window,
          PACKAGE="igraph")
  }
  # Record the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    res$name <- "Aging Barabasi graph"
    res$pa.exp <- pa.exp
    res$aging.exp <- aging.exp
    res$m <- m
    res$aging.bin <- aging.bin
    res$out.pref <- out.pref
    res$zero.deg.appeal <- zero.deg.appeal
    res$zero.age.appeal <- zero.age.appeal
    res$deg.coef <- deg.coef
    res$age.coef <- age.coef
    res$time.window <- if (is.null(time.window)) Inf else time.window
  }
  res
}
#' @rdname sample_pa_age
#' @param ... Passed to \code{sample_pa_age}.
#' @export
pa_age <- function(...) {
  # Wrap sample_pa_age in a constructor specification for deferred construction.
  constructor_spec(sample_pa_age, ...)
}
## -----------------------------------------------------------------
#' Graph generation based on different vertex types
#'
#' These functions implement evolving network models based on different vertex
#' types.
#'
#' For \code{sample_traits_callaway} the simulation goes like this: in each
#' discrete time step a new vertex is added to the graph. The type of this
#' vertex is generated based on \code{type.dist}. Then two vertices are
#' selected uniformly randomly from the graph. The probability that they will
#' be connected depends on the types of these vertices and is taken from
#' \code{pref.matrix}. Then another two vertices are selected and this is
#' repeated \code{edges.per.step} times in each time step.
#'
#' For \code{sample_traits} the simulation goes like this: a single vertex is
#' added at each time step. This new vertex tries to connect to \code{k}
#' vertices in the graph. The probability that such a connection is realized
#' depends on the types of the vertices involved and is taken from
#' \code{pref.matrix}.
#'
#' @aliases sample_traits_callaway sample_traits callaway.traits.game
#' establishment.game
#' @param nodes The number of vertices in the graph.
#' @param types The number of different vertex types.
#' @param edge.per.step The number of edges to add to the graph per time step.
#' @param type.dist The distribution of the vertex types. This is assumed to be
#' stationary in time.
#' @param pref.matrix A matrix giving the preferences of the given vertex
#' types. These should be probabilities, ie. numbers between zero and one.
#' @param directed Logical constant, whether to generate directed graphs.
#' @param k The number of trials per time step, see details below.
#' @return A new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @export
#' @keywords graphs
#' @examples
#'
#' # two types of vertices, they like only themselves
#' g1 <- sample_traits_callaway(1000, 2, pref.matrix=matrix( c(1,0,0,1), nc=2))
#' g2 <- sample_traits(1000, 2, k=2, pref.matrix=matrix( c(1,0,0,1), nc=2))
sample_traits_callaway <- function(nodes, types, edge.per.step=1,
                                   type.dist=rep(1, types),
                                   pref.matrix=matrix(1, types, types),
                                   directed=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # The preference matrix is re-coerced to a types x types double matrix
  # before being handed to the C implementation.
  pm <- matrix(as.double(pref.matrix), types, types)
  graph <- .Call("R_igraph_callaway_traits_game", as.double(nodes),
                 as.double(types), as.double(edge.per.step),
                 as.double(type.dist), pm,
                 as.logical(directed),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    # Record the construction parameters as graph attributes.
    graph$name <- "Trait-based Callaway graph"
    graph$types <- types
    graph$edge.per.step <- edge.per.step
    graph$type.dist <- type.dist
    graph$pref.matrix <- pref.matrix
  }
  graph
}
#' @rdname sample_traits_callaway
#' @param ... Passed to the constructor, \code{sample_traits} or
#' \code{sample_traits_callaway}.
#' @export
traits_callaway <- function(...) {
  # Wrap sample_traits_callaway in a constructor specification.
  constructor_spec(sample_traits_callaway, ...)
}
#' @rdname sample_traits_callaway
#' @export
sample_traits <- function(nodes, types, k=1, type.dist=rep(1, types),
                          pref.matrix=matrix(1, types, types),
                          directed=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # The preference matrix is re-coerced to a types x types double matrix
  # before being handed to the C implementation.
  pm <- matrix(as.double(pref.matrix), types, types)
  graph <- .Call("R_igraph_establishment_game", as.double(nodes),
                 as.double(types), as.double(k), as.double(type.dist),
                 pm,
                 as.logical(directed),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    # Record the construction parameters as graph attributes.
    graph$name <- "Trait-based growing graph"
    graph$types <- types
    graph$k <- k
    graph$type.dist <- type.dist
    graph$pref.matrix <- pref.matrix
  }
  graph
}
#' @rdname sample_traits_callaway
#' @export
traits <- function(...) {
  # Wrap sample_traits in a constructor specification for deferred construction.
  constructor_spec(sample_traits, ...)
}
## -----------------------------------------------------------------
#' Geometric random graphs
#'
#' Generate a random graph based on the distance of random point on a unit
#' square
#'
#' First a number of points are dropped on a unit square, these points
#' correspond to the vertices of the graph to create. Two points will be
#' connected with an undirected edge if they are closer to each other in
#' Euclidean norm than a given radius. If the \code{torus} argument is
#' \code{TRUE} then a unit area torus is used instead of a square.
#'
#' @aliases grg.game
#' @param nodes The number of vertices in the graph.
#' @param radius The radius within which the vertices will be connected by an
#' edge.
#' @param torus Logical constant, whether to use a torus instead of a square.
#' @param coords Logical scalar, whether to add the positions of the vertices
#' as vertex attributes called \sQuote{\code{x}} and \sQuote{\code{y}}.
#' @return A graph object. If \code{coords} is \code{TRUE} then with vertex
#' attributes \sQuote{\code{x}} and \sQuote{\code{y}}.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}, first version was
#' written by Keith Briggs (\url{http://keithbriggs.info/}).
#' @seealso \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_grg(1000, 0.05, torus=FALSE)
#' g2 <- sample_grg(1000, 0.05, torus=TRUE)
#'
sample_grg <- function(nodes, radius, torus=FALSE, coords=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # The C routine returns a list: the graph itself plus the x and y
  # coordinate vectors of the dropped points.
  out <- .Call("R_igraph_grg_game", as.double(nodes), as.double(radius),
               as.logical(torus), as.logical(coords),
               PACKAGE = "igraph")
  graph <- out[[1]]
  if (coords) {
    # Attach the generated point positions as vertex attributes.
    V(graph)$x <- out[[2]]
    V(graph)$y <- out[[3]]
  }
  if (igraph_opt("add.params")) {
    graph$name <- "Geometric random graph"
    graph$radius <- radius
    graph$torus <- torus
  }
  graph
}
#' @rdname sample_grg
#' @param ... Passed to \code{sample_grg}.
#' @export
grg <- function(...) {
  # Wrap sample_grg in a constructor specification for deferred construction.
  constructor_spec(sample_grg, ...)
}
## -----------------------------------------------------------------
#' Trait-based random generation
#'
#' Generation of random graphs based on different vertex types.
#'
#' Both models generate random graphs with given vertex types. For
#' \code{sample_pref} the probability that two vertices will be connected
#' depends on their type and is given by the \sQuote{pref.matrix} argument.
#' This matrix should be symmetric to make sense but this is not checked. The
#' distribution of the different vertex types is given by the
#' \sQuote{type.dist} vector.
#'
#' For \code{sample_asym_pref} each vertex has an in-type and an
#' out-type and a directed graph is created. The probability that a directed
#' edge is realized from a vertex with a given out-type to a vertex with a
#' given in-type is given in the \sQuote{pref.matrix} argument, which can be
#' asymmetric. The joint distribution for the in- and out-types is given in the
#' \sQuote{type.dist.matrix} argument.
#'
#' @aliases sample_pref sample_asym_pref preference.game asymmetric.preference.game
#' @param nodes The number of vertices in the graphs.
#' @param types The number of different vertex types.
#' @param type.dist The distribution of the vertex types, a numeric vector of
#' length \sQuote{types} containing non-negative numbers. The vector will be
#' normed to obtain probabilities.
#' @param fixed.sizes Fix the number of vertices with a given vertex type
#' label. The \code{type.dist} argument gives the group sizes (i.e. number of
#' vertices with the different labels) in this case.
#' @param type.dist.matrix The joint distribution of the in- and out-vertex
#' types.
#' @param pref.matrix A square matrix giving the preferences of the vertex
#' types. The matrix has \sQuote{types} rows and columns.
#' @param directed Logical constant, whether to create a directed graph.
#' @param loops Logical constant, whether self-loops are allowed in the graph.
#' @return An igraph graph.
#' @author Tamas Nepusz \email{ntamas@@gmail.com} and Gabor Csardi
#' \email{csardi.gabor@@gmail.com} for the R interface
#' @seealso \code{\link{sample_traits}}.
#' \code{\link{sample_traits_callaway}}
#' @export
#' @keywords graphs
#' @examples
#'
#' pf <- matrix( c(1, 0, 0, 1), nr=2)
#' g <- sample_pref(20, 2, pref.matrix=pf)
#' \dontrun{tkplot(g, layout=layout_with_fr)}
#'
#' pf <- matrix( c(0, 1, 0, 0), nr=2)
#' g <- sample_asym_pref(20, 2, pref.matrix=pf)
#' \dontrun{tkplot(g, layout=layout_in_circle)}
#'
sample_pref <- function(nodes, types, type.dist=rep(1, types),
                        fixed.sizes=FALSE,
                        pref.matrix=matrix(1, types, types),
                        directed=FALSE, loops=FALSE) {
  # The preference matrix must be types x types.
  if (nrow(pref.matrix) != types || ncol(pref.matrix) != types) {
    stop("Invalid size for preference matrix")
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # The C routine returns a list: the graph plus the assigned vertex types.
  out <- .Call("R_igraph_preference_game", as.double(nodes),
               as.double(types),
               as.double(type.dist), as.logical(fixed.sizes),
               matrix(as.double(pref.matrix), types, types),
               as.logical(directed), as.logical(loops),
               PACKAGE = "igraph")
  graph <- out[[1]]
  # The C code numbers types from zero; expose them 1-based on the R side.
  V(graph)$type <- out[[2]] + 1
  if (igraph_opt("add.params")) {
    graph$name <- "Preference random graph"
    graph$types <- types
    graph$type.dist <- type.dist
    graph$fixed.sizes <- fixed.sizes
    graph$pref.matrix <- pref.matrix
    graph$loops <- loops
  }
  graph
}
#' @rdname sample_pref
#' @param ... Passed to the constructor, \code{sample_pref} or
#' \code{sample_asym_pref}.
#' @export
pref <- function(...) {
  # Wrap sample_pref in a constructor specification for deferred construction.
  constructor_spec(sample_pref, ...)
}
#' @rdname sample_pref
#' @export
sample_asym_pref <- function(nodes, types,
                             type.dist.matrix=matrix(1, types,types),
                             pref.matrix=matrix(1, types, types),
                             loops=FALSE) {
  # Both input matrices must be types x types.
  if (nrow(pref.matrix) != types || ncol(pref.matrix) != types) {
    stop("Invalid size for preference matrix")
  }
  if (nrow(type.dist.matrix) != types || ncol(type.dist.matrix) != types) {
    stop("Invalid size for type distribution matrix")
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_asymmetric_preference_game",
                 as.double(nodes), as.double(types),
                 matrix(as.double(type.dist.matrix), types, types),
                 matrix(as.double(pref.matrix), types, types),
                 as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    # Record the construction parameters as graph attributes.
    graph$name <- "Asymmetric preference random graph"
    graph$types <- types
    graph$type.dist.matrix <- type.dist.matrix
    graph$pref.matrix <- pref.matrix
    graph$loops <- loops
  }
  graph
}
#' @rdname sample_pref
#' @export
asym_pref <- function(...) {
  # Wrap sample_asym_pref in a constructor specification.
  constructor_spec(sample_asym_pref, ...)
}
## -----------------------------------------------------------------
connect <- function(graph, order, mode=c("all", "out", "in", "total")) {
  if (!is_igraph(graph)) {
    stop("Not a graph object")
  }
  # Translate the mode name to the integer code used by the C side;
  # "all" and "total" are synonyms.
  mode <- igraph.match.arg(mode)
  mode.code <- switch(mode, "out" = 1, "in" = 2, "all" = 3, "total" = 3)
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  .Call("R_igraph_connect_neighborhood", graph, as.numeric(order),
        as.numeric(mode.code),
        PACKAGE = "igraph")
}
#' The Watts-Strogatz small-world model
#'
#' Generate a graph according to the Watts-Strogatz network model.
#'
#' First a lattice is created with the given \code{dim}, \code{size} and
#' \code{nei} arguments. Then the edges of the lattice are rewired uniformly
#' randomly with probability \code{p}.
#'
#' Note that this function might create graphs with loops and/or multiple
#' edges. You can use \code{\link{simplify}} to get rid of these.
#'
#' @aliases watts.strogatz.game
#' @param dim Integer constant, the dimension of the starting lattice.
#' @param size Integer constant, the size of the lattice along each dimension.
#' @param nei Integer constant, the neighborhood within which the vertices of
#' the lattice will be connected.
#' @param p Real constant between zero and one, the rewiring probability.
#' @param loops Logical scalar, whether loop edges are allowed in the
#' generated graph.
#' @param multiple Logical scalar, whether multiple edges are allowed in the
#' generated graph.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{make_lattice}}, \code{\link{rewire}}
#' @references Duncan J Watts and Steven H Strogatz: Collective dynamics of
#' \sQuote{small world} networks, Nature 393, 440-442, 1998.
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_smallworld(1, 100, 5, 0.05)
#' mean_distance(g)
#' transitivity(g, type="average")
#'
sample_smallworld <- function(dim, size, nei, p, loops=FALSE,
                              multiple=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  # Lattice construction and rewiring both happen inside the C routine.
  graph <- .Call("R_igraph_watts_strogatz_game", as.numeric(dim),
                 as.numeric(size), as.numeric(nei), as.numeric(p),
                 as.logical(loops), as.logical(multiple),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    # Record the construction parameters as graph attributes.
    graph$name <- "Watts-Strogatz random graph"
    graph$dim <- dim
    graph$size <- size
    graph$nei <- nei
    graph$p <- p
    graph$loops <- loops
    graph$multiple <- multiple
  }
  graph
}
#' @rdname sample_smallworld
#' @param ... Passed to \code{sample_smallworld}.
#' @export
smallworld <- function(...) {
  # Wrap sample_smallworld in a constructor specification.
  constructor_spec(sample_smallworld, ...)
}
## -----------------------------------------------------------------
#' Random citation graphs
#'
#' \code{sample_last_cit} creates a graph, where vertices age, and
#' gain new connections based on how long ago their last citation
#' happened.
#'
#' \code{sample_cit_cit_types} is a stochastic block model where the
#' graph is growing.
#'
#' \code{sample_cit_types} is similarly a growing stochastic block model,
#' but the probability of an edge depends on the (potentially) cited
#' vertex only.
#'
#' @aliases cited.type.game sample_cit_types citing.cited.type.game
#' sample_cit_cit_types sample_last_cit lastcit.game
#' @param n Number of vertices.
#' @param edges Number of edges per step.
#' @param agebins Number of aging bins.
#' @param pref Vector (\code{sample_last_cit} and \code{sample_cit_types} or
#' matrix (\code{sample_cit_cit_types}) giving the (unnormalized) citation
#' probabilities for the different vertex types.
#' @param directed Logical scalar, whether to generate directed networks.
#' @param types Vector of length \sQuote{\code{n}}, the types of the vertices.
#' Types are numbered from zero.
#' @param attr Logical scalar, whether to add the vertex types to the generated
#' graph as a vertex attribute called \sQuote{\code{type}}.
#' @return A new graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @keywords graphs
#' @export
sample_last_cit <- function(n, edges=1, agebins=n/7100, pref=(1:(agebins+1))^-3,
                            directed=TRUE) {
  ## Make sure the C-level result is released even on error
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  ## Delegate graph generation to the C core
  g <- .Call("R_igraph_lastcit_game", as.numeric(n), as.numeric(edges),
             as.numeric(agebins), as.numeric(pref), as.logical(directed),
             PACKAGE="igraph")
  ## Optionally record the construction parameters on the graph
  if (igraph_opt("add.params")) {
    g$name <- "Random citation graph based on last citation"
    g$edges <- edges
    g$agebins <- agebins
  }
  g
}
#' @rdname sample_last_cit
#' @param ... Passed to the actual constructor.
#' @export
## Lazy-constructor variant of sample_last_cit; constructor_spec() is
## defined elsewhere in this package.
last_cit <- function(...) constructor_spec(sample_last_cit, ...)
#' @rdname sample_last_cit
#' @export
sample_cit_types <- function(n, edges=1, types=rep(0, n),
                             pref=rep(1, length(types)),
                             directed=TRUE, attr=TRUE) {
  ## Release the C-level result even if an error interrupts construction
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  g <- .Call("R_igraph_cited_type_game", as.numeric(n), as.numeric(edges),
             as.numeric(types), as.numeric(pref), as.logical(directed),
             PACKAGE="igraph")
  ## Optionally store the vertex types in a `type` vertex attribute
  if (attr) {
    V(g)$type <- types
  }
  if (igraph_opt("add.params")) {
    g$name <- "Random citation graph (cited type)"
    g$edges <- edges
  }
  g
}
#' @rdname sample_last_cit
#' @export
## Lazy-constructor variant of sample_cit_types; constructor_spec() is
## defined elsewhere in this package.
cit_types <- function(...) constructor_spec(sample_cit_types, ...)
#' @rdname sample_last_cit
#' @export
sample_cit_cit_types <- function(n, edges=1, types=rep(0, n),
                                 pref=matrix(1, nrow=length(types),
                                   ncol=length(types)),
                                 directed=TRUE, attr=TRUE) {
  ## Flatten the preference matrix to a numeric vector while keeping its
  ## dim attribute, which is what the C code expects
  pref.dim <- dim(pref)
  pref <- as.numeric(pref)
  dim(pref) <- pref.dim
  ## Release the C-level result even if an error interrupts construction
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  g <- .Call("R_igraph_citing_cited_type_game", as.numeric(n),
             as.numeric(types), pref, as.numeric(edges),
             as.logical(directed),
             PACKAGE="igraph")
  ## Optionally store the vertex types in a `type` vertex attribute
  if (attr) {
    V(g)$type <- types
  }
  if (igraph_opt("add.params")) {
    g$name <- "Random citation graph (citing & cited type)"
    g$edges <- edges
  }
  g
}
#' @rdname sample_last_cit
#' @export
## Lazy-constructor variant of sample_cit_cit_types; constructor_spec()
## is defined elsewhere in this package.
cit_cit_types <- function(...) constructor_spec(sample_cit_cit_types, ...)
## -----------------------------------------------------------------
#' Bipartite random graphs
#'
#' Generate bipartite graphs using the Erdos-Renyi model
#'
#' Similarly to unipartite (one-mode) networks, we can define the $G(n,p)$, and
#' $G(n,m)$ graph classes for bipartite graphs, via their generating process.
#' In $G(n,p)$ every possible edge between top and bottom vertices is realized
#' with probability $p$, independently of the rest of the edges. In $G(n,m)$, we
#' uniformly choose $m$ edges to realize.
#'
#' @aliases bipartite.random.game
#' @param n1 Integer scalar, the number of bottom vertices.
#' @param n2 Integer scalar, the number of top vertices.
#' @param type Character scalar, the type of the graph, \sQuote{gnp} creates a
#' $G(n,p)$ graph, \sQuote{gnm} creates a $G(n,m)$ graph. See details below.
#' @param p Real scalar, connection probability for $G(n,p)$ graphs. Should not
#' be given for $G(n,m)$ graphs.
#' @param m Integer scalar, the number of edges for $G(n,m)$ graphs. Should not
#' be given for $G(n,p)$ graphs.
#' @param directed Logical scalar, whether to create a directed graph. See also
#' the \code{mode} argument.
#' @param mode Character scalar, specifies how to direct the edges in directed
#' graphs. If it is \sQuote{out}, then directed edges point from bottom
#' vertices to top vertices. If it is \sQuote{in}, edges point from top
#' vertices to bottom vertices. \sQuote{out} and \sQuote{in} do not generate
#' mutual edges. If this argument is \sQuote{all}, then each edge direction is
#' considered independently and mutual edges might be generated. This argument
#' is ignored for undirected graphs.
#' @return A bipartite igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}} for the unipartite version.
#' @export
#' @keywords graphs
#' @examples
#'
#' ## empty graph
#' sample_bipartite(10, 5, p=0)
#'
#' ## full graph
#' sample_bipartite(10, 5, p=1)
#'
#' ## random bipartite graph
#' sample_bipartite(10, 5, p=.1)
#'
#' ## directed bipartite graph, G(n,m)
#' sample_bipartite(10, 5, type="Gnm", m=20, directed=TRUE, mode="all")
#'
sample_bipartite <- function(n1, n2, type=c("gnp", "gnm"), p, m,
                    directed=FALSE, mode=c("out", "in", "all")) {
  ## Coerce and validate the arguments
  n1 <- as.integer(n1)
  n2 <- as.integer(n2)
  type <- igraph.match.arg(type)
  if (!missing(p)) { p <- as.numeric(p) }
  if (!missing(m)) { m <- as.integer(m) }
  directed <- as.logical(directed)
  ## Encode the edge-direction mode for the C level: 1=out, 2=in, 3=all
  mode <- switch(igraph.match.arg(mode), "out"=1, "in"=2, "all"=3)
  ## `p` is required for G(n,p) and `m` for G(n,m); the other parameter
  ## is ignored with a warning if it was supplied anyway
  if (type=="gnp" && missing(p)) {
    stop("Connection probability `p' is not given for Gnp graph")
  }
  if (type=="gnp" && !missing(m)) {
    warning("Number of edges `m' is ignored for Gnp graph")
  }
  if (type=="gnm" && missing(m)) {
    stop("Number of edges `m' is not given for Gnm graph")
  }
  if (type=="gnm" && !missing(p)) {
    ## BUG FIX: this warning previously said "Gnp graph"; in this branch
    ## the graph type is Gnm
    warning("Connection probability `p' is ignored for Gnm graph")
  }
  ## Release the C-level result even if an error interrupts construction
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  if (type=="gnp") {
    res <- .Call("R_igraph_bipartite_game_gnp", n1, n2, p, directed, mode,
                 PACKAGE="igraph")
    ## The C code returns the graph and the vertex type vector separately;
    ## attach the latter as the `type` vertex attribute
    res <- set_vertex_attr(res$graph, "type", value=res$types)
    res$name <- "Bipartite Gnp random graph"
    res$p <- p
  } else if (type=="gnm") {
    res <- .Call("R_igraph_bipartite_game_gnm", n1, n2, m, directed, mode,
                 PACKAGE="igraph")
    res <- set_vertex_attr(res$graph, "type", value=res$types)
    res$name <- "Bipartite Gnm random graph"
    res$m <- m
  }
  res
}
#' @rdname sample_bipartite
#' @param ... Passed to \code{sample_bipartite}.
#' @export
## Lazy-constructor variant of sample_bipartite; constructor_spec() is
## defined elsewhere in this package.
bipartite <- function(...) constructor_spec(sample_bipartite, ...)
#' Sample stochastic block model
#'
#' Sampling from the stochastic block model of networks
#'
#' This function samples graphs from a stochastic block model by (doing the
#' equivalent of) Bernoulli trials for each potential edge with the
#' probabilities given by the Bernoulli rate matrix, \code{pref.matrix}.
#'
#' @aliases sample_sbm sbm.game sbm
#' @param n Number of vertices in the graph.
#' @param pref.matrix The matrix giving the Bernoulli rates. This is a
#' \eqn{K\times K}{KxK} matrix, where \eqn{K} is the number of groups. The
#' probability of creating an edge between vertices from groups \eqn{i} and
#' \eqn{j} is given by element \eqn{(i,j)}. For undirected graphs, this matrix
#' must be symmetric.
#' @param block.sizes Numeric vector giving the number of vertices in each
#' group. The sum of the vector must match the number of vertices.
#' @param directed Logical scalar, whether to generate a directed graph.
#' @param loops Logical scalar, whether self-loops are allowed in the graph.
#' @param \dots Passed to \code{sample_sbm}.
#' @return An igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_gnm}}
#' @references Faust, K., & Wasserman, S. (1992a). Blockmodels: Interpretation
#' and evaluation. \emph{Social Networks}, 14, 5--61.
#' @keywords graphs
#' @examples
#'
#' ## Two groups with only a few connections between the groups
#' pm <- cbind( c(.1, .001), c(.001, .05) )
#' g <- sample_sbm(1000, pref.matrix=pm, block.sizes=c(300,700))
#' g
#' @export
sample_sbm <- sample_sbm
#' @export
## Lazy-constructor variant of sample_sbm; constructor_spec() is defined
## elsewhere in this package.
sbm <- function(...) constructor_spec(sample_sbm, ...)
## -----------------------------------------------------------------
#' Sample the hierarchical stochastic block model
#'
#' Sampling from a hierarchical stochastic block model of networks.
#'
#' The function generates a random graph according to the hierarchical
#' stochastic block model.
#'
#' @aliases sample_hierarchical_sbm hierarchical_sbm
#' @param n Integer scalar, the number of vertices.
#' @param m Integer scalar, the number of vertices per block. \code{n / m} must
#' be integer. Alternatively, an integer vector of block sizes, if not all the
#' blocks have equal sizes.
#' @param rho Numeric vector, the fraction of vertices per cluster, within a
#' block. Must sum up to 1, and \code{rho * m} must be integer for all elements
#' of rho. Alternatively a list of rho vectors, one for each block, if they are
#' not the same for all blocks.
#' @param C A square, symmetric numeric matrix, the Bernoulli rates for the
#' clusters within a block. Its size must match the size of the \code{rho}
#' vector. Alternatively, a list of square matrices, if the Bernoulli rates
#' differ in different blocks.
#' @param p Numeric scalar, the Bernoulli rate of connections between vertices
#' in different blocks.
#' @param \dots Passed to \code{sample_hierarchical_sbm}.
#' @return An igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sbm.game}}
#' @keywords graphs, random graphs
#' @examples
#'
#' ## Ten blocks with three clusters each
#' C <- matrix(c(1 , 3/4, 0,
#' 3/4, 0, 3/4,
#' 0 , 3/4, 3/4), nrow=3)
#' g <- sample_hierarchical_sbm(100, 10, rho=c(3, 3, 4)/10, C=C, p=1/20)
#' g
#' if (require(Matrix)) { image(g[]) }
#' @export
sample_hierarchical_sbm <- function(n, m, rho, C, p) {
  ## Each of `m`, `rho` and `C` may describe a single block type
  ## (scalar / numeric vector / matrix) or several (vector / list of
  ## vectors / list of matrices).  Collect the distinct lengths seen.
  nblocks <- unique(c(length(m),
                      if (is.list(rho)) length(rho) else 1,
                      if (is.list(C)) length(C) else 1))
  if (length(nblocks) == 1 && nblocks == 1) {
    ## Homogeneous case: every block shares the same parameters
    return(hsbm.1.game(n, m, rho, C, p))
  }
  ## Heterogeneous case: lengths other than 1 must agree, then each
  ## argument is recycled out to the common number of blocks
  nblocks <- setdiff(nblocks, 1)
  if (length(nblocks) != 1) {
    stop("Lengths of `m', `rho' and `C' must match")
  }
  m <- rep(m, length.out = nblocks)
  rho <- rep(if (is.list(rho)) rho else list(rho), length.out = nblocks)
  C <- rep(if (is.list(C)) C else list(C), length.out = nblocks)
  hsbm.list.game(n, m, rho, C, p)
}
#' @export
## Lazy-constructor variant of sample_hierarchical_sbm;
## constructor_spec() is defined elsewhere in this package.
hierarchical_sbm <- function(...)
  constructor_spec(sample_hierarchical_sbm, ...)
## -----------------------------------------------------------------
#' Generate random graphs according to the random dot product graph model
#'
#' In this model, each vertex is represented by a latent position vector.
#' Probability of an edge between two vertices are given by the dot product of
#' their latent position vectors.
#'
#' The dot product of the latent position vectors should be in the [0,1]
#' interval, otherwise a warning is given. For negative dot products, no edges
#' are added; dot products that are larger than one always add an edge.
#'
#' @aliases sample_dot_product dot_product
#' @param vecs A numeric matrix in which each latent position vector is a
#' column.
#' @param directed A logical scalar, TRUE if the generated graph should be
#' directed.
#' @param \dots Passed to \code{sample_dot_product}.
#' @return An igraph graph object which is the generated random dot product
#' graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_dirichlet}}, \code{\link{sample_sphere_surface}}
#' and \code{\link{sample_sphere_volume}} for sampling position vectors.
#' @references Christine Leigh Myers Nickel: Random dot product graphs, a model
#' for social networks. Dissertation, Johns Hopkins University, Maryland, USA,
#' 2006.
#' @keywords graphs
#' @examples
#'
#' ## A randomly generated graph
#' lpvs <- matrix(rnorm(200), 20, 10)
#' lpvs <- apply(lpvs, 2, function(x) { return (abs(x)/sqrt(sum(x^2))) })
#' g <- sample_dot_product(lpvs)
#' g
#'
#' ## Sample latent vectors from the surface of the unit sphere
#' lpvs2 <- sample_sphere_surface(dim=5, n=20)
#' g2 <- sample_dot_product(lpvs2)
#' g2
#' @export
## `sample_dot_product` is bound elsewhere (presumably in auto-generated
## bindings); the self-assignment attaches the roxygen documentation
## above to it -- TODO confirm against the generated sources.
sample_dot_product <- sample_dot_product
#' @export
## Lazy-constructor variant of sample_dot_product; constructor_spec() is
## defined elsewhere in this package.
dot_product <- function(...) constructor_spec(sample_dot_product, ...)
#' A graph with subgraphs that are each a random graph.
#'
#' Create a number of Erdos-Renyi random graphs with identical parameters, and
#' connect them with the specified number of edges.
#'
#'
#' @aliases interconnected.islands.game sample_islands
#' @param islands.n The number of islands in the graph.
#' @param islands.size The size of islands in the graph.
#' @param islands.pin The probability to create each possible edge into each
#' island.
#' @param n.inter The number of edges to create between two islands.
#' @return An igraph graph.
#' @author Samuel Thiriot (\url{https://www.linkedin.com/in/samthiriot})
#' @seealso \code{\link{sample_gnp}}
#' @keywords graphs
#' @examples
#'
#' g <- sample_islands(3, 10, 5/10, 1)
#' oc <- cluster_optimal(g)
#' oc
#' @export
## `sample_islands` is bound elsewhere (presumably in auto-generated
## bindings); the self-assignment attaches the roxygen documentation
## above to it -- TODO confirm against the generated sources.
sample_islands <- sample_islands
#' Create a random regular graph
#'
#' Generate a random graph where each vertex has the same degree.
#'
#' This game generates a directed or undirected random graph where the degrees
#' of vertices are equal to a predefined constant k. For undirected graphs, at
#' least one of k and the number of vertices must be even.
#'
#' The game simply uses \code{\link{sample_degseq}} with appropriately
#' constructed degree sequences.
#'
#' @aliases sample_k_regular k.regular.game
#' @param no.of.nodes Integer scalar, the number of vertices in the generated
#' graph.
#' @param k Integer scalar, the degree of each vertex in the graph, or the
#' out-degree and in-degree in a directed graph.
#' @param directed Logical scalar, whether to create a directed graph.
#' @param multiple Logical scalar, whether multiple edges are allowed.
#' @return An igraph graph.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @seealso \code{\link{sample_degseq}} for a generator with prescribed degree
#' sequence.
#' @keywords graphs
#' @examples
#'
#' ## A simple ring
#' ring <- sample_k_regular(10, 2)
#' plot(ring)
#'
#' ## k-regular graphs on 10 vertices, with k=1:9
#' k10 <- lapply(1:9, sample_k_regular, no.of.nodes=10)
#'
#' layout(matrix(1:9, nrow=3, byrow=TRUE))
#' sapply(k10, plot, vertex.label=NA)
#' @export
## `sample_k_regular` is bound elsewhere (presumably in auto-generated
## bindings); the self-assignment attaches the roxygen documentation
## above to it -- TODO confirm against the generated sources.
sample_k_regular <- sample_k_regular
#' Random graphs from vertex fitness scores
#'
#' This function generates a non-growing random graph with edge probabilities
#' proportional to node fitness scores.
#'
#' This game generates a directed or undirected random graph where the
#' probability of an edge between vertices \eqn{i} and \eqn{j} depends on the
#' fitness scores of the two vertices involved. For undirected graphs, each
#' vertex has a single fitness score. For directed graphs, each vertex has an
#' out- and an in-fitness, and the probability of an edge from \eqn{i} to
#' \eqn{j} depends on the out-fitness of vertex \eqn{i} and the in-fitness of
#' vertex \eqn{j}.
#'
#' The generation process goes as follows. We start from \eqn{N} disconnected
#' nodes (where \eqn{N} is given by the length of the fitness vector). Then we
#' randomly select two vertices \eqn{i} and \eqn{j}, with probabilities
#' proportional to their fitnesses. (When the generated graph is directed,
#' \eqn{i} is selected according to the out-fitnesses and \eqn{j} is selected
#' according to the in-fitnesses). If the vertices are not connected yet (or if
#' multiple edges are allowed), we connect them; otherwise we select a new
#' pair. This is repeated until the desired number of links are created.
#'
#' It can be shown that the \emph{expected} degree of each vertex will be
#' proportional to its fitness, although the actual, observed degree will not
#' be. If you need to generate a graph with an exact degree sequence, consider
#' \code{\link{sample_degseq}} instead.
#'
#' This model is commonly used to generate static scale-free networks. To
#' achieve this, you have to draw the fitness scores from the desired power-law
#' distribution. Alternatively, you may use \code{\link{sample_fitness_pl}}
#' which generates the fitnesses for you with a given exponent.
#'
#' @aliases sample_fitness static.fitness.game
#' @param no.of.edges The number of edges in the generated graph.
#' @param fitness.out A numeric vector containing the fitness of each vertex.
#' For directed graphs, this specifies the out-fitness of each vertex.
#' @param fitness.in If \code{NULL} (the default), the generated graph will be
#' undirected. If not \code{NULL}, then it should be a numeric vector and it
#' specifies the in-fitness of each vertex.
#'
#' If this argument is not \code{NULL}, then a directed graph is generated,
#' otherwise an undirected one.
#' @param loops Logical scalar, whether to allow loop edges in the graph.
#' @param multiple Logical scalar, whether to allow multiple edges in the
#' graph.
#' @return An igraph graph, directed or undirected.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @references Goh K-I, Kahng B, Kim D: Universal behaviour of load
#' distribution in scale-free networks. \emph{Phys Rev Lett} 87(27):278701,
#' 2001.
#' @keywords graphs
#' @examples
#'
#' N <- 10000
#' g <- sample_fitness(5*N, sample((1:50)^-2, N, replace=TRUE))
#' degree_distribution(g)
#' \dontrun{plot(degree_distribution(g, cumulative=TRUE), log="xy")}
sample_fitness <- sample_fitness
#' Scale-free random graphs, from vertex fitness scores
#'
#' This function generates a non-growing random graph with expected power-law
#' degree distributions.
#'
#' This game generates a directed or undirected random graph where the degrees
#' of vertices follow power-law distributions with prescribed exponents. For
#' directed graphs, the exponents of the in- and out-degree distributions may
#' be specified separately.
#'
#' The game simply uses \code{\link{sample_fitness}} with appropriately
#' constructed fitness vectors. In particular, the fitness of vertex \eqn{i} is
#' \eqn{i^{-alpha}}{i^(-alpha)}, where \eqn{alpha = 1/(gamma-1)} and gamma is
#' the exponent given in the arguments.
#'
#' To remove correlations between in- and out-degrees in case of directed
#' graphs, the in-fitness vector will be shuffled after it has been set up and
#' before \code{\link{sample_fitness}} is called.
#'
#' Note that significant finite size effects may be observed for exponents
#' smaller than 3 in the original formulation of the game. This function
#' provides an argument that lets you remove the finite size effects by
#' assuming that the fitness of vertex \eqn{i} is
#' \eqn{(i+i_0-1)^{-alpha}}{(i+i0-1)^(-alpha)} where \eqn{i_0}{i0} is a
#' constant chosen appropriately to ensure that the maximum degree is less than
#' the square root of the number of edges times the average degree; see the
#' paper of Chung and Lu, and Cho et al for more details.
#'
#' @aliases sample_fitness_pl static.power.law.game
#' @param no.of.nodes The number of vertices in the generated graph.
#' @param no.of.edges The number of edges in the generated graph.
#' @param exponent.out Numeric scalar, the power law exponent of the degree
#' distribution. For directed graphs, this specifies the exponent of the
#' out-degree distribution. It must be greater than or equal to 2. If you pass
#' \code{Inf} here, you will get back an Erdos-Renyi random network.
#' @param exponent.in Numeric scalar. If negative, the generated graph will be
#' undirected. If greater than or equal to 2, this argument specifies the
#' exponent of the in-degree distribution. If non-negative but less than 2, an
#' error will be generated.
#' @param loops Logical scalar, whether to allow loop edges in the generated
#' graph.
#' @param multiple Logical scalar, whether to allow multiple edges in the
#' generated graph.
#' @param finite.size.correction Logical scalar, whether to use the proposed
#' finite size correction of Cho et al., see references below.
#' @return An igraph graph, directed or undirected.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @references Goh K-I, Kahng B, Kim D: Universal behaviour of load
#' distribution in scale-free networks. \emph{Phys Rev Lett} 87(27):278701,
#' 2001.
#'
#' Chung F and Lu L: Connected components in a random graph with given degree
#' sequences. \emph{Annals of Combinatorics} 6, 125-145, 2002.
#'
#' Cho YS, Kim JS, Park J, Kahng B, Kim D: Percolation transitions in
#' scale-free networks under the Achlioptas process. \emph{Phys Rev Lett}
#' 103:135702, 2009.
#' @keywords graphs
#' @examples
#'
#' g <- sample_fitness_pl(10000, 30000, 2.2, 2.3)
#' \dontrun{plot(degree_distribution(g, cumulative=TRUE, mode="out"), log="xy")}
sample_fitness_pl <- sample_fitness_pl
#' Forest Fire Network Model
#'
#' This is a growing network model, which resembles how a forest fire
#' spreads by igniting trees close by.
#'
#' The forest fire model intends to reproduce the following network
#' characteristics, observed in real networks: \itemize{ \item Heavy-tailed
#' in-degree distribution. \item Heavy-tailed out-degree distribution. \item
#' Communities. \item Densification power-law. The network is densifying in
#' time, according to a power-law rule. \item Shrinking diameter. The diameter
#' of the network decreases in time. }
#'
#' The network is generated in the following way. One vertex is added at a
#' time. This vertex connects to (cites) \code{ambs} vertices already present
#' in the network, chosen uniformly random. Now, for each cited vertex \eqn{v}
#' we do the following procedure: \enumerate{ \item We generate two random
#' number, \eqn{x} and \eqn{y}, that are geometrically distributed with means
#' \eqn{p/(1-p)} and \eqn{rp(1-rp)}. (\eqn{p} is \code{fw.prob}, \eqn{r} is
#' \code{bw.factor}.) The new vertex cites \eqn{x} outgoing neighbors and
#' \eqn{y} incoming neighbors of \eqn{v}, from those which are not yet cited by
#' the new vertex. If there are less than \eqn{x} or \eqn{y} such vertices
#' available then we cite all of them. \item The same procedure is applied to
#' all the newly cited vertices. }
#'
#' @aliases sample_forestfire forest.fire.game
#' @param nodes The number of vertices in the graph.
#' @param fw.prob The forward burning probability, see details below.
#' @param bw.factor The backward burning ratio. The backward burning
#' probability is calculated as \code{bw.factor*fw.prob}.
#' @param ambs The number of ambassador vertices.
#' @param directed Logical scalar, whether to create a directed graph.
#' @return A simple graph, possibly directed if the \code{directed} argument is
#' \code{TRUE}.
#' @note The version of the model in the published paper is incorrect in the
#' sense that it cannot generate the kind of graphs the authors claim. A
#' corrected version is available from
#' \url{http://www.cs.cmu.edu/~jure/pubs/powergrowth-tkdd.pdf}, our
#' implementation is based on this.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{barabasi.game}} for the basic preferential attachment
#' model.
#' @references Jure Leskovec, Jon Kleinberg and Christos Faloutsos. Graphs over
#' time: densification laws, shrinking diameters and possible explanations.
#' \emph{KDD '05: Proceeding of the eleventh ACM SIGKDD international
#' conference on Knowledge discovery in data mining}, 177--187, 2005.
#' @keywords graphs
#' @examples
#'
#' g <- sample_forestfire(10000, fw.prob=0.37, bw.factor=0.32/0.37)
#' dd1 <- degree_distribution(g, mode="in")
#' dd2 <- degree_distribution(g, mode="out")
#' plot(seq(along=dd1)-1, dd1, log="xy")
#' points(seq(along=dd2)-1, dd2, col=2, pch=2)
sample_forestfire <- sample_forestfire
#' Generate a new random graph from a given graph by randomly
#' adding/removing edges
#'
#' Sample a new graph by perturbing the adjacency matrix of a given graph
#' and shuffling its vertices.
#'
#' Please see the reference given below.
#'
#' @param old.graph The original graph.
#' @param corr A scalar in the unit interval, the target Pearson
#' correlation between the adjacency matrices of the original the generated
#' graph (the adjacency matrix being used as a vector).
#' @param p A numeric scalar, the probability of an edge between two
#' vertices, it must be in the open (0,1) interval.
#' @param permutation A numeric vector, a permutation vector that is
#' applied on the vertices of the first graph, to get the second graph. If
#' \code{NULL}, the vertices are not permuted.
#' @return An unweighted graph of the same size as \code{old.graph} such
#' that the correlation coefficient between the entries of the two
#' adjacency matrices is \code{corr}. Note each pair of corresponding
#' matrix entries is a pair of correlated Bernoulli random variables.
#'
#' @seealso \code{\link{sample_correlated_gnp_pair}},
#' \code{\link{sample_gnp}}
#' @references Lyzinski, V., Fishkind, D. E., Priebe, C. E. (2013). Seeded
#' graph matching for correlated Erdos-Renyi graphs.
#' \url{http://arxiv.org/abs/1304.7844}
#' @examples
#' g <- sample_gnp(1000, .1)
#' g2 <- sample_correlated_gnp(g, corr = 0.5)
#' cor(as.vector(g[]), as.vector(g2[]))
#' g
#' g2
sample_correlated_gnp <- sample_correlated_gnp
#' Sample a pair of correlated G(n,p) random graphs
#'
#' Sample a new graph by perturbing the adjacency matrix of a given graph and
#' shuffling its vertices.
#'
#' Please see the reference given below.
#'
#' @param n Numeric scalar, the number of vertices for the sampled graphs.
#' @param corr A scalar in the unit interval, the target Pearson correlation
#' between the adjacency matrices of the original the generated graph (the
#' adjacency matrix being used as a vector).
#' @param p A numeric scalar, the probability of an edge between two vertices,
#' it must be in the open (0,1) interval.
#' @param directed Logical scalar, whether to generate directed graphs.
#' @param permutation A numeric vector, a permutation vector that is applied on
#' the vertices of the first graph, to get the second graph. If \code{NULL},
#' the vertices are not permuted.
#' @return A list of two igraph objects, named \code{graph1} and
#' \code{graph2}, which are two graphs whose adjacency matrix entries are
#' correlated with \code{corr}.
#'
#' @seealso \code{\link{sample_correlated_gnp}},
#' \code{\link{sample_gnp}}.
#' @references Lyzinski, V., Fishkind, D. E., Priebe, C. E. (2013). Seeded
#' graph matching for correlated Erdos-Renyi graphs.
#' \url{http://arxiv.org/abs/1304.7844}
#' @keywords graphs,random graphs
#' @examples
#' gg <- sample_correlated_gnp_pair(n = 10, corr = .8, p = .5,
#' directed = FALSE)
#' gg
#' cor(as.vector(gg[[1]][]), as.vector(gg[[2]][]))
sample_correlated_gnp_pair <- sample_correlated_gnp_pair
| /R/games.R | no_license | ktargows/rigraph | R | false | false | 71,075 | r |
## -----------------------------------------------------------------
## IGraph R package
## Copyright (C) 2005-2014 Gabor Csardi <csardi.gabor@gmail.com>
## 334 Harvard street, Cambridge, MA 02139 USA
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301 USA
##
## -----------------------------------------------------------------
#' Generate scale-free graphs according to the Barabasi-Albert model
#'
#' The BA-model is a very simple stochastic algorithm for building a graph.
#'
#' This is a simple stochastic algorithm to generate a graph. It is a discrete
#' time step model and in each time step a single vertex is added.
#'
#' We start with a single vertex and no edges in the first time step. Then we
#' add one vertex in each time step and the new vertex initiates some edges to
#' old vertices. The probability that an old vertex is chosen is given by
#' \deqn{P[i] \sim k_i^\alpha+a}{P[i] ~ k[i]^alpha + a} where \eqn{k_i}{k[i]}
#' is the in-degree of vertex \eqn{i} in the current time step (more precisely
#' the number of adjacent edges of \eqn{i} which were not initiated by \eqn{i}
#' itself) and \eqn{\alpha}{alpha} and \eqn{a} are parameters given by the
#' \code{power} and \code{zero.appeal} arguments.
#'
#' The number of edges initiated in a time step is given by the \code{m},
#' \code{out.dist} and \code{out.seq} arguments. If \code{out.seq} is given and
#' not NULL then it gives the number of edges to add in a vector, the first
#' element is ignored, the second is the number of edges to add in the second
#' time step and so on. If \code{out.seq} is not given or null and
#' \code{out.dist} is given and not NULL then it is used as a discrete
#' distribution to generate the number of edges in each time step. Its first
#' element is the probability that no edges will be added, the second is the
#' probability that one edge is added, etc. (\code{out.dist} does not need to
#' sum up to one, it normalized automatically.) \code{out.dist} should contain
#' non-negative numbers and at east one element should be positive.
#'
#' If both \code{out.seq} and \code{out.dist} are omitted or NULL then \code{m}
#' will be used, it should be a positive integer constant and \code{m} edges
#' will be added in each time step.
#'
#' \code{sample_pa} generates a directed graph by default, set
#' \code{directed} to \code{FALSE} to generate an undirected graph. Note that
#' even if an undirected graph is generated \eqn{k_i}{k[i]} denotes the number
#' of adjacent edges not initiated by the vertex itself and not the total (in-
#' + out-) degree of the vertex, unless the \code{out.pref} argument is set to
#' \code{TRUE}.
#'
#' @aliases sample_pa barabasi.game ba.game
#' @param n Number of vertices.
#' @param power The power of the preferential attachment, the default is one,
#' ie. linear preferential attachment.
#' @param m Numeric constant, the number of edges to add in each time step This
#' argument is only used if both \code{out.dist} and \code{out.seq} are omitted
#' or NULL.
#' @param out.dist Numeric vector, the distribution of the number of edges to
#' add in each time step. This argument is only used if the \code{out.seq}
#' argument is omitted or NULL.
#' @param out.seq Numeric vector giving the number of edges to add in each time
#' step. Its first element is ignored as no edges are added in the first time
#' step.
#' @param out.pref Logical, if true the total degree is used for calculating
#' the citation probability, otherwise the in-degree is used.
#' @param zero.appeal The \sQuote{attractiveness} of the vertices with no
#' adjacent edges. See details below.
#' @param directed Whether to create a directed graph.
#' @param algorithm The algorithm to use for the graph generation.
#' \code{psumtree} uses a partial prefix-sum tree to generate the graph, this
#' algorithm can handle any \code{power} and \code{zero.appeal} values and
#' never generates multiple edges. \code{psumtree-multiple} also uses a
#' partial prefix-sum tree, but the generation of multiple edges is allowed.
#' Before the 0.6 version igraph used this algorithm if \code{power} was not
#' one, or \code{zero.appeal} was not one. \code{bag} is the algorithm that
#' was previously (before version 0.6) used if \code{power} was one and
#' \code{zero.appeal} was one as well. It works by putting the ids of the
#' vertices into a bag (mutliset, really), exactly as many times as their
#' (in-)degree, plus once more. Then the required number of cited vertices are
#' drawn from the bag, with replacement. This method might generate multiple
#' edges. It only works if \code{power} and \code{zero.appeal} are equal one.
#' @param start.graph \code{NULL} or an igraph graph. If a graph, then the
#' supplied graph is used as a starting graph for the preferential attachment
#' algorithm. The graph should have at least one vertex. If a graph is supplied
#' here and the \code{out.seq} argument is not \code{NULL}, then it should
#' contain the out degrees of the new vertices only, not the ones in the
#' \code{start.graph}.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}
#' @references Barabasi, A.-L. and Albert R. 1999. Emergence of scaling in
#' random networks \emph{Science}, 286 509--512.
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_pa(10000)
#' degree_distribution(g)
#'
sample_pa <- function(n, power=1, m=NULL, out.dist=NULL, out.seq=NULL,
                      out.pref=FALSE, zero.appeal=1,
                      directed=TRUE, algorithm=c("psumtree",
                        "psumtree-multiple", "bag"),
                      start.graph=NULL) {
  ## A supplied starting graph must itself be an igraph object
  if (!is.null(start.graph) && !is_igraph(start.graph)) {
    stop("`start.graph' not an `igraph' object")
  }
  ## Out-degree specification: `out.seq` takes precedence over
  ## `out.dist`, which takes precedence over `m`.  Warn (and drop the
  ## losers) when more than one was supplied.
  if (!is.null(out.seq) && (!is.null(m) || !is.null(out.dist))) {
    warning("if `out.seq' is given `m' and `out.dist' should be NULL")
    m <- out.dist <- NULL
  }
  if (is.null(out.seq) && !is.null(out.dist) && !is.null(m)) {
    warning("if `out.dist' is given `m' will be ignored")
    m <- NULL
  }
  if (!is.null(m) && m == 0) {
    warning("`m' is zero, graph will be empty")
  }
  if (power < 0) {
    warning("`power' is negative")
  }
  ## Default: one new edge per time step
  if (is.null(m) && is.null(out.dist) && is.null(out.seq)) {
    m <- 1
  }
  ## Coerce everything to the types the C code expects
  n <- as.numeric(n)
  power <- as.numeric(power)
  if (!is.null(m)) m <- as.numeric(m)
  if (!is.null(out.dist)) out.dist <- as.numeric(out.dist)
  if (!is.null(out.seq)) out.seq <- as.numeric(out.seq)
  out.pref <- as.logical(out.pref)
  ## Draw a concrete out-degree sequence from `out.dist` if needed; it
  ## covers new vertices only, so subtract the starting graph's size
  if (!is.null(out.dist)) {
    n.new <- if (is.null(start.graph)) n else n - vcount(start.graph)
    out.seq <- as.numeric(sample(0:(length(out.dist)-1), n.new,
                                 replace=TRUE, prob=out.dist))
  }
  if (is.null(out.seq)) {
    out.seq <- numeric()
  }
  algorithm <- igraph.match.arg(algorithm)
  ## Numeric algorithm code used by the C level
  algo.code <- switch(algorithm,
                      "psumtree" = 1,
                      "psumtree-multiple" = 2,
                      "bag" = 0)
  ## Release the C-level result even if an error interrupts construction
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  g <- .Call("R_igraph_barabasi_game", n, power, m, out.seq, out.pref,
             zero.appeal, directed, algo.code, start.graph,
             PACKAGE="igraph")
  ## Optionally record the construction parameters on the graph
  if (igraph_opt("add.params")) {
    g$name <- "Barabasi graph"
    g$power <- power
    g$m <- m
    g$zero.appeal <- zero.appeal
    g$algorithm <- algorithm
  }
  g
}
#' @rdname sample_pa
#' @param ... Passed to \code{sample_pa}.
#' @export
## Lazy-constructor variant of sample_pa; constructor_spec() is defined
## elsewhere in this package.
pa <- function(...) constructor_spec(sample_pa, ...)
## -----------------------------------------------------------------
#' Generate random graphs according to the G(n,p) Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#'
#' The graph has \sQuote{n} vertices and for each edge the
#' probability that it is present in the graph is \sQuote{p}.
#'
#' @param n The number of vertices in the graph.
#' @param p The probability for drawing an edge between two
#' arbitrary vertices (G(n,p) graph).
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnm}}, \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_gnp(1000, 1/1000)
#' degree_distribution(g)
sample_gnp <- function(n, p, directed = FALSE, loops = FALSE) {
  ## This wrapper always builds G(n,p) graphs; the shared C entry point
  ## also implements G(n,m), selected by its second argument (0 = gnp,
  ## 1 = gnm), so the type code is simply the constant 0 here.
  type <- "gnp"
  ## Release the C-level result even if an error interrupts construction
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  g <- .Call("R_igraph_erdos_renyi_game", as.numeric(n), 0,
             as.numeric(p), as.logical(directed), as.logical(loops),
             PACKAGE="igraph")
  ## Optionally record the construction parameters on the graph
  if (igraph_opt("add.params")) {
    g$name <- sprintf("Erdos renyi (%s) graph", type)
    g$type <- type
    g$loops <- loops
    g$p <- p
  }
  g
}
#' @rdname sample_gnp
#' @param ... Passed to \code{sample_gnp}.
#' @export
gnp <- function(...) constructor_spec(sample_gnp, ...)
## -----------------------------------------------------------------
#' Generate random graphs according to the G(n,m) Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#' The graph has \sQuote{n} vertices and \sQuote{m} edges,
#' and the \sQuote{m} edges are chosen uniformly randomly from the set of all
#' possible edges. This set includes loop edges as well if the \code{loops}
#' parameter is TRUE.
#'
#' @param n The number of vertices in the graph.
#' @param m The number of edges in the graph.
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_gnm(1000, 1000)
#' degree_distribution(g)
sample_gnm <- function(n, m, directed = FALSE, loops = FALSE) {
  ## The C entry point serves both Erdos-Renyi models; code 1 selects G(n, m)
  ## (code 0 would select G(n, p)).
  type <- "gnm"
  type.code <- 1
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_erdos_renyi_game",
                 as.numeric(n), as.numeric(type.code), as.numeric(m),
                 as.logical(directed), as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- sprintf("Erdos renyi (%s) graph", type)
    graph$type <- type
    graph$loops <- loops
    graph$m <- m
  }
  graph
}
#' @rdname sample_gnm
#' @param ... Passed to \code{sample_gnm}.
#' @export
gnm <- function(...) constructor_spec(sample_gnm, ...)
## -----------------------------------------------------------------
#' Generate random graphs according to the Erdos-Renyi model
#'
#' This model is very simple, every possible edge is created with the same
#' constant probability.
#'
#' In G(n,p) graphs, the graph has \sQuote{n} vertices and for each edge the
#' probability that it is present in the graph is \sQuote{p}.
#'
#' In G(n,m) graphs, the graph has \sQuote{n} vertices and \sQuote{m} edges,
#' and the \sQuote{m} edges are chosen uniformly randomly from the set of all
#' possible edges. This set includes loop edges as well if the \code{loops}
#' parameter is TRUE.
#'
#' \code{random.graph.game} is an alias to this function.
#'
#' @section Deprecated:
#'
#' Since igraph version 0.8.0, both \code{erdos.renyi.game} and
#' \code{random.graph.game} are deprecated, and \code{\link{sample_gnp}} and
#' \code{\link{sample_gnm}} should be used instead.
#'
#' @aliases erdos.renyi.game random.graph.game
#' @param n The number of vertices in the graph.
#' @param p.or.m Either the probability for drawing an edge between two
#' arbitrary vertices (G(n,p) graph), or the number of edges in the graph (for
#' G(n,m) graphs).
#' @param type The type of the random graph to create, either \code{gnp}
#' (G(n,p) graph) or \code{gnm} (G(n,m) graph).
#' @param directed Logical, whether the graph will be directed, defaults to
#' FALSE.
#' @param loops Logical, whether to add loop edges, defaults to FALSE.
#' @param \dots Additional arguments, ignored.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}
#' @references Erdos, P. and Renyi, A., On random graphs, \emph{Publicationes
#' Mathematicae} 6, 290--297 (1959).
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- erdos.renyi.game(1000, 1/1000)
#' degree_distribution(g)
#'
erdos.renyi.game <- function(n, p.or.m, type=c("gnp", "gnm"),
                             directed=FALSE, loops=FALSE, ...) {
  type <- igraph.match.arg(type)
  ## Map the model name onto the numeric code expected by the C layer.
  type.code <- switch(type, gnp = 0, gnm = 1)
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_erdos_renyi_game",
                 as.numeric(n), as.numeric(type.code), as.numeric(p.or.m),
                 as.logical(directed), as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    graph$name <- sprintf("Erdos renyi (%s) graph", type)
    graph$type <- type
    graph$loops <- loops
    ## `p.or.m' is stored under the attribute matching the chosen model.
    if (type == "gnp") graph$p <- p.or.m else graph$m <- p.or.m
  }
  graph
}
## Alias kept for backward compatibility; deprecated since igraph 0.8.0 in
## favor of sample_gnp()/sample_gnm() (see the erdos.renyi.game docs above).
#' @export
random.graph.game <- erdos.renyi.game
## -----------------------------------------------------------------
#' Generate random graphs with a given degree sequence
#'
#' It is often useful to create a graph with given vertex degrees. This is
#' exactly what \code{sample_degseq} does.
#'
#' The \dQuote{simple} method connects the out-stubs of the edges (undirected
#' graphs) or the out-stubs and in-stubs (directed graphs) together. This way
#' loop edges and also multiple edges may be generated. This method is not
#' adequate if one needs to generate simple graphs with a given degree
#' sequence. The multiple and loop edges can be deleted, but then the degree
#' sequence is distorted and there is nothing to ensure that the graphs are
#' sampled uniformly.
#'
#' The \dQuote{simple.no.multiple} method is similar to \dQuote{simple}, but
#' tries to avoid multiple and loop edges and restarts the generation from
#' scratch if it gets stuck. It is not guaranteed to sample uniformly from the
#' space of all possible graphs with the given sequence, but it is relatively
#' fast and it will eventually succeed if the provided degree sequence is
#' graphical, but there is no upper bound on the number of iterations.
#'
#' The \dQuote{vl} method is a more sophisticated generator. The algorithm and
#' the implementation was done by Fabien Viger and Matthieu Latapy. This
#' generator always generates undirected, connected simple graphs, it is an
#' error to pass the \code{in.deg} argument to it. The algorithm relies on
#' first creating an initial (possibly unconnected) simple undirected graph
#' with the given degree sequence (if this is possible at all). Then some
#' rewiring is done to make the graph connected. Finally a Monte-Carlo
#' algorithm is used to randomize the graph. The \dQuote{vl} samples from the
#' undirected, connected simple graphs uniformly. See
#' \url{http://www-rp.lip6.fr/~latapy/FV/generation.html} for details.
#'
#' @aliases degree.sequence.game
#' @param out.deg Numeric vector, the sequence of degrees (for undirected
#' graphs) or out-degrees (for directed graphs). For undirected graphs its sum
#' should be even. For directed graphs its sum should be the same as the sum of
#' \code{in.deg}.
#' @param in.deg For directed graph, the in-degree sequence. By default this is
#' \code{NULL} and an undirected graph is created.
#' @param method Character, the method for generating the graph. Right now the
#' \dQuote{simple}, \dQuote{simple.no.multiple} and \dQuote{vl} methods are
#' implemented.
#' @return The new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_pa}},
#' \code{\link{simplify}} to get rid of the multiple and/or loops edges.
#' @export
#' @keywords graphs
#' @examples
#'
#' ## The simple generator
#' g <- sample_degseq(rep(2,100))
#' degree(g)
#' is_simple(g) # sometimes TRUE, but can be FALSE
#' g2 <- sample_degseq(1:10, 10:1)
#' degree(g2, mode="out")
#' degree(g2, mode="in")
#'
#' ## The vl generator
#' g3 <- sample_degseq(rep(2,100), method="vl")
#' degree(g3)
#' is_simple(g3) # always TRUE
#'
#' ## Exponential degree distribution
#' ## Note, that we correct the degree sequence if its sum is odd
#' degs <- sample(1:100, 100, replace=TRUE, prob=exp(-0.5*(1:100)))
#' if (sum(degs) %% 2 != 0) { degs[1] <- degs[1] + 1 }
#' g4 <- sample_degseq(degs, method="vl")
#' all(degree(g4) == degs)
#'
#' ## Power-law degree distribution
#' ## Note, that we correct the degree sequence if its sum is odd
#' degs <- sample(1:100, 100, replace=TRUE, prob=(1:100)^-2)
#' if (sum(degs) %% 2 != 0) { degs[1] <- degs[1] + 1 }
#' g5 <- sample_degseq(degs, method="vl")
#' all(degree(g5) == degs)
sample_degseq <- function(out.deg, in.deg=NULL,
                          method=c("simple", "vl",
                                   "simple.no.multiple")) {
  method <- igraph.match.arg(method)
  ## Numeric method codes understood by the C implementation.
  method.code <- switch(method, simple = 0, vl = 1, simple.no.multiple = 2)
  ## A NULL in-degree sequence means an undirected graph; keep it NULL.
  if (!is.null(in.deg)) {
    in.deg <- as.numeric(in.deg)
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_degree_sequence_game", as.numeric(out.deg),
                 in.deg, as.numeric(method.code),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    graph$name <- "Degree sequence random graph"
    graph$method <- method
  }
  graph
}
#' @rdname sample_degseq
#' @param ... Passed to \code{sample_degseq}.
#' @export
degseq <- function(...) constructor_spec(sample_degseq, ...)
## -----------------------------------------------------------------
#' Growing random graph generation
#'
#' This function creates a random graph by simulating its stochastic evolution.
#'
#' This is discrete time step model, in each time step a new vertex is added to
#' the graph and \code{m} new edges are created. If \code{citation} is
#' \code{FALSE} these edges are connecting two uniformly randomly chosen
#' vertices, otherwise the edges are connecting new vertex to uniformly
#' randomly chosen old vertices.
#'
#' @aliases growing.random.game
#' @param n Numeric constant, number of vertices in the graph.
#' @param m Numeric constant, number of edges added in each time step.
#' @param directed Logical, whether to create a directed graph.
#' @param citation Logical. If \code{TRUE} a citation graph is created, ie. in
#' each time step the added edges are originating from the new vertex.
#' @return A new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}, \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_growing(500, citation=FALSE)
#' g2 <- sample_growing(500, citation=TRUE)
#'
sample_growing <- function(n, m=1, directed=TRUE, citation=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## One vertex and `m` edges are added per simulated time step on the C side.
  graph <- .Call("R_igraph_growing_random_game",
                 as.numeric(n), as.numeric(m),
                 as.logical(directed), as.logical(citation),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Growing random graph"
    graph$m <- m
    graph$citation <- citation
  }
  graph
}
#' @rdname sample_growing
#' @param ... Passed to \code{sample_growing}.
#' @export
growing <- function(...) constructor_spec(sample_growing, ...)
## -----------------------------------------------------------------
#' Generate an evolving random graph with preferential attachment and aging
#'
#' This function creates a random graph by simulating its evolution. Each time
#' a new vertex is added it creates a number of links to old vertices and the
#' probability that an old vertex is cited depends on its in-degree
#' (preferential attachment) and age.
#'
#' This is a discrete time step model of a growing graph. We start with a
#' network containing a single vertex (and no edges) in the first time step.
#' Then in each time step (starting with the second) a new vertex is added and
#' it initiates a number of edges to the old vertices in the network. The
#' probability that an old vertex is connected to is proportional to \deqn{P[i]
#' \sim (c\cdot k_i^\alpha+a)(d\cdot l_i^\beta+b)}{% P[i] ~ (c k[i]^alpha
#' + a) (d l[i]^beta + b)}
#'
#' Here \eqn{k_i}{k[i]} is the in-degree of vertex \eqn{i} in the current time
#' step and \eqn{l_i}{l[i]} is the age of vertex \eqn{i}. The age is simply
#' defined as the number of time steps passed since the vertex is added, with
#' the extension that vertex age is divided to be in \code{aging.bin} bins.
#'
#' \eqn{c}, \eqn{\alpha}{alpha}, \eqn{a}, \eqn{d}, \eqn{\beta}{beta} and
#' \eqn{b} are parameters and they can be set via the following arguments:
#' \code{pa.exp} (\eqn{\alpha}{alpha}, mandatory argument), \code{aging.exp}
#' (\eqn{\beta}{beta}, mandatory argument), \code{zero.deg.appeal} (\eqn{a},
#' optional, the default value is 1), \code{zero.age.appeal} (\eqn{b},
#' optional, the default is 0), \code{deg.coef} (\eqn{c}, optional, the default
#' is 1), and \code{age.coef} (\eqn{d}, optional, the default is 1).
#'
#' The number of edges initiated in each time step is governed by the \code{m},
#' \code{out.seq} and \code{out.pref} parameters. If \code{out.seq} is given
#' then it is interpreted as a vector giving the number of edges to be added in
#' each time step. It should be of length \code{n} (the number of vertices),
#' and its first element will be ignored. If \code{out.seq} is not given (or
#' NULL) and \code{out.dist} is given then it will be used as a discrete
#' probability distribution to generate the number of edges. Its first element
#' gives the probability that zero edges are added at a time step, the second
#' element is the probability that one edge is added, etc. (\code{out.seq}
#' should contain non-negative numbers, but if they don't sum up to 1, they
#' will be normalized to sum up to 1. This behavior is similar to the
#' \code{prob} argument of the \code{sample} command.)
#'
#' By default a directed graph is generated, but if \code{directed} is set to
#' \code{FALSE} then an undirected one is created. Even if an undirected graph
#' is generated, \eqn{k_i}{k[i]} denotes only the adjacent edges not initiated by
#' the vertex itself except if \code{out.pref} is set to \code{TRUE}.
#'
#' If the \code{time.window} argument is given (and not NULL) then
#' \eqn{k_i}{k[i]} means only the adjacent edges added in the previous
#' \code{time.window} time steps.
#'
#' This function might generate graphs with multiple edges.
#'
#' @aliases sample_pa_age aging.prefatt.game aging.barabasi.game aging.ba.game
#' @param n The number of vertices in the graph.
#' @param pa.exp The preferential attachment exponent, see the details below.
#' @param aging.exp The exponent of the aging, usually a non-positive number,
#' see details below.
#' @param m The number of edges each new vertex creates (except the very first
#' vertex). This argument is used only if both the \code{out.dist} and
#' \code{out.seq} arguments are NULL.
#' @param aging.bin The number of bins to use for measuring the age of
#' vertices, see details below.
#' @param out.dist The discrete distribution to generate the number of edges to
#' add in each time step if \code{out.seq} is NULL. See details below.
#' @param out.seq The number of edges to add in each time step, a vector
#' containing as many elements as the number of vertices. See details below.
#' @param out.pref Logical constant, whether to include edges not initiated by
#' the vertex as a basis of preferential attachment. See details below.
#' @param directed Logical constant, whether to generate a directed graph. See
#' details below.
#' @param zero.deg.appeal The degree-dependent part of the
#' \sQuote{attractiveness} of the vertices with no adjacent edges. See also
#' details below.
#' @param zero.age.appeal The age-dependent part of the \sQuote{attractiveness}
#' of the vertices with age zero. It is usually zero, see details below.
#' @param deg.coef The coefficient of the degree-dependent
#' \sQuote{attractiveness}. See details below.
#' @param age.coef The coefficient of the age-dependent part of the
#' \sQuote{attractiveness}. See details below.
#' @param time.window Integer constant; if not NULL, only adjacent edges added
#' in the last \code{time.window} time steps are counted as a basis of the
#' preferential attachment. See also details below.
#' @return A new graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_pa}}, \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' # The maximum degree for graph with different aging exponents
#' g1 <- sample_pa_age(10000, pa.exp=1, aging.exp=0, aging.bin=1000)
#' g2 <- sample_pa_age(10000, pa.exp=1, aging.exp=-1, aging.bin=1000)
#' g3 <- sample_pa_age(10000, pa.exp=1, aging.exp=-3, aging.bin=1000)
#' max(degree(g1))
#' max(degree(g2))
#' max(degree(g3))
sample_pa_age <- function(n, pa.exp, aging.exp, m=NULL, aging.bin=300,
                          out.dist=NULL, out.seq=NULL,
                          out.pref=FALSE, directed=TRUE,
                          zero.deg.appeal=1, zero.age.appeal=0,
                          deg.coef=1, age.coef=1,
                          time.window=NULL) {
  # Checks. The three ways of specifying per-step edge counts (`m',
  # `out.dist', `out.seq') are mutually exclusive: `out.seq' wins over
  # `out.dist', which wins over `m'.
  if (! is.null(out.seq) && (!is.null(m) || !is.null(out.dist))) {
    warning("if `out.seq' is given `m' and `out.dist' should be NULL")
    m <- out.dist <- NULL
  }
  if (is.null(out.seq) && !is.null(out.dist) && !is.null(m)) {
    warning("if `out.dist' is given `m' will be ignored")
    m <- NULL
  }
  # `out.seq' must supply one value per vertex and be non-negative.
  if (!is.null(out.seq) && length(out.seq) != n) {
    stop("`out.seq' should be of length `n'")
  }
  if (!is.null(out.seq) && min(out.seq)<0) {
    stop("negative elements in `out.seq'");
  }
  if (!is.null(m) && m<0) {
    stop("`m' is negative")
  }
  if (!is.null(time.window) && time.window <= 0) {
    stop("time window size should be positive")
  }
  # The remaining checks only warn: the parameters are legal but unusual.
  if (!is.null(m) && m==0) {
    warning("`m' is zero, graph will be empty")
  }
  if (pa.exp < 0) {
    warning("preferential attachment is negative")
  }
  if (aging.exp > 0) {
    warning("aging exponent is positive")
  }
  if (zero.deg.appeal <=0 ) {
    warning("initial attractiveness is not positive")
  }
  # Default: one edge per time step if nothing else was specified.
  if (is.null(m) && is.null(out.dist) && is.null(out.seq)) {
    m <- 1
  }
  # Coerce everything to the types the C layer expects, keeping NULLs NULL.
  n <- as.numeric(n)
  if (!is.null(m)) { m <- as.numeric(m) }
  if (!is.null(out.dist)) { out.dist <- as.numeric(out.dist) }
  if (!is.null(out.seq)) { out.seq <- as.numeric(out.seq) }
  out.pref <- as.logical(out.pref)
  # Turn a probability distribution over out-degrees into a concrete
  # out-degree sequence by sampling one value (0-based) per vertex.
  if (!is.null(out.dist)) {
    out.seq <- as.numeric(sample(0:(length(out.dist)-1), n,
                                 replace=TRUE, prob=out.dist))
  }
  # The C code takes an empty vector (not NULL) for "no sequence given".
  if (is.null(out.seq)) {
    out.seq <- numeric()
  }
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  # Two distinct C implementations: the time-windowed variant takes only a
  # subset of the parameters (no age/degree coefficients or age appeal).
  res <- if (is.null(time.window)) {
    .Call("R_igraph_barabasi_aging_game", as.numeric(n),
          as.numeric(pa.exp), as.numeric(aging.exp),
          as.numeric(aging.bin), m, out.seq,
          out.pref, as.numeric(zero.deg.appeal), as.numeric(zero.age.appeal),
          as.numeric(deg.coef), as.numeric(age.coef), directed,
          PACKAGE="igraph")
  } else {
    .Call("R_igraph_recent_degree_aging_game", as.numeric(n),
          as.numeric(pa.exp), as.numeric(aging.exp),
          as.numeric(aging.bin), m, out.seq, out.pref, as.numeric(zero.deg.appeal),
          directed,
          time.window,
          PACKAGE="igraph")
  }
  if (igraph_opt("add.params")) {
    # Store the generation parameters as graph attributes; an absent time
    # window is recorded as Inf.
    res$name <- "Aging Barabasi graph"
    res$pa.exp <- pa.exp
    res$aging.exp <- aging.exp
    res$m <- m
    res$aging.bin <- aging.bin
    res$out.pref <- out.pref
    res$zero.deg.appeal <- zero.deg.appeal
    res$zero.age.appeal <- zero.age.appeal
    res$deg.coef <- deg.coef
    res$age.coef <- age.coef
    res$time.window <- if (is.null(time.window)) Inf else time.window
  }
  res
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_pa_age().
#' @rdname sample_pa_age
#' @param ... Passed to \code{sample_pa_age}.
#' @export
pa_age <- function(...) constructor_spec(sample_pa_age, ...)
## -----------------------------------------------------------------
#' Graph generation based on different vertex types
#'
#' These functions implement evolving network models based on different vertex
#' types.
#'
#' For \code{sample_traits_callaway} the simulation goes like this: in each
#' discrete time step a new vertex is added to the graph. The type of this
#' vertex is generated based on \code{type.dist}. Then two vertices are
#' selected uniformly randomly from the graph. The probability that they will
#' be connected depends on the types of these vertices and is taken from
#' \code{pref.matrix}. Then another two vertices are selected and this is
#' repeated \code{edge.per.step} times in each time step.
#'
#' For \code{sample_traits} the simulation goes like this: a single vertex is
#' added at each time step. This new vertex tries to connect to \code{k}
#' vertices in the graph. The probability that such a connection is realized
#' depends on the types of the vertices involved and is taken from
#' \code{pref.matrix}.
#'
#' @aliases sample_traits_callaway sample_traits callaway.traits.game
#' establishment.game
#' @param nodes The number of vertices in the graph.
#' @param types The number of different vertex types.
#' @param edge.per.step The number of edges to add to the graph per time step.
#' @param type.dist The distribution of the vertex types. This is assumed to be
#' stationary in time.
#' @param pref.matrix A matrix giving the preferences of the given vertex
#' types. These should be probabilities, ie. numbers between zero and one.
#' @param directed Logical constant, whether to generate directed graphs.
#' @param k The number of trials per time step, see details below.
#' @return A new graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @export
#' @keywords graphs
#' @examples
#'
#' # two types of vertices, they like only themselves
#' g1 <- sample_traits_callaway(1000, 2, pref.matrix=matrix( c(1,0,0,1), nc=2))
#' g2 <- sample_traits(1000, 2, k=2, pref.matrix=matrix( c(1,0,0,1), nc=2))
sample_traits_callaway <- function(nodes, types, edge.per.step=1,
                                   type.dist=rep(1, types),
                                   pref.matrix=matrix(1, types, types),
                                   directed=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## Coerce the preference matrix to a double matrix of the expected shape
  ## before handing everything to the C implementation.
  pm <- matrix(as.double(pref.matrix), types, types)
  graph <- .Call("R_igraph_callaway_traits_game", as.double(nodes),
                 as.double(types), as.double(edge.per.step),
                 as.double(type.dist), pm,
                 as.logical(directed),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Trait-based Callaway graph"
    graph$types <- types
    graph$edge.per.step <- edge.per.step
    graph$type.dist <- type.dist
    graph$pref.matrix <- pref.matrix
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_traits_callaway().
#' @rdname sample_traits_callaway
#' @param ... Passed to the constructor, \code{sample_traits} or
#' \code{sample_traits_callaway}.
#' @export
traits_callaway <- function(...) constructor_spec(sample_traits_callaway, ...)
#' @rdname sample_traits_callaway
#' @export
sample_traits <- function(nodes, types, k=1, type.dist=rep(1, types),
                          pref.matrix=matrix(1, types, types),
                          directed=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## Coerce the preference matrix to a double matrix of the expected shape
  ## before handing everything to the C implementation.
  pm <- matrix(as.double(pref.matrix), types, types)
  graph <- .Call("R_igraph_establishment_game", as.double(nodes),
                 as.double(types), as.double(k), as.double(type.dist),
                 pm,
                 as.logical(directed),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Trait-based growing graph"
    graph$types <- types
    graph$k <- k
    graph$type.dist <- type.dist
    graph$pref.matrix <- pref.matrix
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_traits().
#' @rdname sample_traits_callaway
#' @export
traits <- function(...) constructor_spec(sample_traits, ...)
## -----------------------------------------------------------------
#' Geometric random graphs
#'
#' Generate a random graph based on the distance of random point on a unit
#' square
#'
#' First a number of points are dropped on a unit square, these points
#' correspond to the vertices of the graph to create. Two points will be
#' connected with an undirected edge if they are closer to each other in
#' Euclidean norm than a given radius. If the \code{torus} argument is
#' \code{TRUE} then a unit area torus is used instead of a square.
#'
#' @aliases grg.game
#' @param nodes The number of vertices in the graph.
#' @param radius The radius within which the vertices will be connected by an
#' edge.
#' @param torus Logical constant, whether to use a torus instead of a square.
#' @param coords Logical scalar, whether to add the positions of the vertices
#' as vertex attributes called \sQuote{\code{x}} and \sQuote{\code{y}}.
#' @return A graph object. If \code{coords} is \code{TRUE} then with vertex
#' attributes \sQuote{\code{x}} and \sQuote{\code{y}}.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}, first version was
#' written by Keith Briggs (\url{http://keithbriggs.info/}).
#' @seealso \code{\link{sample_gnp}}
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_grg(1000, 0.05, torus=FALSE)
#' g2 <- sample_grg(1000, 0.05, torus=TRUE)
#'
sample_grg <- function(nodes, radius, torus=FALSE, coords=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## The C routine returns a list: the graph plus the x and y coordinates.
  raw <- .Call("R_igraph_grg_game", as.double(nodes), as.double(radius),
               as.logical(torus), as.logical(coords),
               PACKAGE = "igraph")
  graph <- raw[[1]]
  if (coords) {
    ## Attach the sampled point positions as vertex attributes.
    V(graph)$x <- raw[[2]]
    V(graph)$y <- raw[[3]]
  }
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Geometric random graph"
    graph$radius <- radius
    graph$torus <- torus
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_grg().
#' @rdname sample_grg
#' @param ... Passed to \code{sample_grg}.
#' @export
grg <- function(...) constructor_spec(sample_grg, ...)
## -----------------------------------------------------------------
#' Trait-based random generation
#'
#' Generation of random graphs based on different vertex types.
#'
#' Both models generate random graphs with given vertex types. For
#' \code{sample_pref} the probability that two vertices will be connected
#' depends on their type and is given by the \sQuote{pref.matrix} argument.
#' This matrix should be symmetric to make sense but this is not checked. The
#' distribution of the different vertex types is given by the
#' \sQuote{type.dist} vector.
#'
#' For \code{sample_asym_pref} each vertex has an in-type and an
#' out-type and a directed graph is created. The probability that a directed
#' edge is realized from a vertex with a given out-type to a vertex with a
#' given in-type is given in the \sQuote{pref.matrix} argument, which can be
#' asymmetric. The joint distribution for the in- and out-types is given in the
#' \sQuote{type.dist.matrix} argument.
#'
#' @aliases sample_pref sample_asym_pref preference.game asymmetric.preference.game
#' @param nodes The number of vertices in the graphs.
#' @param types The number of different vertex types.
#' @param type.dist The distribution of the vertex types, a numeric vector of
#' length \sQuote{types} containing non-negative numbers. The vector will be
#' normed to obtain probabilities.
#' @param fixed.sizes Fix the number of vertices with a given vertex type
#' label. The \code{type.dist} argument gives the group sizes (i.e. number of
#' vertices with the different labels) in this case.
#' @param type.dist.matrix The joint distribution of the in- and out-vertex
#' types.
#' @param pref.matrix A square matrix giving the preferences of the vertex
#' types. The matrix has \sQuote{types} rows and columns.
#' @param directed Logical constant, whether to create a directed graph.
#' @param loops Logical constant, whether self-loops are allowed in the graph.
#' @return An igraph graph.
#' @author Tamas Nepusz \email{ntamas@@gmail.com} and Gabor Csardi
#' \email{csardi.gabor@@gmail.com} for the R interface
#' @seealso \code{\link{sample_traits}}.
#' \code{\link{sample_traits_callaway}}
#' @export
#' @keywords graphs
#' @examples
#'
#' pf <- matrix( c(1, 0, 0, 1), nr=2)
#' g <- sample_pref(20, 2, pref.matrix=pf)
#' \dontrun{tkplot(g, layout=layout_with_fr)}
#'
#' pf <- matrix( c(0, 1, 0, 0), nr=2)
#' g <- sample_asym_pref(20, 2, pref.matrix=pf)
#' \dontrun{tkplot(g, layout=layout_in_circle)}
#'
sample_pref <- function(nodes, types, type.dist=rep(1, types),
                        fixed.sizes=FALSE,
                        pref.matrix=matrix(1, types, types),
                        directed=FALSE, loops=FALSE) {
  ## The preference matrix must be `types` x `types`.
  if (nrow(pref.matrix) != types || ncol(pref.matrix) != types) {
    stop("Invalid size for preference matrix")
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## The C routine returns the graph together with the 0-based vertex types.
  raw <- .Call("R_igraph_preference_game", as.double(nodes),
               as.double(types),
               as.double(type.dist), as.logical(fixed.sizes),
               matrix(as.double(pref.matrix), types, types),
               as.logical(directed), as.logical(loops),
               PACKAGE = "igraph")
  graph <- raw[[1]]
  ## Convert the vertex types to 1-based for the R side.
  V(graph)$type <- raw[[2]] + 1
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Preference random graph"
    graph$types <- types
    graph$type.dist <- type.dist
    graph$fixed.sizes <- fixed.sizes
    graph$pref.matrix <- pref.matrix
    graph$loops <- loops
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_pref().
#' @rdname sample_pref
#' @param ... Passed to the constructor, \code{sample_pref} or
#' \code{sample_asym_pref}.
#' @export
pref <- function(...) constructor_spec(sample_pref, ...)
#' @rdname sample_pref
#' @export
sample_asym_pref <- function(nodes, types,
                             type.dist.matrix=matrix(1, types,types),
                             pref.matrix=matrix(1, types, types),
                             loops=FALSE) {
  ## Both input matrices must be `types` x `types`.
  if (nrow(pref.matrix) != types || ncol(pref.matrix) != types) {
    stop("Invalid size for preference matrix")
  }
  if (nrow(type.dist.matrix) != types || ncol(type.dist.matrix) != types) {
    stop("Invalid size for type distribution matrix")
  }
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  graph <- .Call("R_igraph_asymmetric_preference_game",
                 as.double(nodes), as.double(types),
                 matrix(as.double(type.dist.matrix), types, types),
                 matrix(as.double(pref.matrix), types, types),
                 as.logical(loops),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Asymmetric preference random graph"
    graph$types <- types
    graph$type.dist.matrix <- type.dist.matrix
    graph$pref.matrix <- pref.matrix
    graph$loops <- loops
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_asym_pref().
#' @rdname sample_pref
#' @export
asym_pref <- function(...) constructor_spec(sample_asym_pref, ...)
## -----------------------------------------------------------------
## Wrapper around the C routine `R_igraph_connect_neighborhood': returns a
## new graph derived from `graph' based on vertex neighborhoods of the given
## `order' (presumably connecting each vertex to its order-neighborhood --
## confirm against the C implementation).
##
## Arguments:
##   graph - an igraph object.
##   order - numeric, the neighborhood order.
##   mode  - which edge directions to follow for directed graphs;
##           "all" and "total" are synonyms.
connect <- function(graph, order, mode=c("all", "out", "in", "total")) {
  if (!is_igraph(graph)) {
    stop("Not a graph object")
  }
  mode <- igraph.match.arg(mode)
  # Map the direction name onto the numeric code used by the C layer;
  # "all" and "total" share code 3.
  mode <- switch(mode, "out"=1, "in"=2, "all"=3, "total"=3)
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  .Call("R_igraph_connect_neighborhood", graph, as.numeric(order),
        as.numeric(mode),
        PACKAGE="igraph")
}
#' The Watts-Strogatz small-world model
#'
#' Generate a graph according to the Watts-Strogatz network model.
#'
#' First a lattice is created with the given \code{dim}, \code{size} and
#' \code{nei} arguments. Then the edges of the lattice are rewired uniformly
#' randomly with probability \code{p}.
#'
#' Note that this function might create graphs with loops and/or multiple
#' edges. You can use \code{\link{simplify}} to get rid of these.
#'
#' @aliases watts.strogatz.game
#' @param dim Integer constant, the dimension of the starting lattice.
#' @param size Integer constant, the size of the lattice along each dimension.
#' @param nei Integer constant, the neighborhood within which the vertices of
#' the lattice will be connected.
#' @param p Real constant between zero and one, the rewiring probability.
#' @param loops Logical scalar, whether loops edges are allowed in the
#' generated graph.
#' @param multiple Logical scalar, whether multiple edges are allowed in the
#' generated graph.
#' @return A graph object.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{make_lattice}}, \code{\link{rewire}}
#' @references Duncan J Watts and Steven H Strogatz: Collective dynamics of
#' \sQuote{small world} networks, Nature 393, 440-442, 1998.
#' @export
#' @keywords graphs
#' @examples
#'
#' g <- sample_smallworld(1, 100, 5, 0.05)
#' mean_distance(g)
#' transitivity(g, type="average")
#'
sample_smallworld <- function(dim, size, nei, p, loops=FALSE,
                              multiple=FALSE) {
  on.exit(.Call("R_igraph_finalizer", PACKAGE = "igraph"))
  ## Lattice construction and rewiring both happen on the C side.
  graph <- .Call("R_igraph_watts_strogatz_game", as.numeric(dim),
                 as.numeric(size), as.numeric(nei), as.numeric(p),
                 as.logical(loops), as.logical(multiple),
                 PACKAGE = "igraph")
  if (igraph_opt("add.params")) {
    ## Record the generation parameters as graph attributes.
    graph$name <- "Watts-Strogatz random graph"
    graph$dim <- dim
    graph$size <- size
    graph$nei <- nei
    graph$p <- p
    graph$loops <- loops
    graph$multiple <- multiple
  }
  graph
}
## Deferred constructor: captures the arguments via constructor_spec() and
## defers graph generation to sample_smallworld().
#' @rdname sample_smallworld
#' @param ... Passed to \code{sample_smallworld}.
#' @export
smallworld <- function(...) constructor_spec(sample_smallworld, ...)
## -----------------------------------------------------------------
#' Random citation graphs
#'
#' \code{sample_last_cit} creates a graph, where vertices age, and
#' gain new connections based on how long ago their last citation
#' happened.
#'
#' \code{sample_cit_cit_types} is a stochastic block model where the
#' graph is growing.
#'
#' \code{sample_cit_types} is similarly a growing stochastic block model,
#' but the probability of an edge depends on the (potentially) cited
#' vertex only.
#'
#' @aliases cited.type.game sample_cit_types citing.cited.type.game
#' sample_cit_cit_types sample_last_cit lastcit.game
#' @param n Number of vertices.
#' @param edges Number of edges per step.
#' @param agebins Number of aging bins.
#' @param pref Vector (\code{sample_last_cit} and \code{sample_cit_types} or
#' matrix (\code{sample_cit_cit_types}) giving the (unnormalized) citation
#' probabilities for the different vertex types.
#' @param directed Logical scalar, whether to generate directed networks.
#' @param types Vector of length \sQuote{\code{n}}, the types of the vertices.
#' Types are numbered from zero.
#' @param attr Logical scalar, whether to add the vertex types to the generated
#' graph as a vertex attribute called \sQuote{\code{type}}.
#' @return A new graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @keywords graphs
#' @export
sample_last_cit <- function(n, edges=1, agebins=n/7100, pref=(1:(agebins+1))^-3,
                            directed=TRUE) {
  ## Clean up igraph's C-level state on exit.
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  ## Sample the growing citation network in C; citation probability depends
  ## on how long ago a vertex was last cited (see the roxygen docs above).
  graph <- .Call(
    "R_igraph_lastcit_game",
    as.numeric(n), as.numeric(edges), as.numeric(agebins),
    as.numeric(pref), as.logical(directed),
    PACKAGE="igraph"
  )
  ## Optionally attach the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    graph$name <- "Random citation graph based on last citation"
    graph$edges <- edges
    graph$agebins <- agebins
  }
  graph
}
#' @rdname sample_last_cit
#' @param ... Passed to the actual constructor.
#' @export
last_cit <- function(...) {
  constructor_spec(sample_last_cit, ...)
}
#' @rdname sample_last_cit
#' @export
sample_cit_types <- function(n, edges=1, types=rep(0, n),
                             pref=rep(1, length(types)),
                             directed=TRUE, attr=TRUE) {
  ## Clean up igraph's C-level state on exit.
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  ## Growing citation model where the citation probability depends only on
  ## the type of the (potentially) cited vertex.
  graph <- .Call(
    "R_igraph_cited_type_game",
    as.numeric(n), as.numeric(edges), as.numeric(types), as.numeric(pref),
    as.logical(directed),
    PACKAGE="igraph"
  )
  ## Optionally store the vertex types in the `type' vertex attribute.
  if (attr) {
    V(graph)$type <- types
  }
  ## Optionally attach the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    graph$name <- "Random citation graph (cited type)"
    graph$edges <- edges
  }
  graph
}
#' @rdname sample_last_cit
#' @export
cit_types <- function(...) {
  constructor_spec(sample_cit_types, ...)
}
#' @rdname sample_last_cit
#' @export
sample_cit_cit_types <- function(n, edges=1, types=rep(0, n),
                                 pref=matrix(1, nrow=length(types),
                                   ncol=length(types)),
                                 directed=TRUE, attr=TRUE) {
  ## Coerce the preference matrix to numeric while preserving its dimensions,
  ## as the C code expects a numeric matrix.
  pref <- structure(as.numeric(pref), dim=dim(pref))
  ## Clean up igraph's C-level state on exit.
  on.exit(.Call("R_igraph_finalizer", PACKAGE="igraph"))
  ## Growing stochastic block model: edge probability depends on the types of
  ## both the citing and the cited vertex.
  graph <- .Call(
    "R_igraph_citing_cited_type_game",
    as.numeric(n), as.numeric(types), pref, as.numeric(edges),
    as.logical(directed),
    PACKAGE="igraph"
  )
  ## Optionally store the vertex types in the `type' vertex attribute.
  if (attr) {
    V(graph)$type <- types
  }
  ## Optionally attach the construction parameters as graph attributes.
  if (igraph_opt("add.params")) {
    graph$name <- "Random citation graph (citing & cited type)"
    graph$edges <- edges
  }
  graph
}
#' @rdname sample_last_cit
#' @export
cit_cit_types <- function(...) {
  constructor_spec(sample_cit_cit_types, ...)
}
## -----------------------------------------------------------------
#' Bipartite random graphs
#'
#' Generate bipartite graphs using the Erdos-Renyi model
#'
#' Similarly to unipartite (one-mode) networks, we can define the $G(n,p)$, and
#' $G(n,m)$ graph classes for bipartite graphs, via their generating process.
#' In $G(n,p)$ every possible edge between top and bottom vertices is realized
#' with probability $p$, independently of the rest of the edges. In $G(n,m)$, we
#' uniformly choose $m$ edges to realize.
#'
#' @aliases bipartite.random.game
#' @param n1 Integer scalar, the number of bottom vertices.
#' @param n2 Integer scalar, the number of top vertices.
#' @param type Character scalar, the type of the graph, \sQuote{gnp} creates a
#' $G(n,p)$ graph, \sQuote{gnm} creates a $G(n,m)$ graph. See details below.
#' @param p Real scalar, connection probability for $G(n,p)$ graphs. Should not
#' be given for $G(n,m)$ graphs.
#' @param m Integer scalar, the number of edges for $G(n,m)$ graphs. Should not
#' be given for $G(n,p)$ graphs.
#' @param directed Logical scalar, whether to create a directed graph. See also
#' the \code{mode} argument.
#' @param mode Character scalar, specifies how to direct the edges in directed
#' graphs. If it is \sQuote{out}, then directed edges point from bottom
#' vertices to top vertices. If it is \sQuote{in}, edges point from top
#' vertices to bottom vertices. \sQuote{out} and \sQuote{in} do not generate
#' mutual edges. If this argument is \sQuote{all}, then each edge direction is
#' considered independently and mutual edges might be generated. This argument
#' is ignored for undirected graphs.
#' @return A bipartite igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}} for the unipartite version.
#' @export
#' @keywords graphs
#' @examples
#'
#' ## empty graph
#' sample_bipartite(10, 5, p=0)
#'
#' ## full graph
#' sample_bipartite(10, 5, p=1)
#'
#' ## random bipartite graph
#' sample_bipartite(10, 5, p=.1)
#'
#' ## directed bipartite graph, G(n,m)
#' sample_bipartite(10, 5, type="Gnm", m=20, directed=TRUE, mode="all")
#'
sample_bipartite <- function(n1, n2, type=c("gnp", "gnm"), p, m,
                             directed=FALSE, mode=c("out", "in", "all")) {
  n1 <- as.integer(n1)
  n2 <- as.integer(n2)
  type <- igraph.match.arg(type)
  if (!missing(p)) { p <- as.numeric(p) }
  if (!missing(m)) { m <- as.integer(m) }
  directed <- as.logical(directed)
  ## Translate the mode string to the integer code expected by the C layer.
  mode <- switch(igraph.match.arg(mode), "out"=1, "in"=2, "all"=3)
  ## Each graph type needs exactly one of `p' (Gnp) or `m' (Gnm); the
  ## missing required argument is an error, a superfluous one a warning.
  if (type=="gnp" && missing(p)) {
    stop("Connection probability `p' is not given for Gnp graph")
  }
  if (type=="gnp" && !missing(m)) {
    warning("Number of edges `m' is ignored for Gnp graph")
  }
  if (type=="gnm" && missing(m)) {
    stop("Number of edges `m' is not given for Gnm graph")
  }
  if (type=="gnm" && !missing(p)) {
    ## Bug fix: this warning previously said "Gnp graph", but it fires for
    ## Gnm graphs, where `p' is the ignored argument.
    warning("Connection probability `p' is ignored for Gnm graph")
  }
  ## Clean up igraph's C-level state on exit.
  on.exit( .Call("R_igraph_finalizer", PACKAGE="igraph") )
  if (type=="gnp") {
    res <- .Call("R_igraph_bipartite_game_gnp", n1, n2, p, directed, mode,
                 PACKAGE="igraph")
    ## The C code returns the graph plus the bipartite type vector; record
    ## the latter as the standard `type' vertex attribute.
    res <- set_vertex_attr(res$graph, "type", value=res$types)
    res$name <- "Bipartite Gnp random graph"
    res$p <- p
  } else if (type=="gnm") {
    res <- .Call("R_igraph_bipartite_game_gnm", n1, n2, m, directed, mode,
                 PACKAGE="igraph")
    res <- set_vertex_attr(res$graph, "type", value=res$types)
    res$name <- "Bipartite Gnm random graph"
    res$m <- m
  }
  res
}
#' @rdname sample_bipartite
#' @param ... Passed to \code{sample_bipartite}.
#' @export
bipartite <- function(...) {
  constructor_spec(sample_bipartite, ...)
}
#' Sample stochastic block model
#'
#' Sampling from the stochastic block model of networks
#'
#' This function samples graphs from a stochastic block model by (doing the
#' equivalent of) Bernoulli trials for each potential edge with the
#' probabilities given by the Bernoulli rate matrix, \code{pref.matrix}.
#'
#' @aliases sample_sbm sbm.game sbm
#' @param n Number of vertices in the graph.
#' @param pref.matrix The matrix giving the Bernoulli rates. This is a
#' \eqn{K\times K}{KxK} matrix, where \eqn{K} is the number of groups. The
#' probability of creating an edge between vertices from groups \eqn{i} and
#' \eqn{j} is given by element \eqn{(i,j)}. For undirected graphs, this matrix
#' must be symmetric.
#' @param block.sizes Numeric vector giving the number of vertices in each
#' group. The sum of the vector must match the number of vertices.
#' @param directed Logical scalar, whether to generate a directed graph.
#' @param loops Logical scalar, whether self-loops are allowed in the graph.
#' @param \dots Passed to \code{sample_sbm}.
#' @return An igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_gnp}}, \code{\link{sample_gnm}}
#' @references Faust, K., & Wasserman, S. (1992a). Blockmodels: Interpretation
#' and evaluation. \emph{Social Networks}, 14, 5--61.
#' @keywords graphs
#' @examples
#'
#' ## Two groups with only a few connections between groups
#' pm <- cbind( c(.1, .001), c(.001, .05) )
#' g <- sample_sbm(1000, pref.matrix=pm, block.sizes=c(300,700))
#' g
#' @export
sample_sbm <- sample_sbm
## NOTE(review): self-assignment -- `sample_sbm` is presumably defined in
## auto-generated binding code elsewhere in the package; re-assigning it here
## attaches the roxygen documentation above to the exported object. Confirm
## against the package's generated-code file.
#' @export
sbm <- function(...) {
  constructor_spec(sample_sbm, ...)
}
## -----------------------------------------------------------------
#' Sample the hierarchical stochastic block model
#'
#' Sampling from a hierarchical stochastic block model of networks.
#'
#' The function generates a random graph according to the hierarchical
#' stochastic block model.
#'
#' @aliases sample_hierarchical_sbm hierarchical_sbm
#' @param n Integer scalar, the number of vertices.
#' @param m Integer scalar, the number of vertices per block. \code{n / m} must
#' be integer. Alternatively, an integer vector of block sizes, if not all the
#' blocks have equal sizes.
#' @param rho Numeric vector, the fraction of vertices per cluster, within a
#' block. Must sum up to 1, and \code{rho * m} must be integer for all elements
#' of rho. Alternatively a list of rho vectors, one for each block, if they are
#' not the same for all blocks.
#' @param C A square, symmetric numeric matrix, the Bernoulli rates for the
#' clusters within a block. Its size must match the size of the \code{rho}
#' vector. Alternatively, a list of square matrices, if the Bernoulli rates
#' differ in different blocks.
#' @param p Numeric scalar, the Bernoulli rate of connections between vertices
#' in different blocks.
#' @param \dots Passed to \code{sample_hierarchical_sbm}.
#' @return An igraph graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sbm.game}}
#' @keywords graphs, random graphs
#' @examples
#'
#' ## Ten blocks with three clusters each
#' C <- matrix(c(1 , 3/4, 0,
#' 3/4, 0, 3/4,
#' 0 , 3/4, 3/4), nrow=3)
#' g <- sample_hierarchical_sbm(100, 10, rho=c(3, 3, 4)/10, C=C, p=1/20)
#' g
#' if (require(Matrix)) { image(g[]) }
#' @export
sample_hierarchical_sbm <- function(n, m, rho, C, p) {
  ## Each of `m', `rho' and `C' may be supplied once (shared by every block)
  ## or once per block; count how many per-block specifications we got.
  len.m <- length(m)
  len.rho <- if (is.list(rho)) length(rho) else 1
  len.C <- if (is.list(C)) length(C) else 1
  lens <- unique(c(len.m, len.rho, len.C))
  if (length(lens) == 1 && lens == 1) {
    ## Every block is identical: use the simpler single-specification sampler.
    hsbm.1.game(n, m, rho, C, p)
  } else {
    ## Mixed case: all non-scalar specifications must agree on one length.
    lens <- setdiff(lens, 1)
    if (length(lens) != 1) {
      stop("Lengths of `m', `rho' and `C' must match")
    }
    ## Recycle the scalar/single specifications up to the common length,
    ## wrapping non-list inputs so everything becomes a per-block list.
    m <- rep(m, length.out=lens)
    rho <- if (is.list(rho)) {
      rep(rho, length.out=lens)
    } else {
      rep(list(rho), length.out=lens)
    }
    C <- if (is.list(C)) {
      rep(C, length.out=lens)
    } else {
      rep(list(C), length.out=lens)
    }
    hsbm.list.game(n, m, rho, C, p)
  }
}
#' @export
hierarchical_sbm <- function(...) {
  constructor_spec(sample_hierarchical_sbm, ...)
}
## -----------------------------------------------------------------
#' Generate random graphs according to the random dot product graph model
#'
#' In this model, each vertex is represented by a latent position vector.
#' Probability of an edge between two vertices are given by the dot product of
#' their latent position vectors.
#'
#' The dot product of the latent position vectors should be in the [0,1]
#' interval, otherwise a warning is given. For negative dot products, no edges
#' are added; dot products that are larger than one always add an edge.
#'
#' @aliases sample_dot_product dot_product
#' @param vecs A numeric matrix in which each latent position vector is a
#' column.
#' @param directed A logical scalar, TRUE if the generated graph should be
#' directed.
#' @param \dots Passed to \code{sample_dot_product}.
#' @return An igraph graph object which is the generated random dot product
#' graph.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{sample_dirichlet}}, \code{\link{sample_sphere_surface}}
#' and \code{\link{sample_sphere_volume}} for sampling position vectors.
#' @references Christine Leigh Myers Nickel: Random dot product graphs, a model
#' for social networks. Dissertation, Johns Hopkins University, Maryland, USA,
#' 2006.
#' @keywords graphs
#' @examples
#'
#' ## A randomly generated graph
#' lpvs <- matrix(rnorm(200), 20, 10)
#' lpvs <- apply(lpvs, 2, function(x) { return (abs(x)/sqrt(sum(x^2))) })
#' g <- sample_dot_product(lpvs)
#' g
#'
#' ## Sample latent vectors from the surface of the unit sphere
#' lpvs2 <- sample_sphere_surface(dim=5, n=20)
#' g2 <- sample_dot_product(lpvs2)
#' g2
#' @export
sample_dot_product <- sample_dot_product
## NOTE(review): self-assignment; re-binds a definition created elsewhere
## (likely auto-generated) so the roxygen documentation above attaches to it.
#' @export
dot_product <- function(...) {
  constructor_spec(sample_dot_product, ...)
}
#' A graph with subgraphs that are each a random graph.
#'
#' Create a number of Erdos-Renyi random graphs with identical parameters, and
#' connect them with the specified number of edges.
#'
#'
#' @aliases interconnected.islands.game sample_islands
#' @param islands.n The number of islands in the graph.
#' @param islands.size The size of islands in the graph.
#' @param islands.pin The probability to create each possible edge into each
#' island.
#' @param n.inter The number of edges to create between two islands.
#' @return An igraph graph.
#' @author Samuel Thiriot (\url{https://www.linkedin.com/in/samthiriot})
#' @seealso \code{\link{sample_gnp}}
#' @keywords graphs
#' @examples
#'
#' g <- sample_islands(3, 10, 5/10, 1)
#' oc <- cluster_optimal(g)
#' oc
#' @export
sample_islands <- sample_islands
## NOTE(review): self-assignment; re-binds a definition created elsewhere
## (likely auto-generated) so the roxygen documentation above attaches to it.
#' Create a random regular graph
#'
#' Generate a random graph where each vertex has the same degree.
#'
#' This game generates a directed or undirected random graph where the degrees
#' of vertices are equal to a predefined constant k. For undirected graphs, at
#' least one of k and the number of vertices must be even.
#'
#' The game simply uses \code{\link{sample_degseq}} with appropriately
#' constructed degree sequences.
#'
#' @aliases sample_k_regular k.regular.game
#' @param no.of.nodes Integer scalar, the number of vertices in the generated
#' graph.
#' @param k Integer scalar, the degree of each vertex in the graph, or the
#' out-degree and in-degree in a directed graph.
#' @param directed Logical scalar, whether to create a directed graph.
#' @param multiple Logical scalar, whether multiple edges are allowed.
#' @return An igraph graph.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @seealso \code{\link{sample_degseq}} for a generator with prescribed degree
#' sequence.
#' @keywords graphs
#' @examples
#'
#' ## A simple ring
#' ring <- sample_k_regular(10, 2)
#' plot(ring)
#'
#' ## k-regular graphs on 10 vertices, with k=1:9
#' k10 <- lapply(1:9, sample_k_regular, no.of.nodes=10)
#'
#' layout(matrix(1:9, nrow=3, byrow=TRUE))
#' sapply(k10, plot, vertex.label=NA)
#' @export
sample_k_regular <- sample_k_regular
## NOTE(review): self-assignment; re-binds a definition created elsewhere
## (likely auto-generated) so the roxygen documentation above attaches to it.
#' Random graphs from vertex fitness scores
#'
#' This function generates a non-growing random graph with edge probabilities
#' proportional to node fitness scores.
#'
#' This game generates a directed or undirected random graph where the
#' probability of an edge between vertices \eqn{i} and \eqn{j} depends on the
#' fitness scores of the two vertices involved. For undirected graphs, each
#' vertex has a single fitness score. For directed graphs, each vertex has an
#' out- and an in-fitness, and the probability of an edge from \eqn{i} to
#' \eqn{j} depends on the out-fitness of vertex \eqn{i} and the in-fitness of
#' vertex \eqn{j}.
#'
#' The generation process goes as follows. We start from \eqn{N} disconnected
#' nodes (where \eqn{N} is given by the length of the fitness vector). Then we
#' randomly select two vertices \eqn{i} and \eqn{j}, with probabilities
#' proportional to their fitnesses. (When the generated graph is directed,
#' \eqn{i} is selected according to the out-fitnesses and \eqn{j} is selected
#' according to the in-fitnesses). If the vertices are not connected yet (or if
#' multiple edges are allowed), we connect them; otherwise we select a new
#' pair. This is repeated until the desired number of links are created.
#'
#' It can be shown that the \emph{expected} degree of each vertex will be
#' proportional to its fitness, although the actual, observed degree will not
#' be. If you need to generate a graph with an exact degree sequence, consider
#' \code{\link{sample_degseq}} instead.
#'
#' This model is commonly used to generate static scale-free networks. To
#' achieve this, you have to draw the fitness scores from the desired power-law
#' distribution. Alternatively, you may use \code{\link{sample_fitness_pl}}
#' which generates the fitnesses for you with a given exponent.
#'
#' @aliases sample_fitness static.fitness.game
#' @param no.of.edges The number of edges in the generated graph.
#' @param fitness.out A numeric vector containing the fitness of each vertex.
#' For directed graphs, this specifies the out-fitness of each vertex.
#' @param fitness.in If \code{NULL} (the default), the generated graph will be
#' undirected. If not \code{NULL}, then it should be a numeric vector and it
#' specifies the in-fitness of each vertex.
#'
#' If this argument is not \code{NULL}, then a directed graph is generated,
#' otherwise an undirected one.
#' @param loops Logical scalar, whether to allow loop edges in the graph.
#' @param multiple Logical scalar, whether to allow multiple edges in the
#' graph.
#' @return An igraph graph, directed or undirected.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @references Goh K-I, Kahng B, Kim D: Universal behaviour of load
#' distribution in scale-free networks. \emph{Phys Rev Lett} 87(27):278701,
#' 2001.
#' @keywords graphs
#' @examples
#'
#' N <- 10000
#' g <- sample_fitness(5*N, sample((1:50)^-2, N, replace=TRUE))
#' degree_distribution(g)
#' \dontrun{plot(degree_distribution(g, cumulative=TRUE), log="xy")}
sample_fitness <- sample_fitness
#' Scale-free random graphs, from vertex fitness scores
#'
#' This function generates a non-growing random graph with expected power-law
#' degree distributions.
#'
#' This game generates a directed or undirected random graph where the degrees
#' of vertices follow power-law distributions with prescribed exponents. For
#' directed graphs, the exponents of the in- and out-degree distributions may
#' be specified separately.
#'
#' The game simply uses \code{\link{sample_fitness}} with appropriately
#' constructed fitness vectors. In particular, the fitness of vertex \eqn{i} is
#' \eqn{i^{-alpha}}{i^(-alpha)}, where \eqn{alpha = 1/(gamma-1)} and gamma is
#' the exponent given in the arguments.
#'
#' To remove correlations between in- and out-degrees in case of directed
#' graphs, the in-fitness vector will be shuffled after it has been set up and
#' before \code{\link{sample_fitness}} is called.
#'
#' Note that significant finite size effects may be observed for exponents
#' smaller than 3 in the original formulation of the game. This function
#' provides an argument that lets you remove the finite size effects by
#' assuming that the fitness of vertex \eqn{i} is
#' \eqn{(i+i_0-1)^{-alpha}}{(i+i0-1)^(-alpha)} where \eqn{i_0}{i0} is a
#' constant chosen appropriately to ensure that the maximum degree is less than
#' the square root of the number of edges times the average degree; see the
#' paper of Chung and Lu, and Cho et al for more details.
#'
#' @aliases sample_fitness_pl static.power.law.game
#' @param no.of.nodes The number of vertices in the generated graph.
#' @param no.of.edges The number of edges in the generated graph.
#' @param exponent.out Numeric scalar, the power law exponent of the degree
#' distribution. For directed graphs, this specifies the exponent of the
#' out-degree distribution. It must be greater than or equal to 2. If you pass
#' \code{Inf} here, you will get back an Erdos-Renyi random network.
#' @param exponent.in Numeric scalar. If negative, the generated graph will be
#' undirected. If greater than or equal to 2, this argument specifies the
#' exponent of the in-degree distribution. If non-negative but less than 2, an
#' error will be generated.
#' @param loops Logical scalar, whether to allow loop edges in the generated
#' graph.
#' @param multiple Logical scalar, whether to allow multiple edges in the
#' generated graph.
#' @param finite.size.correction Logical scalar, whether to use the proposed
#' finite size correction of Cho et al., see references below.
#' @return An igraph graph, directed or undirected.
#' @author Tamas Nepusz \email{ntamas@@gmail.com}
#' @references Goh K-I, Kahng B, Kim D: Universal behaviour of load
#' distribution in scale-free networks. \emph{Phys Rev Lett} 87(27):278701,
#' 2001.
#'
#' Chung F and Lu L: Connected components in a random graph with given degree
#' sequences. \emph{Annals of Combinatorics} 6, 125-145, 2002.
#'
#' Cho YS, Kim JS, Park J, Kahng B, Kim D: Percolation transitions in
#' scale-free networks under the Achlioptas process. \emph{Phys Rev Lett}
#' 103:135702, 2009.
#' @keywords graphs
#' @examples
#'
#' g <- sample_fitness_pl(10000, 30000, 2.2, 2.3)
#' \dontrun{plot(degree_distribution(g, cumulative=TRUE, mode="out"), log="xy")}
sample_fitness_pl <- sample_fitness_pl
#' Forest Fire Network Model
#'
#' This is a growing network model, which resembles how a forest fire
#' spreads by igniting trees close by.
#'
#' The forest fire model intends to reproduce the following network
#' characteristics, observed in real networks: \itemize{ \item Heavy-tailed
#' in-degree distribution. \item Heavy-tailed out-degree distribution. \item
#' Communities. \item Densification power-law. The network is densifying in
#' time, according to a power-law rule. \item Shrinking diameter. The diameter
#' of the network decreases in time. }
#'
#' The network is generated in the following way. One vertex is added at a
#' time. This vertex connects to (cites) \code{ambs} vertices already present
#' in the network, chosen uniformly random. Now, for each cited vertex \eqn{v}
#' we do the following procedure: \enumerate{ \item We generate two random
#' numbers, \eqn{x} and \eqn{y}, that are geometrically distributed with means
#' \eqn{p/(1-p)} and \eqn{rp/(1-rp)}. (\eqn{p} is \code{fw.prob}, \eqn{r} is
#' \code{bw.factor}.) The new vertex cites \eqn{x} outgoing neighbors and
#' \eqn{y} incoming neighbors of \eqn{v}, from those which are not yet cited by
#' the new vertex. If there are less than \eqn{x} or \eqn{y} such vertices
#' available then we cite all of them. \item The same procedure is applied to
#' all the newly cited vertices. }
#'
#' @aliases sample_forestfire forest.fire.game
#' @param nodes The number of vertices in the graph.
#' @param fw.prob The forward burning probability, see details below.
#' @param bw.factor The backward burning ratio. The backward burning
#' probability is calculated as \code{bw.factor*fw.prob}.
#' @param ambs The number of ambassador vertices.
#' @param directed Logical scalar, whether to create a directed graph.
#' @return A simple graph, possibly directed if the \code{directed} argument is
#' \code{TRUE}.
#' @note The version of the model in the published paper is incorrect in the
#' sense that it cannot generate the kind of graphs the authors claim. A
#' corrected version is available from
#' \url{http://www.cs.cmu.edu/~jure/pubs/powergrowth-tkdd.pdf}, our
#' implementation is based on this.
#' @author Gabor Csardi \email{csardi.gabor@@gmail.com}
#' @seealso \code{\link{barabasi.game}} for the basic preferential attachment
#' model.
#' @references Jure Leskovec, Jon Kleinberg and Christos Faloutsos. Graphs over
#' time: densification laws, shrinking diameters and possible explanations.
#' \emph{KDD '05: Proceeding of the eleventh ACM SIGKDD international
#' conference on Knowledge discovery in data mining}, 177--187, 2005.
#' @keywords graphs
#' @examples
#'
#' g <- sample_forestfire(10000, fw.prob=0.37, bw.factor=0.32/0.37)
#' dd1 <- degree_distribution(g, mode="in")
#' dd2 <- degree_distribution(g, mode="out")
#' plot(seq(along=dd1)-1, dd1, log="xy")
#' points(seq(along=dd2)-1, dd2, col=2, pch=2)
sample_forestfire <- sample_forestfire
#' Generate a new random graph from a given graph by randomly
#' adding/removing edges
#'
#' Sample a new graph by perturbing the adjacency matrix of a given graph
#' and shuffling its vertices.
#'
#' Please see the reference given below.
#'
#' @param old.graph The original graph.
#' @param corr A scalar in the unit interval, the target Pearson
#' correlation between the adjacency matrices of the original and the generated
#' graph (the adjacency matrix being used as a vector).
#' @param p A numeric scalar, the probability of an edge between two
#' vertices, it must be in the open (0,1) interval.
#' @param permutation A numeric vector, a permutation vector that is
#' applied on the vertices of the first graph, to get the second graph. If
#' \code{NULL}, the vertices are not permuted.
#' @return An unweighted graph of the same size as \code{old.graph} such
#' that the correlation coefficient between the entries of the two
#' adjacency matrices is \code{corr}. Note each pair of corresponding
#' matrix entries is a pair of correlated Bernoulli random variables.
#'
#' @seealso \code{\link{sample_correlated_gnp_pair}},
#' \code{\link{sample_gnp}}
#' @references Lyzinski, V., Fishkind, D. E., Priebe, C. E. (2013). Seeded
#' graph matching for correlated Erdos-Renyi graphs.
#' \url{http://arxiv.org/abs/1304.7844}
#' @examples
#' g <- sample_gnp(1000, .1)
#' g2 <- sample_correlated_gnp(g, corr = 0.5)
#' cor(as.vector(g[]), as.vector(g2[]))
#' g
#' g2
sample_correlated_gnp <- sample_correlated_gnp
#' Sample a pair of correlated G(n,p) random graphs
#'
#' Sample a new graph by perturbing the adjacency matrix of a given graph and
#' shuffling its vertices.
#'
#' Please see the reference given below.
#'
#' @param n Numeric scalar, the number of vertices for the sampled graphs.
#' @param corr A scalar in the unit interval, the target Pearson correlation
#' between the adjacency matrices of the original and the generated graph (the
#' adjacency matrix being used as a vector).
#' @param p A numeric scalar, the probability of an edge between two vertices,
#' it must be in the open (0,1) interval.
#' @param directed Logical scalar, whether to generate directed graphs.
#' @param permutation A numeric vector, a permutation vector that is applied on
#' the vertices of the first graph, to get the second graph. If \code{NULL},
#' the vertices are not permuted.
#' @return A list of two igraph objects, named \code{graph1} and
#' \code{graph2}, which are two graphs whose adjacency matrix entries are
#' correlated with \code{corr}.
#'
#' @seealso \code{\link{sample_correlated_gnp}},
#' \code{\link{sample_gnp}}.
#' @references Lyzinski, V., Fishkind, D. E., Priebe, C. E. (2013). Seeded
#' graph matching for correlated Erdos-Renyi graphs.
#' \url{http://arxiv.org/abs/1304.7844}
#' @keywords graphs,random graphs
#' @examples
#' gg <- sample_correlated_gnp_pair(n = 10, corr = .8, p = .5,
#' directed = FALSE)
#' gg
#' cor(as.vector(gg[[1]][]), as.vector(gg[[2]][]))
sample_correlated_gnp_pair <- sample_correlated_gnp_pair
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/x169.R
\name{x169}
\alias{x169}
\title{x169 graphs
List all possible x169 graphs based on given SIX nodes}
\usage{
x169(x)
}
\arguments{
\item{x}{The vector representing nodes}
}
\value{
A matrix listing edges of x169 graphs
}
\description{
x169 graphs
List all possible x169 graphs based on given SIX nodes
}
\examples{
x169(c(1:6))
}
| /man/x169.Rd | no_license | placeboo/subgraph | R | false | true | 414 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/x169.R
\name{x169}
\alias{x169}
\title{x169 graphs
List all possible x169 graphs based on given SIX nodes}
\usage{
x169(x)
}
\arguments{
\item{x}{The vector representing nodes}
}
\value{
A matrix listing edges of x169 graphs
}
\description{
x169 graphs
List all possible x169 graphs based on given SIX nodes
}
\examples{
x169(c(1:6))
}
|
\name{text2color}
\alias{text2color}
\title{Map Words to Colors}
\usage{
text2color(words, recode.words, colors)
}
\arguments{
\item{words}{A vector of words.}
\item{recode.words}{A vector of unique words or a list of
unique word vectors that will be matched against
corresponding colors.}
\item{colors}{A vector of colors of equal in length to
recode.words + 1(the +1 is for unmatched words).}
}
\value{
Returns a vector of mapped colors equal in length to the
words vector.
}
\description{
A dictionary lookup that maps words to colors.
}
\examples{
\dontrun{
set.seed(10)
x <- data.frame(X1 = sample(Top25Words[1:10], 20, TRUE))
#blue was recycled
text2color(x$X1, qcv(the, and, is), qcv(red, green, blue))
text2color(x$X1, qcv(the, and, is), qcv(red, green, blue, white))
x$X2 <- text2color(x$X1, list(qcv(the, and, is), "that"),
qcv(red, green, white))
x
}
}
\seealso{
\code{\link[qdap]{lookup}}
}
\keyword{color,}
\keyword{dictionary}
\keyword{lookup,}
\keyword{recode,}
| /man/text2color.Rd | no_license | abresler/qdap | R | false | false | 1,007 | rd | \name{text2color}
\alias{text2color}
\title{Map Words to Colors}
\usage{
text2color(words, recode.words, colors)
}
\arguments{
\item{words}{A vector of words.}
\item{recode.words}{A vector of unique words or a list of
unique word vectors that will be matched against
corresponding colors.}
\item{colors}{A vector of colors of equal in length to
recode.words + 1(the +1 is for unmatched words).}
}
\value{
Returns a vector of mapped colors equal in length to the
words vector.
}
\description{
A dictionary lookup that maps words to colors.
}
\examples{
\dontrun{
set.seed(10)
x <- data.frame(X1 = sample(Top25Words[1:10], 20, TRUE))
#blue was recycled
text2color(x$X1, qcv(the, and, is), qcv(red, green, blue))
text2color(x$X1, qcv(the, and, is), qcv(red, green, blue, white))
x$X2 <- text2color(x$X1, list(qcv(the, and, is), "that"),
qcv(red, green, white))
x
}
}
\seealso{
\code{\link[qdap]{lookup}}
}
\keyword{color}
\keyword{dictionary}
\keyword{lookup}
\keyword{recode}
|
# Jake Yeung
# Date of Creation: 2021-02-11
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/correct_batch_effects/1-correct_batch_effects_from_TES_from_LDA.R
#
# Purpose: for each histone mark in `jmarks`, load a fitted LDA (topic model)
# over TES regions, impute log2 signal per (region, cell) from the topic
# decomposition, attach cell metadata, batch-correct each region, and save
# the corrected matrices together with the raw count matrices as an .RData.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(scchicFuncs)
library(topicmodels)
library(JFuncs)
library(hash)
library(igraph)
library(umap)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
library(ChIPseeker)
library(GenomicRanges)
library(DescTools)
library(topicmodels)  # NOTE(review): duplicate -- topicmodels is already attached above
# NOTE(review): mclapply() (used below) lives in the base `parallel` package,
# which is never attached explicitly here -- presumably pulled in by one of
# the helper packages (scchicFuncs/JFuncs); TODO confirm.
# UMAP settings: configured but umap() is never called anywhere in this
# script -- presumably leftover from a shared analysis template.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
ncores <- 8  # worker count for the mclapply() batch-correction step below
hubprefix <- "/home/jyeung/hub_oudenaarden"
# Only H3K4me1 is processed in this run; the downstream branches also test
# for H3K9me3/H3K27me3 so the same pipeline can run on other marks.
jmarks <- c("H3K4me1"); names(jmarks) <- jmarks
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/robjs_batch_correction_output"
outrdata2 <- file.path(outdir, paste0("batch_corrected_imputed_values.TES.k4_k9.mat.namesfix.", Sys.Date(), ".TES.from_LDA.RData"))
# Refuse to clobber an existing output file for the same date.
assertthat::assert_that(!file.exists(outrdata2))
# Load LDA outputs --------------------------------------------------------
# for K27me3
# indir.lda <- "jyeung/data/scChiC/from_rstudioserver/count_tables.BM.k4_k9_TES_genomewide"
indir.lda <- "jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_BM_dbl_reseq.varfilt.TES_genomewide"
# Build (and sanity-check) the path to each mark's fitted LDA .Robj file.
inf.lda.lst <- lapply(jmarks, function(jmark){
print(jmark)
# fname <- paste0("count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.rds")
fname <- paste0("lda_outputs.count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.K-30.binarize.FALSE/ldaOut.count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.K-30.Robj")
inf.lda.tmp <- file.path(hubprefix, indir.lda, fname)
assertthat::assert_that(file.exists(inf.lda.tmp))
return(inf.lda.tmp)
})
# Load each .Robj (brings `out.lda` and `count.mat` into the function
# environment; v=T echoes the loaded names) and extract the LDA posterior:
# $topics (documents x topics) and $terms (topics x terms), per topicmodels.
out.lst <- lapply(jmarks, function(jmark){
print(jmark)
inf.lda <- inf.lda.lst[[jmark]]
load(inf.lda, v=T) # out.lda, count.mat
tm.result <- posterior(out.lda)
return(list(tm.result = tm.result, count.mat = count.mat))
})
# Keep the raw count matrices separately; they are saved alongside the
# batch-corrected matrices at the end.
count.mat.lst <- lapply(out.lst, function(out){
out$count.mat
})
# Load meta data ---------------------------------------------------------
indir.metas <- file.path(hubprefix, "jyeung/data/scChiC/from_rstudioserver/count_tables.BMround2.from_peaks.sitecount_mat.split_old_and_new/add_experi")
# One metadata table per mark (fread -> data.table).
dat.metas <- lapply(jmarks, function(jmark){
fname <- paste0("count_mat_from_sitecount_mat.", jmark, ".filtNAcells_allbins.from_same_annot_file.metadata.2020-12-28.with_experi.txt")
fread(file.path(indir.metas, fname))
})
# add jrep2 for batch correction?
# Collapse `jrep` into a two-level batch label. The "zold"/"anew" prefixes
# presumably force a particular alphabetical factor order -- TODO confirm.
# For H3K9me3 the condition is inverted: anything other than "rep1old"
# is labelled "zold" there.
dat.metas <- lapply(jmarks, function(jmark){
dat.metas.tmp <- dat.metas[[jmark]]
if (jmark != "H3K9me3"){
dat.metas.tmp$jrep2 <- sapply(dat.metas.tmp$jrep, function(x) ifelse(x == "rep1old", "zold", "anew"))
} else {
dat.metas.tmp$jrep2 <- sapply(dat.metas.tmp$jrep, function(x) ifelse(x != "rep1old", "zold", "anew"))
}
return(dat.metas.tmp)
})
# Select bins and correct -------------------------------------------------
# Impute log2 signal: topics %*% terms reconstructs the cell-by-region
# probability matrix; transpose to regions x cells, then take log2.
imputed.lst <- lapply(jmarks, function(jmark){
print(jmark)
xmat <- log2(t(out.lst[[jmark]]$tm.result$topics %*% out.lst[[jmark]]$tm.result$terms))
print("Dim before")
print(dim(xmat))
# Drop duplicated row/column names so the melt/dcast round trip below
# operates on unique keys.
rnames.tmp <- rownames(xmat)
cnames.tmp <- colnames(xmat)
rnames.tmp.i <- !duplicated(rnames.tmp)
cnames.tmp.i <- !duplicated(cnames.tmp)
xmat <- xmat[rnames.tmp.i, cnames.tmp.i]
print("Dim after")
print(dim(xmat))
return(xmat)
})
# Reshape each imputed matrix to long format (one row per region x cell)
# and attach the per-cell metadata.
imputed.long.lst <- lapply(jmarks, function(jmark){
rnames.keep <- rownames(imputed.lst[[jmark]]) # keep all
jmat.filt <- imputed.lst[[jmark]][rnames.keep, ] %>%
data.table::melt()
colnames(jmat.filt) <- c("rname", "cell", "log2exprs")
# left_join with no `by` joins on all shared columns -- here presumably
# just "cell"; TODO confirm against the metadata schema.
jmat.filt <- jmat.filt %>%
left_join(., dat.metas[[jmark]])
return(jmat.filt)
})
# Correct batch ----------------------------------------------------------
# NOTE(review): message says "multicore 4" but ncores is set to 8 above.
print("Correcting batch multicore 4")
system.time(
# dat.adj.lst <- lapply(imputed.long.lst, function(jdat){
# Per region (rname), remove plate/replicate effects via
# AdjustBatchEffect() -- presumably from scchicFuncs/JFuncs; TODO confirm.
# H3K27me3 is exempted: its adjustment columns are zeroed and the
# adjusted values are just the raw log2 values.
dat.adj.lst <- mclapply(jmarks, function(jmark){
jdat <- imputed.long.lst[[jmark]]
if (jmark != "H3K27me3"){
dat.adj <- jdat %>%
group_by(rname) %>%
do(AdjustBatchEffect(.))
} else {
dat.adj <- jdat
dat.adj$plateadj2 <- 0
dat.adj$clstradj2 <- 0
dat.adj$log2exprsadj <- dat.adj$log2exprs
}
return(dat.adj)
}, mc.cores = ncores)
)
# Keep only the columns needed downstream.
dat.adj.lst2 <- lapply(dat.adj.lst, function(jdat){
subset(jdat, select = c(rname, cell, log2exprs, cluster, batch, jrep, jrep2, plateadj2, clstradj2, log2exprsadj))
})
# Cast the adjusted values back to a wide region-by-cell table per mark.
mat.adj.lst <- lapply(dat.adj.lst2, function(dat.adj){
mat.adj <- data.table::dcast(dat.adj, formula = rname ~ cell, value.var = "log2exprsadj")
})
# Persist corrected matrices and raw counts together.
save(mat.adj.lst, count.mat.lst, file = outrdata2)
| /scripts/rstudioserver_analysis/spikeins/correct_batch_effects/1-correct_batch_effects_from_TES_from_LDA.R | no_license | jakeyeung/sortchicAllScripts | R | false | false | 4,814 | r | # Jake Yeung
# Date of Creation: 2021-02-11
# File: ~/projects/scchic/scripts/rstudioserver_analysis/spikeins/correct_batch_effects/1-correct_batch_effects_from_TES_from_LDA.R
#
# Purpose: for each histone mark in `jmarks`, load a fitted LDA (topic model)
# over TES regions, impute log2 signal per (region, cell) from the topic
# decomposition, attach cell metadata, batch-correct each region, and save
# the corrected matrices together with the raw count matrices as an .RData.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(scchicFuncs)
library(topicmodels)
library(JFuncs)
library(hash)
library(igraph)
library(umap)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(org.Mm.eg.db)
library(ChIPseeker)
library(GenomicRanges)
library(DescTools)
library(topicmodels)  # NOTE(review): duplicate -- topicmodels is already attached above
# NOTE(review): mclapply() (used below) lives in the base `parallel` package,
# which is never attached explicitly here -- presumably pulled in by one of
# the helper packages (scchicFuncs/JFuncs); TODO confirm.
# UMAP settings: configured but umap() is never called anywhere in this
# script -- presumably leftover from a shared analysis template.
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123
ncores <- 8  # worker count for the mclapply() batch-correction step below
hubprefix <- "/home/jyeung/hub_oudenaarden"
# Only H3K4me1 is processed in this run; the downstream branches also test
# for H3K9me3/H3K27me3 so the same pipeline can run on other marks.
jmarks <- c("H3K4me1"); names(jmarks) <- jmarks
outdir <- "/home/jyeung/hub_oudenaarden/jyeung/data/scChiC/from_rstudioserver/robjs_batch_correction_output"
outrdata2 <- file.path(outdir, paste0("batch_corrected_imputed_values.TES.k4_k9.mat.namesfix.", Sys.Date(), ".TES.from_LDA.RData"))
# Refuse to clobber an existing output file for the same date.
assertthat::assert_that(!file.exists(outrdata2))
# Load LDA outputs --------------------------------------------------------
# for K27me3
# indir.lda <- "jyeung/data/scChiC/from_rstudioserver/count_tables.BM.k4_k9_TES_genomewide"
indir.lda <- "jyeung/data/scChiC/raw_demultiplexed/LDA_outputs_all_spikeins/ldaAnalysisBins_mouse_spikein_BM_dbl_reseq.varfilt.TES_genomewide"
# Build (and sanity-check) the path to each mark's fitted LDA .Robj file.
inf.lda.lst <- lapply(jmarks, function(jmark){
print(jmark)
# fname <- paste0("count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.rds")
fname <- paste0("lda_outputs.count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.K-30.binarize.FALSE/ldaOut.count_name.", jmark, ".k4_k9_TES_genomewide.2021-02-11.K-30.Robj")
inf.lda.tmp <- file.path(hubprefix, indir.lda, fname)
assertthat::assert_that(file.exists(inf.lda.tmp))
return(inf.lda.tmp)
})
# Load each .Robj (brings `out.lda` and `count.mat` into the function
# environment; v=T echoes the loaded names) and extract the LDA posterior:
# $topics (documents x topics) and $terms (topics x terms), per topicmodels.
out.lst <- lapply(jmarks, function(jmark){
print(jmark)
inf.lda <- inf.lda.lst[[jmark]]
load(inf.lda, v=T) # out.lda, count.mat
tm.result <- posterior(out.lda)
return(list(tm.result = tm.result, count.mat = count.mat))
})
# Keep the raw count matrices separately; they are saved alongside the
# batch-corrected matrices at the end.
count.mat.lst <- lapply(out.lst, function(out){
out$count.mat
})
# Load meta data ---------------------------------------------------------
indir.metas <- file.path(hubprefix, "jyeung/data/scChiC/from_rstudioserver/count_tables.BMround2.from_peaks.sitecount_mat.split_old_and_new/add_experi")
# One metadata table per mark (fread -> data.table).
dat.metas <- lapply(jmarks, function(jmark){
fname <- paste0("count_mat_from_sitecount_mat.", jmark, ".filtNAcells_allbins.from_same_annot_file.metadata.2020-12-28.with_experi.txt")
fread(file.path(indir.metas, fname))
})
# add jrep2 for batch correction?
# Collapse `jrep` into a two-level batch label. The "zold"/"anew" prefixes
# presumably force a particular alphabetical factor order -- TODO confirm.
# For H3K9me3 the condition is inverted: anything other than "rep1old"
# is labelled "zold" there.
dat.metas <- lapply(jmarks, function(jmark){
dat.metas.tmp <- dat.metas[[jmark]]
if (jmark != "H3K9me3"){
dat.metas.tmp$jrep2 <- sapply(dat.metas.tmp$jrep, function(x) ifelse(x == "rep1old", "zold", "anew"))
} else {
dat.metas.tmp$jrep2 <- sapply(dat.metas.tmp$jrep, function(x) ifelse(x != "rep1old", "zold", "anew"))
}
return(dat.metas.tmp)
})
# Select bins and correct -------------------------------------------------
# Impute log2 signal: topics %*% terms reconstructs the cell-by-region
# probability matrix; transpose to regions x cells, then take log2.
imputed.lst <- lapply(jmarks, function(jmark){
print(jmark)
xmat <- log2(t(out.lst[[jmark]]$tm.result$topics %*% out.lst[[jmark]]$tm.result$terms))
print("Dim before")
print(dim(xmat))
# Drop duplicated row/column names so the melt/dcast round trip below
# operates on unique keys.
rnames.tmp <- rownames(xmat)
cnames.tmp <- colnames(xmat)
rnames.tmp.i <- !duplicated(rnames.tmp)
cnames.tmp.i <- !duplicated(cnames.tmp)
xmat <- xmat[rnames.tmp.i, cnames.tmp.i]
print("Dim after")
print(dim(xmat))
return(xmat)
})
# Reshape each imputed matrix to long format (one row per region x cell)
# and attach the per-cell metadata.
imputed.long.lst <- lapply(jmarks, function(jmark){
rnames.keep <- rownames(imputed.lst[[jmark]]) # keep all
jmat.filt <- imputed.lst[[jmark]][rnames.keep, ] %>%
data.table::melt()
colnames(jmat.filt) <- c("rname", "cell", "log2exprs")
# left_join with no `by` joins on all shared columns -- here presumably
# just "cell"; TODO confirm against the metadata schema.
jmat.filt <- jmat.filt %>%
left_join(., dat.metas[[jmark]])
return(jmat.filt)
})
# Correct batch ----------------------------------------------------------
# NOTE(review): message says "multicore 4" but ncores is set to 8 above.
print("Correcting batch multicore 4")
system.time(
# dat.adj.lst <- lapply(imputed.long.lst, function(jdat){
# Per region (rname), remove plate/replicate effects via
# AdjustBatchEffect() -- presumably from scchicFuncs/JFuncs; TODO confirm.
# H3K27me3 is exempted: its adjustment columns are zeroed and the
# adjusted values are just the raw log2 values.
dat.adj.lst <- mclapply(jmarks, function(jmark){
jdat <- imputed.long.lst[[jmark]]
if (jmark != "H3K27me3"){
dat.adj <- jdat %>%
group_by(rname) %>%
do(AdjustBatchEffect(.))
} else {
dat.adj <- jdat
dat.adj$plateadj2 <- 0
dat.adj$clstradj2 <- 0
dat.adj$log2exprsadj <- dat.adj$log2exprs
}
return(dat.adj)
}, mc.cores = ncores)
)
# Keep only the columns needed downstream.
dat.adj.lst2 <- lapply(dat.adj.lst, function(jdat){
subset(jdat, select = c(rname, cell, log2exprs, cluster, batch, jrep, jrep2, plateadj2, clstradj2, log2exprsadj))
})
# Cast the adjusted values back to a wide region-by-cell table per mark.
mat.adj.lst <- lapply(dat.adj.lst2, function(dat.adj){
mat.adj <- data.table::dcast(dat.adj, formula = rname ~ cell, value.var = "log2exprsadj")
})
# Persist corrected matrices and raw counts together.
save(mat.adj.lst, count.mat.lst, file = outrdata2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.